review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Update i18n translation for Cisco plugins and cfg agent log messages
authorBob Melander <bob.melander@gmail.com>
Mon, 24 Nov 2014 11:59:20 +0000 (12:59 +0100)
committerBob Melander <bob.melander@gmail.com>
Tue, 25 Nov 2014 09:13:35 +0000 (10:13 +0100)
All the existing LOG.info, LOG.warning, LOG.error and LOG.critical
messages should use the _LI, _LW, _LE and _LC markers respectively. Also,
debug-level log messages shouldn't be translated. This patch set covers
the cisco directory under neutron/plugins.

Change-Id: I4463ea3f4ec72f683d61043105a7883f629cefe9
Partial-Bug: #1320867

22 files changed:
neutron/hacking/checks.py
neutron/plugins/cisco/cfg_agent/cfg_agent.py
neutron/plugins/cisco/cfg_agent/device_drivers/csr1kv/csr1kv_routing_driver.py
neutron/plugins/cisco/cfg_agent/device_drivers/driver_mgr.py
neutron/plugins/cisco/cfg_agent/device_status.py
neutron/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py
neutron/plugins/cisco/db/l3/device_handling_db.py
neutron/plugins/cisco/db/l3/l3_router_appliance_db.py
neutron/plugins/cisco/db/n1kv_db_v2.py
neutron/plugins/cisco/db/network_db_v2.py
neutron/plugins/cisco/l3/hosting_device_drivers/csr1kv_hd_driver.py
neutron/plugins/cisco/l3/plugging_drivers/n1kv_trunking_driver.py
neutron/plugins/cisco/l3/service_vm_lib.py
neutron/plugins/cisco/models/virt_phy_sw_v2.py
neutron/plugins/cisco/n1kv/n1kv_client.py
neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py
neutron/plugins/cisco/network_plugin.py
neutron/tests/unit/cisco/cfg_agent/test_cfg_agent.py
neutron/tests/unit/cisco/cfg_agent/test_device_status.py
neutron/tests/unit/cisco/cfg_agent/test_routing_svc_helper.py
neutron/tests/unit/cisco/l3/device_handling_test_support.py
neutron/tests/unit/cisco/n1kv/fake_client.py

index 7a2d27b83849a0d06a0e819b49639b3282fe6e1e..8474ee533bb4a98d9d5fcca69f7056eafe6da0c4 100644 (file)
@@ -60,7 +60,8 @@ def _directory_to_check_translation(filename):
             "neutron/services",
             "neutron/plugins/ml2",
             "neutron/plugins/openvswitch",
-            "neutron/plugins/linuxbridge"]
+            "neutron/plugins/linuxbridge",
+            "neutron/plugins/cisco"]
     return any([dir in filename for dir in dirs])
 
 
index 7512dee8aea7ddc86946c5f1bb1a1213f542f33f..d8f57ca7f7240e036d54fe8091dc845f9bbe7447 100644 (file)
@@ -36,6 +36,7 @@ from neutron.openstack.common import loopingcall
 from neutron.openstack.common import periodic_task
 from neutron.openstack.common import service
 from neutron.openstack.common import timeutils
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
 from neutron.plugins.cisco.cfg_agent import device_status
 from neutron.plugins.cisco.common import cisco_constants as c_constants
 from neutron import service as neutron_service
@@ -132,7 +133,7 @@ class CiscoCfgAgent(manager.Manager):
             self.routing_service_helper = importutils.import_object(
                 svc_helper_class, host, self.conf, self)
         except ImportError as e:
-            LOG.warn(_("Error in loading routing service helper. Class "
+            LOG.warning(_LW("Error in loading routing service helper. Class "
                        "specified is %(class)s. Reason:%(reason)s"),
                      {'class': self.conf.cfg_agent.routing_svc_helper_class,
                       'reason': e})
@@ -143,7 +144,7 @@ class CiscoCfgAgent(manager.Manager):
         self.loop.start(interval=self.conf.cfg_agent.rpc_loop_interval)
 
     def after_start(self):
-        LOG.info(_("Cisco cfg agent started"))
+        LOG.info(_LI("Cisco cfg agent started"))
 
     def get_routing_service_helper(self):
         return self.routing_service_helper
@@ -203,7 +204,7 @@ class CiscoCfgAgent(manager.Manager):
             self.routing_service_helper.process_service(device_ids,
                                                         removed_devices_info)
         else:
-            LOG.warn(_("No routing service helper loaded"))
+            LOG.warning(_LW("No routing service helper loaded"))
         LOG.debug("Processing services completed")
 
     def _process_backlogged_hosting_devices(self, context):
@@ -232,7 +233,7 @@ class CiscoCfgAgent(manager.Manager):
                 if payload['hosting_data'].keys():
                     self.process_services(removed_devices_info=payload)
         except KeyError as e:
-            LOG.error(_("Invalid payload format for received RPC message "
+            LOG.error(_LE("Invalid payload format for received RPC message "
                        "`hosting_devices_removed`. Error is %(error)s. "
                         "Payload is %(payload)s"),
                       {'error': e, 'payload': payload})
@@ -276,20 +277,20 @@ class CiscoCfgAgentWithStateReport(CiscoCfgAgent):
             self.send_agent_report(self.agent_state, context)
             res = self.devmgr_rpc.register_for_duty(context)
             if res is True:
-                LOG.info(_("[Agent registration] Agent successfully "
+                LOG.info(_LI("[Agent registration] Agent successfully "
                            "registered"))
                 return
             elif res is False:
-                LOG.warn(_("[Agent registration] Neutron server said that "
-                           "device manager was not ready. Retrying in %0.2f "
-                           "seconds "), REGISTRATION_RETRY_DELAY)
+                LOG.warning(_LW("[Agent registration] Neutron server said "
+                                "that device manager was not ready. Retrying "
+                                "in %0.2f seconds "), REGISTRATION_RETRY_DELAY)
                 time.sleep(REGISTRATION_RETRY_DELAY)
             elif res is None:
-                LOG.error(_("[Agent registration] Neutron server said that no "
-                            "device manager was found. Cannot "
-                            "continue. Exiting!"))
+                LOG.error(_LE("[Agent registration] Neutron server said that "
+                              "no device manager was found. Cannot continue. "
+                              "Exiting!"))
                 raise SystemExit("Cfg Agent exiting")
-        LOG.error(_("[Agent registration] %d unsuccessful registration "
+        LOG.error(_LE("[Agent registration] %d unsuccessful registration "
                     "attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
         raise SystemExit("Cfg Agent exiting")
 
@@ -323,12 +324,12 @@ class CiscoCfgAgentWithStateReport(CiscoCfgAgent):
             LOG.debug("Send agent report successfully completed")
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warn(_("Neutron server does not support state report. "
+            LOG.warning(_LW("Neutron server does not support state report. "
                        "State report for this agent will be disabled."))
             self.heartbeat.stop()
             return
         except Exception:
-            LOG.exception(_("Failed sending agent report!"))
+            LOG.exception(_LE("Failed sending agent report!"))
 
 
 def main(manager='neutron.plugins.cisco.cfg_agent.'
index 9993ddb379688e9bb0497637db5228df88400cfe..fd328117f0735df51b27f656966b59891e01b31c 100644 (file)
@@ -23,6 +23,7 @@ from ncclient import manager
 
 from oslo.config import cfg
 
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
 from neutron.plugins.cisco.cfg_agent import cfg_exceptions as cfg_exc
 from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
     cisco_csr1kv_snippets as snippets)
@@ -59,7 +60,7 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
             self._csr_conn = None
             self._intfs_enabled = False
         except KeyError as e:
-            LOG.error(_("Missing device parameter:%s. Aborting "
+            LOG.error(_LE("Missing device parameter:%s. Aborting "
                         "CSR1kvRoutingDriver initialization"), e)
             raise cfg_exc.CSR1kvInitializationException()
 
@@ -225,7 +226,7 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
         elif action is 'delete':
             self._remove_static_route(dest, dest_mask, next_hop, vrf_name)
         else:
-            LOG.error(_('Unknown route command %s'), action)
+            LOG.error(_LE('Unknown route command %s'), action)
 
     def _csr_create_vrf(self, ri):
         vrf_name = self._csr_get_vrf_name(ri)
@@ -317,7 +318,7 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
         parse = ciscoconfparse.CiscoConfParse(ioscfg)
         intfs_raw = parse.find_lines("^interface GigabitEthernet")
         intfs = [raw_if.strip().split(' ')[1] for raw_if in intfs_raw]
-        LOG.info(_("Interfaces:%s"), intfs)
+        LOG.info(_LI("Interfaces:%s"), intfs)
         return intfs
 
     def _get_interface_ip(self, interface_name):
@@ -332,9 +333,9 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
         for line in children:
             if 'ip address' in line:
                 ip_address = line.strip().split(' ')[2]
-                LOG.info(_("IP Address:%s"), ip_address)
+                LOG.info(_LI("IP Address:%s"), ip_address)
                 return ip_address
-        LOG.warn(_("Cannot find interface: %s"), interface_name)
+        LOG.warning(_LW("Cannot find interface: %s"), interface_name)
         return None
 
     def _interface_exists(self, interface):
@@ -369,7 +370,7 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
                 confstr = snippets.ENABLE_INTF % i
                 rpc_obj = conn.edit_config(target='running', config=confstr)
                 if self._check_response(rpc_obj, 'ENABLE_INTF'):
-                    LOG.info(_("Enabled interface %s "), i)
+                    LOG.info(_LI("Enabled interface %s "), i)
                     time.sleep(1)
         except Exception:
             return False
@@ -388,7 +389,7 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
             #  raw format ['ip vrf <vrf-name>',....]
             vrf_name = line.strip().split(' ')[2]
             vrfs.append(vrf_name)
-        LOG.info(_("VRFs:%s"), vrfs)
+        LOG.info(_LI("VRFs:%s"), vrfs)
         return vrfs
 
     def _get_capabilities(self):
@@ -433,7 +434,7 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
         if acls_raw:
             if exp_cfg_lines[1] in acls_raw:
                 return True
-            LOG.error(_("Mismatch in ACL configuration for %s"), acl_no)
+            LOG.error(_LE("Mismatch in ACL configuration for %s"), acl_no)
             return False
         LOG.debug("%s is not present in config", acl_no)
         return False
@@ -462,9 +463,9 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
             confstr = snippets.CREATE_VRF % vrf_name
             rpc_obj = conn.edit_config(target='running', config=confstr)
             if self._check_response(rpc_obj, 'CREATE_VRF'):
-                LOG.info(_("VRF %s successfully created"), vrf_name)
+                LOG.info(_LI("VRF %s successfully created"), vrf_name)
         except Exception:
-            LOG.exception(_("Failed creating VRF %s"), vrf_name)
+            LOG.exception(_LE("Failed creating VRF %s"), vrf_name)
 
     def _remove_vrf(self, vrf_name):
         if vrf_name in self._get_vrfs():
@@ -472,13 +473,13 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
             confstr = snippets.REMOVE_VRF % vrf_name
             rpc_obj = conn.edit_config(target='running', config=confstr)
             if self._check_response(rpc_obj, 'REMOVE_VRF'):
-                LOG.info(_("VRF %s removed"), vrf_name)
+                LOG.info(_LI("VRF %s removed"), vrf_name)
         else:
-            LOG.warning(_("VRF %s not present"), vrf_name)
+            LOG.warning(_LW("VRF %s not present"), vrf_name)
 
     def _create_subinterface(self, subinterface, vlan_id, vrf_name, ip, mask):
         if vrf_name not in self._get_vrfs():
-            LOG.error(_("VRF %s not present"), vrf_name)
+            LOG.error(_LE("VRF %s not present"), vrf_name)
         confstr = snippets.CREATE_SUBINTERFACE % (subinterface, vlan_id,
                                                   vrf_name, ip, mask)
         self._edit_running_config(confstr, 'CREATE_SUBINTERFACE')
@@ -491,7 +492,7 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
 
     def _set_ha_HSRP(self, subinterface, vrf_name, priority, group, ip):
         if vrf_name not in self._get_vrfs():
-            LOG.error(_("VRF %s not present"), vrf_name)
+            LOG.error(_LE("VRF %s not present"), vrf_name)
         confstr = snippets.SET_INTC_HSRP % (subinterface, vrf_name, group,
                                             priority, group, ip)
         action = "SET_INTC_HSRP (Group: %s, Priority: % s)" % (group, priority)
@@ -676,7 +677,7 @@ class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
         xml_str = rpc_obj.xml
         if "<ok />" in xml_str:
             LOG.debug("RPCReply for %s is OK", snippet_name)
-            LOG.info(_("%s successfully executed"), snippet_name)
+            LOG.info(_LI("%s successfully executed"), snippet_name)
             return True
         # Not Ok, we throw a ConfigurationException
         e_type = rpc_obj._root[0][0].text
index f48288e03bacb292aa2479a5eacf1737f9f61d7d..69a2e3cecee8dfa5968175fd048d2b55f0d04ea5 100644 (file)
@@ -13,6 +13,7 @@
 #    under the License.
 
 from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LE
 from neutron.openstack.common import importutils
 from neutron.openstack.common import log as logging
 from neutron.plugins.cisco.cfg_agent import cfg_exceptions
@@ -76,7 +77,7 @@ class DeviceDriverManager(object):
             return driver
         except ImportError:
             with excutils.save_and_reraise_exception(reraise=False):
-                LOG.exception(_("Error loading cfg agent driver %(driver)s "
+                LOG.exception(_LE("Error loading cfg agent driver %(driver)s "
                                 "for hosting device template "
                                 "%(t_name)s(%(t_id)s)"),
                               {'driver': driver_class, 't_id': hd_id,
index 5bd9fc8af22d605b9bcfd7147f5068af208ca2ae..a41f8f40708d549a21ef3e07cd64b423dfe08629 100644 (file)
@@ -19,6 +19,8 @@ from oslo.config import cfg
 from neutron.agent.linux import utils as linux_utils
 from neutron.openstack.common import log as logging
 from neutron.openstack.common import timeutils
+from neutron.openstack.common.gettextutils import _LI, _LW
+
 
 LOG = logging.getLogger(__name__)
 
@@ -54,7 +56,7 @@ def _is_pingable(ip):
         linux_utils.execute(ping_cmd, check_exit_code=True)
         return True
     except RuntimeError:
-        LOG.warn(_("Cannot ping ip address: %s"), ip)
+        LOG.warning(_LW("Cannot ping ip address: %s"), ip)
         return False
 
 
@@ -138,22 +140,22 @@ class DeviceStatus(object):
             hd = self.backlog_hosting_devices[hd_id]['hd']
             if not timeutils.is_older_than(hd['created_at'],
                                            hd['booting_time']):
-                LOG.info(_("Hosting device: %(hd_id)s @ %(ip)s hasn't passed "
-                           "minimum boot time. Skipping it. "),
+                LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s hasn't "
+                             "passed minimum boot time. Skipping it. "),
                          {'hd_id': hd_id, 'ip': hd['management_ip_address']})
                 continue
-            LOG.info(_("Checking hosting device: %(hd_id)s @ %(ip)s for "
+            LOG.info(_LI("Checking hosting device: %(hd_id)s @ %(ip)s for "
                        "reachability."), {'hd_id': hd_id,
                                           'ip': hd['management_ip_address']})
             if _is_pingable(hd['management_ip_address']):
                 hd.pop('backlog_insertion_ts', None)
                 del self.backlog_hosting_devices[hd_id]
                 response_dict['reachable'].append(hd_id)
-                LOG.info(_("Hosting device: %(hd_id)s @ %(ip)s is now "
+                LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s is now "
                            "reachable. Adding it to response"),
                          {'hd_id': hd_id, 'ip': hd['management_ip_address']})
             else:
-                LOG.info(_("Hosting device: %(hd_id)s @ %(ip)s still not "
+                LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s still not "
                            "reachable "), {'hd_id': hd_id,
                                            'ip': hd['management_ip_address']})
                 if timeutils.is_older_than(
index d19a1b17c479b15d493942dc90bb9e2cc6f140d6..5a7327a73eca2a686e79e9797572d9af344a085a 100644 (file)
@@ -25,7 +25,7 @@ from neutron.common import utils as common_utils
 from neutron import context as n_context
 from neutron.openstack.common import excutils
 from neutron.openstack.common import log as logging
-
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
 from neutron.plugins.cisco.cfg_agent import cfg_exceptions
 from neutron.plugins.cisco.cfg_agent.device_drivers import driver_mgr
 from neutron.plugins.cisco.cfg_agent import device_status
@@ -226,7 +226,7 @@ class RoutingServiceHelper():
                     self._drivermgr.remove_driver_for_hosting_device(hd_id)
             LOG.debug("Routing service processing successfully completed")
         except Exception:
-            LOG.exception(_("Failed processing routers"))
+            LOG.exception(_LE("Failed processing routers"))
             self.fullsync = True
 
     def collect_state(self, configurations):
@@ -287,7 +287,7 @@ class RoutingServiceHelper():
                 return self.plugin_rpc.get_routers(self.context,
                                                    hd_ids=device_ids)
         except messaging.MessagingException:
-            LOG.exception(_("RPC Error in fetching routers from plugin"))
+            LOG.exception(_LE("RPC Error in fetching routers from plugin"))
             self.fullsync = True
 
     @staticmethod
@@ -374,7 +374,7 @@ class RoutingServiceHelper():
                     cur_router_ids.add(r['id'])
                     hd = r['hosting_device']
                     if not self._dev_status.is_hosting_device_reachable(hd):
-                        LOG.info(_("Router: %(id)s is on an unreachable "
+                        LOG.info(_LI("Router: %(id)s is on an unreachable "
                                    "hosting device. "), {'id': r['id']})
                         continue
                     if r['id'] not in self.router_info:
@@ -383,11 +383,11 @@ class RoutingServiceHelper():
                     ri.router = r
                     self._process_router(ri)
                 except KeyError as e:
-                    LOG.exception(_("Key Error, missing key: %s"), e)
+                    LOG.exception(_LE("Key Error, missing key: %s"), e)
                     self.updated_routers.add(r['id'])
                     continue
                 except cfg_exceptions.DriverException as e:
-                    LOG.exception(_("Driver Exception on router:%(id)s. "
+                    LOG.exception(_LE("Driver Exception on router:%(id)s. "
                                     "Error is %(e)s"), {'id': r['id'], 'e': e})
                     self.updated_routers.update(r['id'])
                     continue
@@ -398,7 +398,7 @@ class RoutingServiceHelper():
                 for router in removed_routers:
                     self._router_removed(router['id'])
         except Exception:
-            LOG.exception(_("Exception in processing routers on device:%s"),
+            LOG.exception(_LE("Exception in processing routers on device:%s"),
                           device_id)
             self.sync_devices.add(device_id)
 
@@ -541,7 +541,7 @@ class RoutingServiceHelper():
         """
         ri = self.router_info.get(router_id)
         if ri is None:
-            LOG.warn(_("Info for router %s was not found. "
+            LOG.warning(_LW("Info for router %s was not found. "
                        "Skipping router removal"), router_id)
             return
         ri.router['gw_port'] = None
@@ -556,7 +556,7 @@ class RoutingServiceHelper():
             del self.router_info[router_id]
             self.removed_routers.discard(router_id)
         except cfg_exceptions.DriverException:
-            LOG.warn(_("Router remove for router_id: %s was incomplete. "
+            LOG.warning(_LW("Router remove for router_id: %s was incomplete. "
                        "Adding the router to removed_routers list"), router_id)
             self.removed_routers.add(router_id)
             # remove this router from updated_routers if it is there. It might
@@ -634,6 +634,7 @@ class RoutingServiceHelper():
         if not ips:
             raise Exception(_("Router port %s has no IP address") % port['id'])
         if len(ips) > 1:
-            LOG.error(_("Ignoring multiple IPs on router port %s"), port['id'])
+            LOG.error(_LE("Ignoring multiple IPs on router port %s"),
+                      port['id'])
         prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
         port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
index 3e9469ba0dbfb521ef1d3c812a663db0a705fe80..c3fe25c52cc3aa6f24c501c7274b6d3313d3b7cb 100644 (file)
@@ -29,6 +29,7 @@ from neutron.openstack.common import importutils
 from neutron.openstack.common import log as logging
 from neutron.openstack.common import timeutils
 from neutron.openstack.common import uuidutils
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
 from neutron.plugins.cisco.common import cisco_constants as c_constants
 from neutron.plugins.cisco.db.l3 import l3_models
 from neutron.plugins.cisco.l3 import service_vm_lib
@@ -119,10 +120,10 @@ class DeviceHandlingMixin(object):
                     name=cfg.CONF.general.l3_admin_tenant)
                 cls._l3_tenant_uuid = tenant.id
             except k_exceptions.NotFound:
-                LOG.error(_('No tenant with a name or ID of %s exists.'),
+                LOG.error(_LE('No tenant with a name or ID of %s exists.'),
                           cfg.CONF.general.l3_admin_tenant)
             except k_exceptions.NoUniqueMatch:
-                LOG.error(_('Multiple tenants matches found for %s'),
+                LOG.error(_LE('Multiple tenants matches found for %s'),
                           cfg.CONF.general.l3_admin_tenant)
         return cls._l3_tenant_uuid
 
@@ -141,21 +142,21 @@ class DeviceHandlingMixin(object):
             if len(net) == 1:
                 num_subnets = len(net[0]['subnets'])
                 if num_subnets == 0:
-                    LOG.error(_('The virtual management network has no '
+                    LOG.error(_LE('The virtual management network has no '
                                 'subnet. Please assign one.'))
                     return
                 elif num_subnets > 1:
-                    LOG.info(_('The virtual management network has %d '
+                    LOG.info(_LI('The virtual management network has %d '
                                'subnets. The first one will be used.'),
                              num_subnets)
                 cls._mgmt_nw_uuid = net[0].get('id')
             elif len(net) > 1:
                 # Management network must have a unique name.
-                LOG.error(_('The virtual management network does not have '
+                LOG.error(_LE('The virtual management network does not have '
                             'unique name. Please ensure that it is.'))
             else:
                 # Management network has not been created.
-                LOG.error(_('There is no virtual management network. Please '
+                LOG.error(_LE('There is no virtual management network. Please '
                             'create one.'))
         return cls._mgmt_nw_uuid
 
@@ -177,12 +178,12 @@ class DeviceHandlingMixin(object):
                 cls._mgmt_sec_grp_id = res[0].get('id')
             elif len(res) > 1:
                 # the mgmt sec group must be unique.
-                LOG.error(_('The security group for the virtual management '
+                LOG.error(_LE('The security group for the virtual management '
                             'network does not have unique name. Please ensure '
                             'that it is.'))
             else:
                 # CSR Mgmt security group is not present.
-                LOG.error(_('There is no security group for the virtual '
+                LOG.error(_LE('There is no security group for the virtual '
                             'management network. Please create one.'))
         return cls._mgmt_sec_grp_id
 
@@ -196,7 +197,7 @@ class DeviceHandlingMixin(object):
                 cls._hosting_device_driver = importutils.import_object(
                     cfg.CONF.hosting_devices.csr1kv_device_driver)
             except (ImportError, TypeError, n_exc.NeutronException):
-                LOG.exception(_('Error loading hosting device driver'))
+                LOG.exception(_LE('Error loading hosting device driver'))
             return cls._hosting_device_driver
 
     @classmethod
@@ -209,7 +210,7 @@ class DeviceHandlingMixin(object):
                 cls._plugging_driver = importutils.import_object(
                     cfg.CONF.hosting_devices.csr1kv_plugging_driver)
             except (ImportError, TypeError, n_exc.NeutronException):
-                LOG.exception(_('Error loading plugging driver'))
+                LOG.exception(_LE('Error loading plugging driver'))
             return cls._plugging_driver
 
     def get_hosting_devices_qry(self, context, hosting_device_ids,
@@ -325,7 +326,8 @@ class DeviceHandlingMixin(object):
                 return False
             if self.is_agent_down(
                     cfg_agent.heartbeat_timestamp):
-                LOG.warn(_('Cisco cfg agent %s is not alive'), cfg_agent.id)
+                LOG.warning(_LW('Cisco cfg agent %s is not alive'),
+                            cfg_agent.id)
             query = context.session.query(l3_models.HostingDevice)
             query = query.filter_by(cfg_agent_id=None)
             for hd in query:
@@ -362,7 +364,7 @@ class DeviceHandlingMixin(object):
             if self._svc_vm_mgr.nova_services_up():
                 self.__class__._nova_running = True
             else:
-                LOG.info(_('Not all Nova services are up and running. '
+                LOG.info(_LI('Not all Nova services are up and running. '
                            'Skipping this CSR1kv vm create request.'))
                 return
         plugging_drv = self.get_hosting_device_plugging_driver()
@@ -399,7 +401,7 @@ class DeviceHandlingMixin(object):
                 plugging_drv.delete_hosting_device_resources(
                     context, self.l3_tenant_id(), **res)
                 return
-        LOG.info(_('Created a CSR1kv hosting device VM'))
+        LOG.info(_LI('Created a CSR1kv hosting device VM'))
         return hosting_device
 
     def _delete_service_vm_hosting_device(self, context, hosting_device):
@@ -417,7 +419,7 @@ class DeviceHandlingMixin(object):
             self.l3_tenant_id(), self.mgmt_nw_id())
         if not self._svc_vm_mgr.delete_service_vm(context,
                                                   hosting_device['id']):
-            LOG.error(_('Failed to delete hosting device %s service VM. '
+            LOG.error(_LE('Failed to delete hosting device %s service VM. '
                         'Will un-register it anyway.'),
                       hosting_device['id'])
         plugging_drv.delete_hosting_device_resources(
@@ -458,7 +460,7 @@ class DeviceHandlingMixin(object):
         with context.session.begin(subtransactions=True):
             active_cfg_agents = self._get_cfg_agents(context, active=True)
             if not active_cfg_agents:
-                LOG.warn(_('There are no active Cisco cfg agents'))
+                LOG.warning(_LW('There are no active Cisco cfg agents'))
                 # No worries, once a Cisco cfg agent is started and
                 # announces itself any "dangling" hosting devices
                 # will be scheduled to it.
index 1284081455f33ba6d0207487bfd0a5ee83b76c94..e97523781d312ece0a40da6b163dee68baa4d44e 100644 (file)
@@ -30,6 +30,7 @@ from neutron.db import l3_db
 from neutron.db import models_v2
 from neutron.db import portbindings_db as p_binding
 from neutron.extensions import providernet as pr_net
+from neutron.openstack.common.gettextutils import _LE, _LI
 from neutron.openstack.common import lockutils
 from neutron.openstack.common import log as logging
 from neutron.openstack.common import loopingcall
@@ -178,7 +179,7 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
             port_db = self._get_router_port_db_on_subnet(
                 context, router_id, subnet_db)
         else:
-            msg = "Either subnet_id or port_id must be specified"
+            msg = _("Either subnet_id or port_id must be specified")
             raise n_exc.BadRequest(resource='router', msg=msg)
         routers = [self.get_router(context, router_id)]
         with context.session.begin(subtransactions=True):
@@ -314,7 +315,7 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
         return sync_data
 
     def schedule_router_on_hosting_device(self, context, r_hd_binding):
-        LOG.info(_('Attempting to schedule router %s.'),
+        LOG.info(_LI('Attempting to schedule router %s.'),
                  r_hd_binding['router']['id'])
         result = self._create_csr1kv_vm_hosting_device(context.elevated())
         if result is None:
@@ -326,14 +327,14 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
             router = r_hd_binding['router']
             r_hd_binding.hosting_device = result
             self.remove_router_from_backlog(router['id'])
-            LOG.info(_('Successfully scheduled router %(r_id)s to '
+            LOG.info(_LI('Successfully scheduled router %(r_id)s to '
                        'hosting device %(d_id)s'),
                      {'r_id': r_hd_binding['router']['id'],
                       'd_id': result['id']})
         return True
 
     def unschedule_router_from_hosting_device(self, context, r_hd_binding):
-        LOG.info(_('Un-schedule router %s.'),
+        LOG.info(_LI('Un-schedule router %s.'),
                  r_hd_binding['router']['id'])
         hosting_device = r_hd_binding['hosting_device']
         if r_hd_binding['hosting_device'] is None:
@@ -346,14 +347,14 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
         if ((router or {}).get('id') is None or
                 router['id'] in self._backlogged_routers):
             return
-        LOG.info(_('Backlogging router %s for renewed scheduling attempt '
+        LOG.info(_LI('Backlogging router %s for renewed scheduling attempt '
                    'later'), router['id'])
         self._backlogged_routers[router['id']] = router
 
     @lockutils.synchronized('routers', 'neutron-')
     def remove_router_from_backlog(self, id):
         self._backlogged_routers.pop(id, None)
-        LOG.info(_('Router %s removed from backlog'), id)
+        LOG.info(_LI('Router %s removed from backlog'), id)
 
     @lockutils.synchronized('routerbacklog', 'neutron-')
     def _process_backlogged_routers(self):
@@ -363,7 +364,7 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
             return
         context = n_context.get_admin_context()
         scheduled_routers = []
-        LOG.info(_('Processing router (scheduling) backlog'))
+        LOG.info(_LI('Processing router (scheduling) backlog'))
         # try to reschedule
         for r_id, router in self._backlogged_routers.items():
             self._add_type_and_hosting_device_info(context, router)
@@ -383,7 +384,7 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
             interval=cfg.CONF.general.backlog_processing_interval)
 
     def _sync_router_backlog(self):
-        LOG.info(_('Synchronizing router (scheduling) backlog'))
+        LOG.info(_LI('Synchronizing router (scheduling) backlog'))
         context = n_context.get_admin_context()
         query = context.session.query(l3_models.RouterHostingDeviceBinding)
         query = query.options(joinedload('router'))
@@ -406,13 +407,13 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
             return query.one()
         except exc.NoResultFound:
             # This should not happen
-            LOG.error(_('DB inconsistency: No type and hosting info associated'
-                        ' with router %s'), id)
+            LOG.error(_LE('DB inconsistency: No type and hosting info '
+                          'associated with router %s'), id)
             raise RouterBindingInfoError(router_id=id)
         except exc.MultipleResultsFound:
             # This should not happen either
-            LOG.error(_('DB inconsistency: Multiple type and hosting info'
-                        ' associated with router %s'), id)
+            LOG.error(_LE('DB inconsistency: Multiple type and hosting info '
+                          'associated with router %s'), id)
             raise RouterBindingInfoError(router_id=id)
 
     def _get_hosting_device_bindings(self, context, id, load_routers=False,
@@ -434,7 +435,7 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
                 binding_info = self._get_router_binding_info(context,
                                                              router['id'])
         except RouterBindingInfoError:
-            LOG.error(_('DB inconsistency: No hosting info associated with '
+            LOG.error(_LE('DB inconsistency: No hosting info associated with '
                         'router %s'), router['id'])
             router['hosting_device'] = None
             return
@@ -511,7 +512,7 @@ class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
         alloc = plugging_driver.allocate_hosting_port(
             context, router_id, port_db, network_type, hosting_device_id)
         if alloc is None:
-            LOG.error(_('Failed to allocate hosting port for port %s'),
+            LOG.error(_LE('Failed to allocate hosting port for port %s'),
                       port_db['id'])
             return
         with context.session.begin(subtransactions=True):
index eaaff10fa85b4f1dbdee2b7f8f6b906c561943a1..e87d2552b1ff4e890c4e7d64da584ca8b1a4c9ec 100644 (file)
@@ -22,6 +22,7 @@ from neutron.common import constants
 from neutron.common import exceptions as n_exc
 import neutron.db.api as db
 from neutron.db import models_v2
+from neutron.openstack.common.gettextutils import _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.cisco.common import cisco_constants as c_const
 from neutron.plugins.cisco.common import cisco_exceptions as c_exc
@@ -322,7 +323,7 @@ def get_segment_range(network_profile):
     # Sort the range to ensure min, max is in order
     seg_min, seg_max = sorted(
         int(i) for i in network_profile.segment_range.split('-'))
-    LOG.debug(_("seg_min %(seg_min)s, seg_max %(seg_max)s"),
+    LOG.debug("seg_min %(seg_min)s, seg_max %(seg_max)s",
               {'seg_min': seg_min, 'seg_max': seg_max})
     return seg_min, seg_max
 
@@ -553,8 +554,8 @@ def reserve_specific_vlan(db_session, physical_network, vlan_id):
                 else:
                     raise n_exc.VlanIdInUse(vlan_id=vlan_id,
                                             physical_network=physical_network)
-            LOG.debug(_("Reserving specific vlan %(vlan)s on physical "
-                        "network %(network)s from pool"),
+            LOG.debug("Reserving specific vlan %(vlan)s on physical network "
+                      "%(network)s from pool",
                       {"vlan": vlan_id, "network": physical_network})
             alloc.allocated = True
             db_session.add(alloc)
@@ -578,7 +579,7 @@ def release_vlan(db_session, physical_network, vlan_id):
                      one())
             alloc.allocated = False
         except exc.NoResultFound:
-            LOG.warning(_("vlan_id %(vlan)s on physical network %(network)s "
+            LOG.warning(_LW("vlan_id %(vlan)s on physical network %(network)s "
                           "not found"),
                         {"vlan": vlan_id, "network": physical_network})
 
@@ -634,7 +635,7 @@ def reserve_specific_vxlan(db_session, vxlan_id):
                      one())
             if alloc.allocated:
                 raise c_exc.VxlanIDInUse(vxlan_id=vxlan_id)
-            LOG.debug(_("Reserving specific vxlan %s from pool"), vxlan_id)
+            LOG.debug("Reserving specific vxlan %s from pool", vxlan_id)
             alloc.allocated = True
             db_session.add(alloc)
         except exc.NoResultFound:
@@ -655,7 +656,7 @@ def release_vxlan(db_session, vxlan_id):
                      one())
             alloc.allocated = False
         except exc.NoResultFound:
-            LOG.warning(_("vxlan_id %s not found"), vxlan_id)
+            LOG.warning(_LW("vxlan_id %s not found"), vxlan_id)
 
 
 def set_port_status(port_id, status):
@@ -764,7 +765,7 @@ def delete_vm_network(db_session, policy_profile_id, network_id):
 
 def create_network_profile(db_session, network_profile):
     """Create a network profile."""
-    LOG.debug(_("create_network_profile()"))
+    LOG.debug("create_network_profile()")
     with db_session.begin(subtransactions=True):
         kwargs = {"name": network_profile["name"],
                   "segment_type": network_profile["segment_type"]}
@@ -786,7 +787,7 @@ def create_network_profile(db_session, network_profile):
 
 def delete_network_profile(db_session, id):
     """Delete Network Profile."""
-    LOG.debug(_("delete_network_profile()"))
+    LOG.debug("delete_network_profile()")
     with db_session.begin(subtransactions=True):
         try:
             network_profile = get_network_profile(db_session, id)
@@ -800,7 +801,7 @@ def delete_network_profile(db_session, id):
 
 def update_network_profile(db_session, id, network_profile):
     """Update Network Profile."""
-    LOG.debug(_("update_network_profile()"))
+    LOG.debug("update_network_profile()")
     with db_session.begin(subtransactions=True):
         profile = get_network_profile(db_session, id)
         profile.update(network_profile)
@@ -809,7 +810,7 @@ def update_network_profile(db_session, id, network_profile):
 
 def get_network_profile(db_session, id):
     """Get Network Profile."""
-    LOG.debug(_("get_network_profile()"))
+    LOG.debug("get_network_profile()")
     try:
         return db_session.query(
             n1kv_models_v2.NetworkProfile).filter_by(id=id).one()
@@ -834,7 +835,7 @@ def _get_network_profiles(db_session=None, physical_network=None):
 
 def create_policy_profile(policy_profile):
     """Create Policy Profile."""
-    LOG.debug(_("create_policy_profile()"))
+    LOG.debug("create_policy_profile()")
     db_session = db.get_session()
     with db_session.begin(subtransactions=True):
         p_profile = n1kv_models_v2.PolicyProfile(id=policy_profile["id"],
@@ -845,7 +846,7 @@ def create_policy_profile(policy_profile):
 
 def delete_policy_profile(id):
     """Delete Policy Profile."""
-    LOG.debug(_("delete_policy_profile()"))
+    LOG.debug("delete_policy_profile()")
     db_session = db.get_session()
     with db_session.begin(subtransactions=True):
         policy_profile = get_policy_profile(db_session, id)
@@ -854,7 +855,7 @@ def delete_policy_profile(id):
 
 def update_policy_profile(db_session, id, policy_profile):
     """Update a policy profile."""
-    LOG.debug(_("update_policy_profile()"))
+    LOG.debug("update_policy_profile()")
     with db_session.begin(subtransactions=True):
         _profile = get_policy_profile(db_session, id)
         _profile.update(policy_profile)
@@ -863,7 +864,7 @@ def update_policy_profile(db_session, id, policy_profile):
 
 def get_policy_profile(db_session, id):
     """Get Policy Profile."""
-    LOG.debug(_("get_policy_profile()"))
+    LOG.debug("get_policy_profile()")
     try:
         return db_session.query(
             n1kv_models_v2.PolicyProfile).filter_by(id=id).one()
@@ -900,7 +901,7 @@ def create_profile_binding(db_session, tenant_id, profile_id, profile_type):
 
 def _profile_binding_exists(db_session, tenant_id, profile_id, profile_type):
     """Check if the profile-tenant binding exists."""
-    LOG.debug(_("_profile_binding_exists()"))
+    LOG.debug("_profile_binding_exists()")
     db_session = db_session or db.get_session()
     return (db_session.query(n1kv_models_v2.ProfileBinding).
             filter_by(tenant_id=tenant_id, profile_id=profile_id,
@@ -909,7 +910,7 @@ def _profile_binding_exists(db_session, tenant_id, profile_id, profile_type):
 
 def get_profile_binding(db_session, tenant_id, profile_id):
     """Get Network/Policy Profile - Tenant binding."""
-    LOG.debug(_("get_profile_binding()"))
+    LOG.debug("get_profile_binding()")
     try:
         return (db_session.query(n1kv_models_v2.ProfileBinding).filter_by(
             tenant_id=tenant_id, profile_id=profile_id).one())
@@ -919,15 +920,15 @@ def get_profile_binding(db_session, tenant_id, profile_id):
 
 def delete_profile_binding(db_session, tenant_id, profile_id):
     """Delete Policy Binding."""
-    LOG.debug(_("delete_profile_binding()"))
+    LOG.debug("delete_profile_binding()")
     db_session = db_session or db.get_session()
     try:
         binding = get_profile_binding(db_session, tenant_id, profile_id)
         with db_session.begin(subtransactions=True):
             db_session.delete(binding)
     except c_exc.ProfileTenantBindingNotFound:
-        LOG.debug(_("Profile-Tenant binding missing for profile ID "
-                    "%(profile_id)s and tenant ID %(tenant_id)s"),
+        LOG.debug("Profile-Tenant binding missing for profile ID "
+                  "%(profile_id)s and tenant ID %(tenant_id)s",
                   {"profile_id": profile_id, "tenant_id": tenant_id})
         return
 
index 1afc3297ac94ff057301f5d4ca126c769766d499..f4724865b27a3bf1229e6f127e3f1f7b5c0055d4 100644 (file)
@@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
 
 def get_all_qoss(tenant_id):
     """Lists all the qos to tenant associations."""
-    LOG.debug(_("get_all_qoss() called"))
+    LOG.debug("get_all_qoss() called")
     session = db.get_session()
     return (session.query(network_models_v2.QoS).
             filter_by(tenant_id=tenant_id).all())
@@ -35,7 +35,7 @@ def get_all_qoss(tenant_id):
 
 def get_qos(tenant_id, qos_id):
     """Lists the qos given a tenant_id and qos_id."""
-    LOG.debug(_("get_qos() called"))
+    LOG.debug("get_qos() called")
     session = db.get_session()
     try:
         return (session.query(network_models_v2.QoS).
@@ -48,7 +48,7 @@ def get_qos(tenant_id, qos_id):
 
 def add_qos(tenant_id, qos_name, qos_desc):
     """Adds a qos to tenant association."""
-    LOG.debug(_("add_qos() called"))
+    LOG.debug("add_qos() called")
     session = db.get_session()
     try:
         qos = (session.query(network_models_v2.QoS).
index 22999f178e206a2cf39962635ca9c32b08beccc7..1cb36e826d78017f43af57c2454b9f51428e15e7 100644 (file)
@@ -17,6 +17,7 @@ import netaddr
 from oslo.config import cfg
 
 from neutron import manager
+from neutron.openstack.common.gettextutils import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.cisco.l3 import hosting_device_drivers
 
@@ -63,7 +64,7 @@ class CSR1kvHostingDeviceDriver(hosting_device_drivers.HostingDeviceDriver):
                     vm_cfg_data += line
             return {'iosxe_config.txt': vm_cfg_data}
         except IOError as e:
-            LOG.error(_('Failed to create config file: %s. Trying to'
+            LOG.error(_LE('Failed to create config file: %s. Trying to'
                         'clean up.'), str(e))
             self.delete_configdrive_files(context, mgmtport)
             raise
index 5cd6a1eac9b768726bf8d8055679ebaf1643361a..e73251756ee14120cc7b794cfe93a5ba1f9063d6 100644 (file)
@@ -25,6 +25,7 @@ from neutron.db import models_v2
 from neutron.extensions import providernet as pr_net
 from neutron import manager
 from neutron.openstack.common import log as logging
+from neutron.openstack.common.gettextutils import _LE, _LI, _LW
 from neutron.plugins.cisco.db.l3 import l3_models
 from neutron.plugins.cisco.extensions import n1kv
 import neutron.plugins.cisco.l3.plugging_drivers as plug
@@ -104,12 +105,12 @@ class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
             return profiles[0]['id']
         elif len(profiles) > 1:
             # Profile must have a unique name.
-            LOG.error(_('The %(resource)s %(name)s does not have unique name. '
-                        'Please refer to admin guide and create one.'),
+            LOG.error(_LE('The %(resource)s %(name)s does not have unique '
+                          'name. Please refer to admin guide and create one.'),
                       {'resource': resource, 'name': name})
         else:
             # Profile has not been created.
-            LOG.error(_('There is no %(resource)s %(name)s. Please refer to '
+            LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
                         'admin guide and create one.'),
                       {'resource': resource, 'name': name})
 
@@ -209,7 +210,7 @@ class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
                         n1kv_const.T2_PORT_NAME, self.t2_port_profile_id(),
                         t_p)
             except n_exc.NeutronException as e:
-                LOG.error(_('Error %s when creating service VM resources. '
+                LOG.error(_LE('Error %s when creating service VM resources. '
                             'Cleaning up.'), e)
                 resources = {'ports': t_p, 'networks': t1_n + t2_n,
                              'subnets': t1_sn + t2_sn}
@@ -280,13 +281,14 @@ class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
 
         while mgmt_port is not None or port_ids or subnet_ids or net_ids:
             if attempts == DELETION_ATTEMPTS:
-                LOG.warning(_('Aborting resource deletion after %d '
+                LOG.warning(_LW('Aborting resource deletion after %d '
                               'unsuccessful attempts'), DELETION_ATTEMPTS)
                 return
             else:
                 if attempts > 1:
                     eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
-                LOG.info(_('Resource deletion attempt %d starting'), attempts)
+                LOG.info(_LI('Resource deletion attempt %d starting'),
+                         attempts)
             # Remove anything created.
             if mgmt_port is not None:
                 ml = set([mgmt_port['id']])
@@ -305,7 +307,7 @@ class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
                                    self._core_plugin.delete_network,
                                    n_exc.NetworkNotFound, net_ids)
             attempts += 1
-        LOG.info(_('Resource deletion succeeded'))
+        LOG.info(_LI('Resource deletion succeeded'))
 
     def _delete_resources(self, context, name, deleter, exception_type,
                           resource_ids):
@@ -316,7 +318,7 @@ class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
             except exception_type:
                 resource_ids.remove(item_id)
             except n_exc.NeutronException as e:
-                LOG.error(_('Failed to delete %(resource_name)s %(net_id)s '
+                LOG.error(_LE('Failed to delete %(resource_name)s %(net_id)s '
                             'for service vm due to %(err)s'),
                           {'resource_name': name, 'net_id': item_id, 'err': e})
 
@@ -408,7 +410,7 @@ class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
                           str(port_db.hosting_info.segmentation_id))
         else:
             trunk_spec = port_db['network_id']
-        LOG.info(_('Updating trunk: %(action)s VLAN %(tag)d for network_id '
+        LOG.info(_LI('Updating trunk: %(action)s VLAN %(tag)d for network_id '
                    '%(id)s'), {'action': action,
                                'tag': port_db.hosting_info.segmentation_id,
                                'id': port_db['network_id']})
@@ -448,14 +450,14 @@ class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
             if res is None:
                 if attempts >= MAX_HOSTING_PORT_LOOKUP_ATTEMPTS:
                     # This should not happen ...
-                    LOG.error(_('Hosting port DB inconsistency for '
+                    LOG.error(_LE('Hosting port DB inconsistency for '
                                 'hosting device %s'), hd_id)
                     return
                 else:
                     # The service VM may not have plugged its VIF into the
                     # Neutron Port yet so we wait and make another lookup.
                     attempts += 1
-                    LOG.info(_('Attempt %(attempt)d to find trunk ports for '
+                    LOG.info(_LI('Attempt %(attempt)d to find trunk ports for '
                                'hosting device %(hd_id)s failed. Trying '
                                'again in %(time)d seconds.'),
                              {'attempt': attempts, 'hd_id': hd_id,
@@ -501,6 +503,6 @@ class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
             return other_port['id']
         except (exc.NoResultFound, exc.MultipleResultsFound):
             # This should not happen ...
-            LOG.error(_('Port trunk pair DB inconsistency for port %s'),
+            LOG.error(_LE('Port trunk pair DB inconsistency for port %s'),
                       port_id)
             return
index 8ffe63bb9cd41230c3354f4eda5ce3898b5f7ee6..095aa54637850b776331524bf56b86e4bcb64f29 100644 (file)
@@ -18,6 +18,7 @@ from novaclient.v1_1 import client
 from oslo.config import cfg
 
 from neutron import manager
+from neutron.openstack.common.gettextutils import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.cisco.common import cisco_constants as c_constants
 
@@ -64,7 +65,7 @@ class ServiceVMManager(object):
                 nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                 nova_exc.ConnectionRefused, nova_exc.ClientException,
                 Exception) as e:
-            LOG.error(_('Failure determining running Nova services: %s'), e)
+            LOG.error(_LE('Failure determining running Nova services: %s'), e)
             return False
         return not bool(required.difference(
             [service.binary for service in services
@@ -81,8 +82,8 @@ class ServiceVMManager(object):
                 nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                 nova_exc.ConnectionRefused, nova_exc.ClientException,
                 Exception) as e:
-            LOG.error(_('Failed to get status of service VM instance %(id)s, '
-                        'due to %(err)s'), {'id': vm_id, 'err': e})
+            LOG.error(_LE('Failed to get status of service VM instance '
+                          '%(id)s, due to %(err)s'), {'id': vm_id, 'err': e})
             status = c_constants.SVM_ERROR
         return status
 
@@ -97,7 +98,7 @@ class ServiceVMManager(object):
             image = n_utils.find_resource(self._nclient.images, vm_image)
             flavor = n_utils.find_resource(self._nclient.flavors, vm_flavor)
         except (nova_exc.CommandError, Exception) as e:
-            LOG.error(_('Failure finding needed Nova resource: %s'), e)
+            LOG.error(_LE('Failure finding needed Nova resource: %s'), e)
             return
 
         try:
@@ -119,7 +120,7 @@ class ServiceVMManager(object):
                 nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                 nova_exc.ConnectionRefused, nova_exc.ClientException,
                 Exception) as e:
-            LOG.error(_('Failed to create service VM instance: %s'), e)
+            LOG.error(_LE('Failed to create service VM instance: %s'), e)
             return
         return {'id': server.id}
 
@@ -135,6 +136,6 @@ class ServiceVMManager(object):
                 nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                 nova_exc.ConnectionRefused, nova_exc.ClientException,
                 Exception) as e:
-            LOG.error(_('Failed to delete service VM instance %(id)s, '
+            LOG.error(_LE('Failed to delete service VM instance %(id)s, '
                         'due to %(err)s'), {'id': vm_id, 'err': e})
             return False
index a9902d75eec43a3cb876cd0969cd580f2968060b..706ca00c616d6c08e11e582d5a664781cae1bafc 100644 (file)
@@ -20,7 +20,7 @@ from neutron.extensions import portbindings
 from neutron.extensions import providernet as provider
 from neutron import neutron_plugin_base_v2
 from neutron.openstack.common import excutils
-from neutron.openstack.common.gettextutils import _LE
+from neutron.openstack.common.gettextutils import _LE, _LI
 from neutron.openstack.common import importutils
 from neutron.openstack.common import log as logging
 from neutron.plugins.cisco.common import cisco_constants as const
@@ -72,7 +72,7 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
 
         # Initialize credential store after database initialization
         cred.Store.initialize()
-        LOG.debug(_("%(module)s.%(name)s init done"),
+        LOG.debug("%(module)s.%(name)s init done",
                   {'module': __name__,
                    'name': self.__class__.__name__})
 
@@ -113,9 +113,9 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
         plugin implementation) for completing this operation.
         """
         if plugin_key not in self._plugins:
-            LOG.info(_("No %s Plugin loaded"), plugin_key)
-            LOG.info(_("%(plugin_key)s: %(function_name)s with args %(args)s "
-                     "ignored"),
+            LOG.info(_LI("No %s Plugin loaded"), plugin_key)
+            LOG.info(_LI("%(plugin_key)s: %(function_name)s with args "
+                         "%(args)s ignored"),
                      {'plugin_key': plugin_key,
                       'function_name': function_name,
                       'args': args})
@@ -138,7 +138,7 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
         Perform this operation in the context of the configured device
         plugins.
         """
-        LOG.debug(_("create_network() called"))
+        LOG.debug("create_network() called")
         provider_vlan_id = self._get_provider_vlan_id(network[const.NETWORK])
         args = [context, network]
         switch_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
@@ -151,8 +151,8 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
             cdb.add_provider_network(network_id,
                                      const.NETWORK_TYPE_VLAN,
                                      provider_vlan_id)
-            LOG.debug(_("Provider network added to DB: %(network_id)s, "
-                        "%(vlan_id)s"),
+            LOG.debug("Provider network added to DB: %(network_id)s, "
+                      "%(vlan_id)s",
                       {'network_id': network_id, 'vlan_id': provider_vlan_id})
         return switch_output
 
@@ -162,7 +162,7 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
         Perform this operation in the context of the configured device
         plugins.
         """
-        LOG.debug(_("update_network() called"))
+        LOG.debug("update_network() called")
 
         # We can only support updating of provider attributes if all the
         # configured sub-plugins support it. Currently we have no method
@@ -186,7 +186,7 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
                                                        self._func_name(),
                                                        args)
         if cdb.remove_provider_network(id):
-            LOG.debug(_("Provider network removed from DB: %s"), id)
+            LOG.debug("Provider network removed from DB: %s", id)
         return switch_output
 
     def get_network(self, context, id, fields=None):
@@ -228,7 +228,7 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
         Perform this operation in the context of the configured device
         plugins.
         """
-        LOG.debug(_("create_port() called"))
+        LOG.debug("create_port() called")
         args = [context, port]
         return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                               self._func_name(),
@@ -254,7 +254,7 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
         Perform this operation in the context of the configured device
         plugins.
         """
-        LOG.debug(_("update_port() called"))
+        LOG.debug("update_port() called")
         args = [context, id, port]
         return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                               self._func_name(),
@@ -266,7 +266,7 @@ class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
         Perform this operation in the context of the configured device
         plugins.
         """
-        LOG.debug(_("delete_port() called"))
+        LOG.debug("delete_port() called")
         port = self.get_port(context, id)
 
         try:
index 12fa20d236e6fca4df7b41d2e5eb824b17fb25bb..e0b03bbffc6d09acfcf344393a66e895c062f224 100644 (file)
@@ -221,7 +221,7 @@ class Client(object):
         :param network_profile: network profile dict
         :param tenant_id: UUID representing the tenant
         """
-        LOG.debug(_("Logical network"))
+        LOG.debug("Logical network")
         body = {'description': network_profile['name'],
                 'tenantId': tenant_id}
         logical_network_name = (network_profile['id'] +
@@ -246,7 +246,7 @@ class Client(object):
         :param network_profile: network profile dict
         :param tenant_id: UUID representing the tenant
         """
-        LOG.debug(_("network_segment_pool"))
+        LOG.debug("network_segment_pool")
         logical_network_name = (network_profile['id'] +
                                 c_const.LOGICAL_NETWORK_SUFFIX)
         body = {'name': network_profile['name'],
@@ -435,7 +435,7 @@ class Client(object):
         headers['Accept'] = self._set_content_type('json')
         if body:
             body = jsonutils.dumps(body, indent=2)
-            LOG.debug(_("req: %s"), body)
+            LOG.debug("req: %s", body)
         try:
             resp = self.pool.spawn(requests.request,
                                    method,
@@ -445,7 +445,7 @@ class Client(object):
                                    timeout=self.timeout).wait()
         except Exception as e:
             raise c_exc.VSMConnectionFailed(reason=e)
-        LOG.debug(_("status_code %s"), resp.status_code)
+        LOG.debug("status_code %s", resp.status_code)
         if resp.status_code == requests.codes.OK:
             if 'application/json' in resp.headers['content-type']:
                 try:
@@ -453,7 +453,7 @@ class Client(object):
                 except ValueError:
                     return {}
             elif 'text/plain' in resp.headers['content-type']:
-                LOG.debug(_("VSM: %s"), resp.text)
+                LOG.debug("VSM: %s", resp.text)
         else:
             raise c_exc.VSMError(reason=resp.text)
 
index 82322aee5419d0af9d6adf3f1f0572576b822776..8bafe9a58d3ca628e0cb2d8ce3dd6e6e50c93bab 100644 (file)
@@ -35,6 +35,7 @@ from neutron.extensions import portbindings
 from neutron.extensions import providernet
 from neutron import manager
 from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LW
 from neutron.openstack.common import importutils
 from neutron.openstack.common import log as logging
 from neutron.openstack.common import uuidutils as uuidutils
@@ -123,7 +124,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         is instantiated for the first time and then continue to poll for
         policy profile updates.
         """
-        LOG.debug(_('_setup_vsm'))
+        LOG.debug('_setup_vsm')
         self.agent_vsm = True
         # Poll VSM for create/delete of policy profile.
         eventlet.spawn(self._poll_policy_profiles)
@@ -142,7 +143,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         from the VSM. Hence we associate the policy profiles with fake
         tenant-ids.
         """
-        LOG.debug(_('_populate_policy_profiles'))
+        LOG.debug('_populate_policy_profiles')
         try:
             n1kvclient = n1kv_client.Client()
             policy_profiles = n1kvclient.list_port_profiles()
@@ -169,7 +170,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             self._remove_all_fake_policy_profiles()
         except (cisco_exceptions.VSMError,
                 cisco_exceptions.VSMConnectionFailed):
-            LOG.warning(_('No policy profile populated from VSM'))
+            LOG.warning(_LW('No policy profile populated from VSM'))
 
     def _extend_network_dict_provider(self, context, network):
         """Add extended network parameters."""
@@ -424,7 +425,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                               that needs to be trunked
         :param oper: Operation to be performed
         """
-        LOG.debug(_('_populate_member_segments %s'), segment_pairs)
+        LOG.debug('_populate_member_segments %s', segment_pairs)
         trunk_list = []
         for (segment, dot1qtag) in segment_pairs:
             net = self.get_network(context, segment)
@@ -468,7 +469,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                 else:
                     pair_list.append((segment1, segment2))
             else:
-                LOG.debug(_('Invalid UUID supplied in %s'), pair)
+                LOG.debug('Invalid UUID supplied in %s', pair)
                 msg = _("Invalid UUID supplied")
                 raise n_exc.InvalidInput(error_message=msg)
         return pair_list
@@ -527,7 +528,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                         raise n_exc.InvalidInput(error_message=msg)
                 pair_list.append((segment, dot1qtag))
             else:
-                LOG.debug(_('%s is not a valid uuid'), segment)
+                LOG.debug('%s is not a valid uuid', segment)
                 msg = _("'%s' is not a valid UUID") % segment
                 raise n_exc.InvalidInput(error_message=msg)
         return pair_list
@@ -589,7 +590,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param network_profile: network profile dictionary
         :param tenant_id: UUID representing the tenant
         """
-        LOG.debug(_('_send_create_logical_network'))
+        LOG.debug('_send_create_logical_network')
         n1kvclient = n1kv_client.Client()
         n1kvclient.create_logical_network(network_profile, tenant_id)
 
@@ -612,7 +613,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param context: neutron api request context
         :param profile: network profile dictionary
         """
-        LOG.debug(_('_send_create_network_profile_request: %s'), profile['id'])
+        LOG.debug('_send_create_network_profile_request: %s', profile['id'])
         n1kvclient = n1kv_client.Client()
         n1kvclient.create_network_segment_pool(profile, context.tenant_id)
 
@@ -622,7 +623,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
 
         :param profile: network profile dictionary
         """
-        LOG.debug(_('_send_update_network_profile_request: %s'), profile['id'])
+        LOG.debug('_send_update_network_profile_request: %s', profile['id'])
         n1kvclient = n1kv_client.Client()
         n1kvclient.update_network_segment_pool(profile)
 
@@ -632,7 +633,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
 
         :param profile: network profile dictionary
         """
-        LOG.debug(_('_send_delete_network_profile_request: %s'),
+        LOG.debug('_send_delete_network_profile_request: %s',
                   profile['name'])
         n1kvclient = n1kv_client.Client()
         n1kvclient.delete_network_segment_pool(profile['id'])
@@ -647,7 +648,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param segment_pairs: List of segments in UUID pairs
                               that need to be bridged
         """
-        LOG.debug(_('_send_create_network_request: %s'), network['id'])
+        LOG.debug('_send_create_network_request: %s', network['id'])
         profile = self.get_network_profile(context,
                                            network[n1kv.PROFILE_ID])
         n1kvclient = n1kv_client.Client()
@@ -679,7 +680,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param del_segments: List of segments bindings
                              that need to be deleted
         """
-        LOG.debug(_('_send_update_network_request: %s'), network['id'])
+        LOG.debug('_send_update_network_request: %s', network['id'])
         db_session = context.session
         profile = n1kv_db_v2.get_network_profile(
             db_session, network[n1kv.PROFILE_ID])
@@ -701,8 +702,8 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             body['segmentType'] = profile['sub_type']
             body['addSegments'] = network['add_segment_list']
             body['delSegments'] = network['del_segment_list']
-            LOG.debug(_('add_segments=%s'), body['addSegments'])
-            LOG.debug(_('del_segments=%s'), body['delSegments'])
+            LOG.debug('add_segments=%s', body['addSegments'])
+            LOG.debug('del_segments=%s', body['delSegments'])
             if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY:
                 encap_profile = (network['id'] +
                                  c_const.ENCAPSULATION_PROFILE_SUFFIX)
@@ -726,7 +727,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param context: neutron api request context
         :param network: network dictionary
         """
-        LOG.debug(_('_send_delete_network_request: %s'), network['id'])
+        LOG.debug('_send_delete_network_request: %s', network['id'])
         n1kvclient = n1kv_client.Client()
         session = context.session
         if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY:
@@ -766,7 +767,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param context: neutron api request context
         :param subnet: subnet dictionary
         """
-        LOG.debug(_('_send_create_subnet_request: %s'), subnet['id'])
+        LOG.debug('_send_create_subnet_request: %s', subnet['id'])
         n1kvclient = n1kv_client.Client()
         n1kvclient.create_ip_pool(subnet)
 
@@ -776,7 +777,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
 
         :param subnet: subnet dictionary
         """
-        LOG.debug(_('_send_update_subnet_request: %s'), subnet['name'])
+        LOG.debug('_send_update_subnet_request: %s', subnet['name'])
         n1kvclient = n1kv_client.Client()
         n1kvclient.update_ip_pool(subnet)
 
@@ -787,7 +788,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param context: neutron api request context
         :param subnet: subnet dictionary
         """
-        LOG.debug(_('_send_delete_subnet_request: %s'), subnet['name'])
+        LOG.debug('_send_delete_subnet_request: %s', subnet['name'])
         body = {'ipPool': subnet['id'], 'deleteSubnet': True}
         n1kvclient = n1kv_client.Client()
         n1kvclient.update_network_segment(subnet['network_id'], body=body)
@@ -813,7 +814,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param vm_network_name: string representing the name of the VM
                                 network
         """
-        LOG.debug(_('_send_create_port_request: %s'), port)
+        LOG.debug('_send_create_port_request: %s', port)
         n1kvclient = n1kv_client.Client()
         if port_count == 1:
             n1kvclient.create_vm_network(port,
@@ -830,7 +831,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param mac_address: string representing the mac address
         :param vm_network_name: VM network name to which the port is bound
         """
-        LOG.debug(_('_send_update_port_request: %s'), port_id)
+        LOG.debug('_send_update_port_request: %s', port_id)
         body = {'portId': port_id,
                 'macAddress': mac_address}
         n1kvclient = n1kv_client.Client()
@@ -845,7 +846,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param port: port object which is to be deleted
         :param vm_network: VM network object with which the port is associated
         """
-        LOG.debug(_('_send_delete_port_request: %s'), port['id'])
+        LOG.debug('_send_delete_port_request: %s', port['id'])
         n1kvclient = n1kv_client.Client()
         n1kvclient.delete_n1kv_port(vm_network['name'], port['id'])
 
@@ -874,7 +875,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                                                           network['network'])
         profile_id = self._process_network_profile(context, network['network'])
         segment_pairs = None
-        LOG.debug(_('Create network: profile_id=%s'), profile_id)
+        LOG.debug('Create network: profile_id=%s', profile_id)
         session = context.session
         with session.begin(subtransactions=True):
             if not network_type:
@@ -882,10 +883,10 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                 (physical_network, network_type, segmentation_id,
                     multicast_ip) = n1kv_db_v2.alloc_network(session,
                                                              profile_id)
-                LOG.debug(_('Physical_network %(phy_net)s, '
-                            'seg_type %(net_type)s, '
-                            'seg_id %(seg_id)s, '
-                            'multicast_ip %(multicast_ip)s'),
+                LOG.debug('Physical_network %(phy_net)s, '
+                          'seg_type %(net_type)s, '
+                          'seg_id %(seg_id)s, '
+                          'multicast_ip %(multicast_ip)s',
                           {'phy_net': physical_network,
                            'net_type': network_type,
                            'seg_id': segmentation_id,
@@ -894,7 +895,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                     segment_pairs = (
                         self._parse_multi_segments(context, network['network'],
                                                    n1kv.SEGMENT_ADD))
-                    LOG.debug(_('Seg list %s '), segment_pairs)
+                    LOG.debug('Seg list %s ', segment_pairs)
                 elif network_type == c_const.NETWORK_TYPE_TRUNK:
                     network_profile = self.get_network_profile(context,
                                                                profile_id)
@@ -904,7 +905,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                                                    physical_network,
                                                    network_profile['sub_type']
                                                    ))
-                    LOG.debug(_('Seg list %s '), segment_pairs)
+                    LOG.debug('Seg list %s ', segment_pairs)
                 else:
                     if not segmentation_id:
                         raise n_exc.TenantNetworksDisabled()
@@ -945,7 +946,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             with excutils.save_and_reraise_exception():
                 self._delete_network_db(context, net['id'])
         else:
-            LOG.debug(_("Created network: %s"), net['id'])
+            LOG.debug("Created network: %s", net['id'])
             return net
 
     def update_network(self, context, id, network):
@@ -1003,7 +1004,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             if binding.network_type != c_const.NETWORK_TYPE_MULTI_SEGMENT:
                 self._send_update_network_request(context, net, add_segments,
                                                   del_segments)
-            LOG.debug(_("Updated network: %s"), net['id'])
+            LOG.debug("Updated network: %s", net['id'])
             return net
 
     def delete_network(self, context, id):
@@ -1053,7 +1054,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param id: UUID representing the network to fetch
         :returns: requested network dictionary
         """
-        LOG.debug(_("Get network: %s"), id)
+        LOG.debug("Get network: %s", id)
         net = super(N1kvNeutronPluginV2, self).get_network(context, id, None)
         self._extend_network_dict_provider(context, net)
         self._extend_network_dict_profile(context, net)
@@ -1075,7 +1076,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                         dictionary. Only these fields will be returned.
         :returns: list of network dictionaries.
         """
-        LOG.debug(_("Get networks"))
+        LOG.debug("Get networks")
         nets = super(N1kvNeutronPluginV2, self).get_networks(context, filters,
                                                              None)
         for net in nets:
@@ -1126,7 +1127,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
 
         profile_id = self._process_policy_profile(context,
                                                   port['port'])
-        LOG.debug(_('Create port: profile_id=%s'), profile_id)
+        LOG.debug('Create port: profile_id=%s', profile_id)
         session = context.session
         with session.begin(subtransactions=True):
             pt = super(N1kvNeutronPluginV2, self).create_port(context,
@@ -1173,7 +1174,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             with excutils.save_and_reraise_exception():
                 self._delete_port_db(context, pt, vm_network)
         else:
-            LOG.debug(_("Created port: %s"), pt)
+            LOG.debug("Created port: %s", pt)
             return pt
 
     def update_port(self, context, id, port):
@@ -1184,7 +1185,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param id: UUID representing the port to update
         :returns: updated port object
         """
-        LOG.debug(_("Update port: %s"), id)
+        LOG.debug("Update port: %s", id)
         with context.session.begin(subtransactions=True):
             updated_port = super(N1kvNeutronPluginV2,
                                  self).update_port(context, id, port)
@@ -1247,7 +1248,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                        dictionary. Only these fields will be returned.
         :returns: port dictionary
         """
-        LOG.debug(_("Get port: %s"), id)
+        LOG.debug("Get port: %s", id)
         port = super(N1kvNeutronPluginV2, self).get_port(context, id, None)
         self._extend_port_dict_profile(context, port)
         return self._fields(port, fields)
@@ -1267,7 +1268,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                         dictionary. Only these fields will be returned.
         :returns: list of port dictionaries
         """
-        LOG.debug(_("Get ports"))
+        LOG.debug("Get ports")
         ports = super(N1kvNeutronPluginV2, self).get_ports(context, filters,
                                                            None)
         for port in ports:
@@ -1283,7 +1284,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param subnet: subnet dictionary
         :returns: subnet object
         """
-        LOG.debug(_('Create subnet'))
+        LOG.debug('Create subnet')
         sub = super(N1kvNeutronPluginV2, self).create_subnet(context, subnet)
         try:
             self._send_create_subnet_request(context, sub)
@@ -1293,7 +1294,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                 super(N1kvNeutronPluginV2,
                       self).delete_subnet(context, sub['id'])
         else:
-            LOG.debug(_("Created subnet: %s"), sub['id'])
+            LOG.debug("Created subnet: %s", sub['id'])
             if not q_conf.CONF.network_auto_schedule:
                 # Schedule network to a DHCP agent
                 net = self.get_network(context, sub['network_id'])
@@ -1308,7 +1309,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param id: UUID representing subnet to update
         :returns: updated subnet object
         """
-        LOG.debug(_('Update subnet'))
+        LOG.debug('Update subnet')
         sub = super(N1kvNeutronPluginV2, self).update_subnet(context,
                                                              id,
                                                              subnet)
@@ -1323,7 +1324,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         :param id: UUID representing subnet to delete
         :returns: deleted subnet object
         """
-        LOG.debug(_('Delete subnet: %s'), id)
+        LOG.debug('Delete subnet: %s', id)
         subnet = self.get_subnet(context, id)
         self._send_delete_subnet_request(context, subnet)
         return super(N1kvNeutronPluginV2, self).delete_subnet(context, id)
@@ -1338,7 +1339,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                         dictionary. Only these fields will be returned.
         :returns: subnet object
         """
-        LOG.debug(_("Get subnet: %s"), id)
+        LOG.debug("Get subnet: %s", id)
         subnet = super(N1kvNeutronPluginV2, self).get_subnet(context, id,
                                                              None)
         return self._fields(subnet, fields)
@@ -1358,7 +1359,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                         dictionary. Only these fields will be returned.
         :returns: list of dictionaries of subnets
         """
-        LOG.debug(_("Get subnets"))
+        LOG.debug("Get subnets")
         subnets = super(N1kvNeutronPluginV2, self).get_subnets(context,
                                                                filters,
                                                                None)
index 7ea86136985b27ea2221720b16df0ebbe0aca71d..e60da7987ede326929c646c97925c00091d0d5dd 100644 (file)
@@ -84,7 +84,7 @@ class PluginV2(db_base_plugin_v2.NeutronDbPluginV2):
         # Extend the fault map
         self._extend_fault_map()
 
-        LOG.debug(_("Plugin initialization complete"))
+        LOG.debug("Plugin initialization complete")
 
     def __getattribute__(self, name):
         """Delegate core API calls to the model class.
@@ -129,44 +129,44 @@ class PluginV2(db_base_plugin_v2.NeutronDbPluginV2):
     """
     def get_all_qoss(self, tenant_id):
         """Get all QoS levels."""
-        LOG.debug(_("get_all_qoss() called"))
+        LOG.debug("get_all_qoss() called")
         qoslist = cdb.get_all_qoss(tenant_id)
         return qoslist
 
     def get_qos_details(self, tenant_id, qos_id):
         """Get QoS Details."""
-        LOG.debug(_("get_qos_details() called"))
+        LOG.debug("get_qos_details() called")
         return cdb.get_qos(tenant_id, qos_id)
 
     def create_qos(self, tenant_id, qos_name, qos_desc):
         """Create a QoS level."""
-        LOG.debug(_("create_qos() called"))
+        LOG.debug("create_qos() called")
         qos = cdb.add_qos(tenant_id, qos_name, str(qos_desc))
         return qos
 
     def delete_qos(self, tenant_id, qos_id):
         """Delete a QoS level."""
-        LOG.debug(_("delete_qos() called"))
+        LOG.debug("delete_qos() called")
         return cdb.remove_qos(tenant_id, qos_id)
 
     def rename_qos(self, tenant_id, qos_id, new_name):
         """Rename QoS level."""
-        LOG.debug(_("rename_qos() called"))
+        LOG.debug("rename_qos() called")
         return cdb.update_qos(tenant_id, qos_id, new_name)
 
     def get_all_credentials(self):
         """Get all credentials."""
-        LOG.debug(_("get_all_credentials() called"))
+        LOG.debug("get_all_credentials() called")
         credential_list = cdb.get_all_credentials()
         return credential_list
 
     def get_credential_details(self, credential_id):
         """Get a particular credential."""
-        LOG.debug(_("get_credential_details() called"))
+        LOG.debug("get_credential_details() called")
         return cdb.get_credential(credential_id)
 
     def rename_credential(self, credential_id, new_name, new_password):
         """Rename the particular credential resource."""
-        LOG.debug(_("rename_credential() called"))
+        LOG.debug("rename_credential() called")
         return cdb.update_credential(credential_id, new_name,
                                      new_password=new_password)
index e9df1a09022f855b6aa93c7a8e5de10c0be10cd4..f3db1313d87e6b2bfe278e5e30183c302979daa8 100644 (file)
@@ -19,7 +19,6 @@ import testtools
 from neutron.agent.common import config
 from neutron.common import config as base_config
 from neutron.common import constants as l3_constants
-from neutron.openstack.common import log as logging
 from neutron.openstack.common import uuidutils
 from neutron.plugins.cisco.cfg_agent import cfg_agent
 from neutron.tests import base
@@ -28,8 +27,6 @@ _uuid = uuidutils.generate_uuid
 HOSTNAME = 'myhost'
 FAKE_ID = _uuid()
 
-LOG = logging.getLogger(__name__)
-
 
 def prepare_router_data(enable_snat=None, num_internal_ports=1):
     router_id = _uuid()
index 49b1babc865a98917efb994ed86b737c60e607a9..aa3a60fb8e34836003c9fb7aa4bced541d6ad6e1 100644 (file)
@@ -16,7 +16,6 @@ import sys
 import datetime
 import mock
 
-from neutron.openstack.common import log as logging
 from neutron.openstack.common import uuidutils
 
 sys.modules['ncclient'] = mock.MagicMock()
@@ -25,7 +24,6 @@ from neutron.plugins.cisco.cfg_agent import device_status
 from neutron.tests import base
 
 _uuid = uuidutils.generate_uuid
-LOG = logging.getLogger(__name__)
 
 TYPE_STRING = 'string'
 TYPE_DATETIME = 'datetime'
index 44cb69f75d8dab2ca5bb0d1507fd582194efa8ea..9aae29b1a2f24c5e3aef19324f9059ed3040d394 100644 (file)
@@ -19,7 +19,6 @@ from oslo import messaging
 
 from neutron.common import config as base_config
 from neutron.common import constants as l3_constants
-from neutron.openstack.common import log as logging
 from neutron.openstack.common import uuidutils
 from neutron.plugins.cisco.cfg_agent import cfg_agent
 from neutron.plugins.cisco.cfg_agent import cfg_exceptions
@@ -36,8 +35,6 @@ _uuid = uuidutils.generate_uuid
 HOST = 'myhost'
 FAKE_ID = _uuid()
 
-LOG = logging.getLogger(__name__)
-
 
 def prepare_router_data(enable_snat=None, num_internal_ports=1):
     router_id = _uuid()
index 974da4e7ff2d95e90e48993b60c9db77d64ee0b4..1bcbc6a29a951fd372096f8bca03f97449b98a04 100644 (file)
@@ -19,6 +19,7 @@ from oslo.config import cfg
 from neutron import context as n_context
 from neutron import manager
 from neutron.openstack.common import excutils
+from neutron.openstack.common.gettextutils import _LE
 from neutron.openstack.common import log as logging
 from neutron.openstack.common import uuidutils
 from neutron.plugins.common import constants
@@ -95,8 +96,8 @@ class DeviceHandlingTestSupportMixin(object):
                 self._delete('ports', port['id'])
             except Exception as e:
                 with excutils.save_and_reraise_exception(reraise=False):
-                    LOG.error('Failed to delete port %(p_id)s for vm instance '
-                              '%(v_id)s due to %(err)s',
+                    LOG.error(_LE('Failed to delete port %(p_id)s for vm '
+                                  'instance %(v_id)s due to %(err)s'),
                               {'p_id': port['id'], 'v_id': vm_id, 'err': e})
                     raise nova_exc.InternalServerError()
 
index 1a4db29f4a14cfef5460a84c53bcce6dd72c9dd4..724e0f3500699db51db7e6c982f51071dc4ead3c 100755 (executable)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from neutron.openstack.common import log as logging
 from neutron.plugins.cisco.common import cisco_exceptions as c_exc
 from neutron.plugins.cisco.n1kv import n1kv_client
 
-LOG = logging.getLogger(__name__)
-
 _resource_metadata = {'port': ['id', 'macAddress', 'ipAddress', 'subnetId'],
                       'vmnetwork': ['name', 'networkSegmentId',
                                     'networkSegment', 'portProfile',