Update i18n translation for neutron.server/scheduler log messages
author    Gary Kotton <gkotton@vmware.com>
          Tue, 11 Nov 2014 13:43:55 +0000 (05:43 -0800)
committer Gary Kotton <gkotton@vmware.com>
          Thu, 20 Nov 2014 07:25:48 +0000 (23:25 -0800)
Validate that hacking rules apply to directories:
- neutron/scheduler
- neutron/server

Change-Id: I1353f5c8b01f85e4995ee1fb23a066506ed98e3f
Partial-bug: #1320867

neutron/hacking/checks.py
neutron/scheduler/dhcp_agent_scheduler.py
neutron/scheduler/l3_agent_scheduler.py
neutron/server/__init__.py
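
The convention enforced here follows the OpenStack i18n guidelines: each log level pairs with a lazy-translation marker from gettextutils (_LI for LOG.info, _LW for LOG.warn, _LE for LOG.error), while LOG.debug messages carry no marker at all, since debug output is not translated. A minimal sketch of the resulting pattern, reusing messages from the hunks below:

    from neutron.openstack.common.gettextutils import _LI, _LW
    from neutron.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def report(agent):
        LOG.info(_LI('Agent %s already present'), agent)   # info  -> _LI
        LOG.warn(_LW('No more DHCP agents'))               # warn  -> _LW
        LOG.debug('Scheduling agent %s', agent)            # debug -> no marker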

neutron/hacking/checks.py
index 6aefc8ca6d9ce3bdcee219aadd35fbe3ea308674..37afc2d8a31d48d8c994fcfbff75ecc3e7f49d31 100644 (file)
@@ -54,7 +54,9 @@ def _directory_to_check_translation(filename):
             "neutron/hacking",
             "neutron/locale",
             "neutron/notifiers",
-            "neutron/openstack"]
+            "neutron/openstack",
+            "neutron/scheduler",
+            "neutron/server"]
     return any([dir in filename for dir in dirs])
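
_directory_to_check_translation() only decides which files the translation rules apply to; a check that consumes it has to scan the log calls themselves. A hypothetical sketch of such a consumer (the regex and the N320 code are illustrative, not necessarily what neutron/hacking/checks.py actually uses):

    import re

    # Hypothetical: flag LOG calls (debug excluded) whose message is a bare
    # string literal instead of a _LI/_LW/_LE-wrapped one.
    _UNTRANSLATED_LOG = re.compile(
        r"LOG\.(info|warning|warn|error|critical)\(\s*['\"]")

    def check_log_translations(logical_line, filename):
        if not _directory_to_check_translation(filename):
            return
        if _UNTRANSLATED_LOG.search(logical_line):
            yield (0, "N320: log message requires a translation hint")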
 
 
neutron/scheduler/dhcp_agent_scheduler.py
index f29b823cb650de50f40e3d1914ec9713be7f0959..935810846299df837bf2de3778a29494786bdd31 100644 (file)
@@ -22,6 +22,7 @@ from sqlalchemy import sql
 from neutron.common import constants
 from neutron.db import agents_db
 from neutron.db import agentschedulers_db
+from neutron.openstack.common.gettextutils import _LI, _LW
 from neutron.openstack.common import log as logging
 
 
@@ -48,9 +49,9 @@ class ChanceScheduler(object):
             except db_exc.DBDuplicateEntry:
                 # it's totally ok, someone just did our job!
                 context.session.rollback()
-                LOG.info(_('Agent %s already present'), agent)
-            LOG.debug(_('Network %(network_id)s is scheduled to be '
-                        'hosted by DHCP agent %(agent_id)s'),
+                LOG.info(_LI('Agent %s already present'), agent)
+            LOG.debug('Network %(network_id)s is scheduled to be '
+                      'hosted by DHCP agent %(agent_id)s',
                       {'network_id': network_id,
                        'agent_id': agent})
 
@@ -67,7 +68,7 @@ class ChanceScheduler(object):
             dhcp_agents = plugin.get_dhcp_agents_hosting_networks(
                 context, [network['id']], active=True)
             if len(dhcp_agents) >= agents_per_network:
-                LOG.debug(_('Network %s is hosted already'),
+                LOG.debug('Network %s is hosted already',
                           network['id'])
                 return
             n_agents = agents_per_network - len(dhcp_agents)
@@ -76,7 +77,7 @@ class ChanceScheduler(object):
                     'agent_type': [constants.AGENT_TYPE_DHCP],
                     'admin_state_up': [True]})
             if not enabled_dhcp_agents:
-                LOG.warn(_('No more DHCP agents'))
+                LOG.warn(_LW('No more DHCP agents'))
                 return
             active_dhcp_agents = [
                 agent for agent in set(enabled_dhcp_agents)
@@ -85,7 +86,7 @@ class ChanceScheduler(object):
                 and agent not in dhcp_agents
             ]
             if not active_dhcp_agents:
-                LOG.warn(_('No more DHCP agents'))
+                LOG.warn(_LW('No more DHCP agents'))
                 return
             n_agents = min(len(active_dhcp_agents), n_agents)
             chosen_agents = random.sample(active_dhcp_agents, n_agents)
@@ -105,7 +106,7 @@ class ChanceScheduler(object):
             net_ids = set(s['network_id'] for s in subnets
                           if s['enable_dhcp'])
             if not net_ids:
-                LOG.debug(_('No non-hosted networks'))
+                LOG.debug('No non-hosted networks')
                 return False
             query = context.session.query(agents_db.Agent)
             query = query.filter(agents_db.Agent.agent_type ==
@@ -116,7 +117,7 @@ class ChanceScheduler(object):
             for dhcp_agent in dhcp_agents:
                 if agents_db.AgentDbMixin.is_agent_down(
                     dhcp_agent.heartbeat_timestamp):
-                    LOG.warn(_('DHCP agent %s is not active'), dhcp_agent.id)
+                    LOG.warn(_LW('DHCP agent %s is not active'), dhcp_agent.id)
                     continue
                 for net_id in net_ids:
                     agents = plugin.get_dhcp_agents_hosting_networks(
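
One detail these hunks preserve: the format arguments stay as separate arguments to the LOG call instead of being %-interpolated into the message up front. The logger performs the substitution only if the record is actually emitted, and with lazy translation the catalog lookup can then happen per-locale at output time. Illustration (the second form is what the convention avoids):

    # Preferred: interpolation and translation are deferred until the
    # record is actually emitted.
    LOG.warn(_LW('DHCP agent %s is not active'), dhcp_agent.id)

    # Avoided: eager interpolation does the formatting work even when the
    # warning level is filtered out.
    LOG.warn(_LW('DHCP agent %s is not active') % dhcp_agent.id)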
neutron/scheduler/l3_agent_scheduler.py
index e86a9c2fccf7a4450efa775a5bf8fe41fe893cef..52e9bba7ddaf3f8dee5abc7b4bb0e1472605606b 100644 (file)
@@ -27,7 +27,7 @@ from neutron.common import utils
 from neutron.db import l3_agentschedulers_db
 from neutron.db import l3_db
 from neutron.db import l3_hamode_db
-from neutron.openstack.common.gettextutils import _LE
+from neutron.openstack.common.gettextutils import _LE, _LW
 from neutron.openstack.common import log as logging
 
 
@@ -67,8 +67,8 @@ class L3Scheduler(object):
             l3_agents = plugin.get_l3_agents_hosting_routers(
                 context, [router['id']], admin_state_up=True)
             if l3_agents:
-                LOG.debug(('Router %(router_id)s has already been '
-                           'hosted by L3 agent %(agent_id)s'),
+                LOG.debug('Router %(router_id)s has already been '
+                          'hosted by L3 agent %(agent_id)s',
                           {'router_id': router['id'],
                            'agent_id': l3_agents[0]['id']})
             else:
@@ -153,8 +153,8 @@ class L3Scheduler(object):
         target_routers = self.get_routers_can_schedule(
             context, plugin, unscheduled_routers, l3_agent)
         if not target_routers:
-            LOG.warn(_('No routers compatible with L3 agent configuration'
-                       ' on host %s'), host)
+            LOG.warn(_LW('No routers compatible with L3 agent configuration'
+                         ' on host %s'), host)
             return False
 
         self.bind_routers(context, plugin, target_routers, l3_agent)
@@ -170,15 +170,15 @@ class L3Scheduler(object):
             l3_agents = plugin.get_l3_agents_hosting_routers(
                 context, [sync_router['id']], admin_state_up=True)
             if l3_agents and not sync_router.get('distributed', False):
-                LOG.debug(_('Router %(router_id)s has already been hosted'
-                            ' by L3 agent %(agent_id)s'),
+                LOG.debug('Router %(router_id)s has already been hosted'
+                          ' by L3 agent %(agent_id)s',
                           {'router_id': sync_router['id'],
                            'agent_id': l3_agents[0]['id']})
                 return
 
             active_l3_agents = plugin.get_l3_agents(context, active=True)
             if not active_l3_agents:
-                LOG.warn(_('No active L3 agents'))
+                LOG.warn(_LW('No active L3 agents'))
                 return
             new_l3agents = plugin.get_l3_agent_candidates(context,
                                                           sync_router,
@@ -190,7 +190,7 @@ class L3Scheduler(object):
             else:
                 candidates = new_l3agents
                 if not candidates:
-                    LOG.warn(_('No L3 agents can host the router %s'),
+                    LOG.warn(_LW('No L3 agents can host the router %s'),
                              sync_router['id'])
 
             return candidates
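
Note how the two-line warning above is wrapped: the implicit concatenation of both string literals happens inside the _LW() call, so message-catalog extractors record one complete msgid. Keeping only the first literal inside the marker would put a truncated message into the catalog and leave the tail untranslated:

    # Whole message inside the marker -- extractors see the full string:
    LOG.warn(_LW('No routers compatible with L3 agent configuration'
                 ' on host %s'), host)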
neutron/server/__init__.py
index eb34ad8510b8a5bd489d53dedc97e42d4e5cacf4..ea598d5e77b0db402816ba78d1f0cd323287a829 100755 (executable)
@@ -29,6 +29,7 @@ from neutron.common import config
 from neutron import service
 
 from neutron.openstack.common import gettextutils
+from neutron.openstack.common.gettextutils import _LI
 from neutron.openstack.common import log as logging
 gettextutils.install('neutron', lazy=True)
 
@@ -51,7 +52,8 @@ def main():
         try:
             neutron_rpc = service.serve_rpc()
         except NotImplementedError:
-            LOG.info(_("RPC was already started in parent process by plugin."))
+            LOG.info(_LI("RPC was already started in parent process by "
+                         "plugin."))
         else:
             rpc_thread = pool.spawn(neutron_rpc.wait)
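
For context on the new _LI import: gettextutils.install('neutron', lazy=True), already called above, binds the plain translation function as the builtin _, which is why modules can use _() without importing it. The level-specific hints are ordinary module attributes, though, and must be imported explicitly, which is exactly what this hunk adds. A rough conceptual model (a simplification, not the actual oslo code):

    from neutron.openstack.common import gettextutils

    # After install(), `_` exists as a builtin returning a lazy Message,
    # deferring the catalog lookup until the string is rendered.
    gettextutils.install('neutron', lazy=True)
    print(_('RPC was already started in parent process by plugin.'))

    # _LI/_LW/_LE are not installed as builtins; they need an import:
    from neutron.openstack.common.gettextutils import _LI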