From: Mike Mason
Date: Tue, 25 Nov 2014 15:57:24 +0000 (+0000)
Subject: Implementing the use of _L'x'/i18n markers
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=f69c40ebb40714c15bcb983643b93a6ba2f5424c;p=openstack-build%2Fcinder-build.git

Implementing the use of _L'x'/i18n markers

Placing the _Lx markers back into the code. No cleaner solution has been
implemented. Patches will be submitted as a series, a manageable group of
subdirectories at a time. A brief sketch of the marker conventions appears
after the diff.

Partial-Bug: #1384312
Change-Id: I3974b58bd9b8b9e3c34d5a609228e30c6a08a3c3
---

diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index b37e35c57..7def024a6 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -42,7 +42,7 @@ from sqlalchemy.sql import func
 from cinder.common import sqlalchemyutils
 from cinder.db.sqlalchemy import models
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import uuidutils
 
@@ -889,8 +889,8 @@ def quota_reserve(context, resources, quotas, deltas, expire,
             usages[resource].reserved += delta
 
     if unders:
-        LOG.warning(_("Change will make usage less than 0 for the following "
-                      "resources: %s") % unders)
+        LOG.warning(_LW("Change will make usage less than 0 for the following "
+                        "resources: %s") % unders)
     if overs:
         usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
                       for k, v in usages.items())
diff --git a/cinder/image/glance.py b/cinder/image/glance.py
index efed24aac..66bf46ee0 100644
--- a/cinder/image/glance.py
+++ b/cinder/image/glance.py
@@ -33,7 +33,7 @@ from oslo.utils import timeutils
 import six.moves.urllib.parse as urlparse
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 
 
@@ -130,7 +130,7 @@ class GlanceClientWrapper(object):
         self.version = version
 
         if CONF.glance_num_retries < 0:
-            LOG.warning(_(
+            LOG.warning(_LW(
                 "glance_num_retries shouldn't be a negative value. "
                 "The number of retries will be set to 0 until this is"
                 "corrected in the cinder.conf."))
diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py
index 521571ade..6abeaf2a5 100644
--- a/cinder/scheduler/filter_scheduler.py
+++ b/cinder/scheduler/filter_scheduler.py
@@ -23,7 +23,7 @@ Weighing Functions.
from oslo.config import cfg from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LW from cinder.openstack.common import log as logging from cinder.scheduler import driver from cinder.scheduler import scheduler_options @@ -397,8 +397,8 @@ class FilterScheduler(driver.Scheduler): weighed_hosts = self._get_weighted_candidates(context, request_spec, filter_properties) if not weighed_hosts: - LOG.warning(_('No weighed hosts found for volume ' - 'with properties: %s'), + LOG.warning(_LW('No weighed hosts found for volume ' + 'with properties: %s'), filter_properties['request_spec']['volume_type']) return None return self._choose_top_host(weighed_hosts, request_spec) diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py index 0e64f20ce..ec7b551da 100644 --- a/cinder/scheduler/filters/capacity_filter.py +++ b/cinder/scheduler/filters/capacity_filter.py @@ -18,7 +18,7 @@ import math -from cinder.i18n import _ +from cinder.i18n import _LE, _LW from cinder.openstack.common import log as logging from cinder.openstack.common.scheduler import filters @@ -41,8 +41,8 @@ class CapacityFilter(filters.BaseHostFilter): if host_state.free_capacity_gb is None: # Fail Safe - LOG.error(_("Free capacity not set: " - "volume node info collection broken.")) + LOG.error(_LE("Free capacity not set: " + "volume node info collection broken.")) return False free_space = host_state.free_capacity_gb @@ -59,9 +59,9 @@ class CapacityFilter(filters.BaseHostFilter): "requested": volume_size, "available": free} if free < volume_size: - LOG.warning(_("Insufficient free space for volume creation " - "on host %(host)s (requested / avail): " - "%(requested)s/%(available)s") % msg_args) + LOG.warning(_LW("Insufficient free space for volume creation " + "on host %(host)s (requested / avail): " + "%(requested)s/%(available)s") % msg_args) else: LOG.debug("Sufficient free space for volume creation " "on host %(host)s (requested / avail): " diff --git a/cinder/scheduler/flows/create_volume.py b/cinder/scheduler/flows/create_volume.py index 4e9fcfcd3..b6f70305a 100644 --- a/cinder/scheduler/flows/create_volume.py +++ b/cinder/scheduler/flows/create_volume.py @@ -16,7 +16,7 @@ from taskflow.patterns import linear_flow from cinder import exception from cinder import flow_utils -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder import rpc from cinder import utils @@ -100,7 +100,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask): try: self._notify_failure(context, request_spec, cause) finally: - LOG.error(_("Failed to run task %(name)s: %(cause)s") % + LOG.error(_LE("Failed to run task %(name)s: %(cause)s") % {'cause': cause, 'name': self.name}) def _notify_failure(self, context, request_spec, cause): @@ -117,8 +117,8 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask): rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC, payload) except exception.CinderException: - LOG.exception(_("Failed notifying on %(topic)s " - "payload %(payload)s") % + LOG.exception(_LE("Failed notifying on %(topic)s " + "payload %(payload)s") % {'topic': self.FAILURE_TOPIC, 'payload': payload}) def execute(self, context, request_spec, filter_properties): diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py index c7c7cdfee..9563ea172 100644 --- a/cinder/scheduler/host_manager.py +++ b/cinder/scheduler/host_manager.py @@ -24,7 +24,7 @@ from oslo.utils import timeutils 
from cinder import db from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LI from cinder.openstack.common import log as logging from cinder.openstack.common.scheduler import filters from cinder.openstack.common.scheduler import weights @@ -471,8 +471,8 @@ class HostManager(object): # remove non-active hosts from host_state_map nonactive_hosts = set(self.host_state_map.keys()) - active_hosts for host in nonactive_hosts: - LOG.info(_("Removing non-active host: %(host)s from " - "scheduler cache.") % {'host': host}) + LOG.info(_LI("Removing non-active host: %(host)s from " + "scheduler cache.") % {'host': host}) del self.host_state_map[host] # build a pool_state map and return that map instead of host_state_map diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py index 0aeddd6ff..5cd482af0 100644 --- a/cinder/scheduler/manager.py +++ b/cinder/scheduler/manager.py @@ -28,7 +28,7 @@ from cinder import context from cinder import db from cinder import exception from cinder import flow_utils -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder import manager from cinder.openstack.common import log as logging from cinder import quota @@ -105,8 +105,8 @@ class SchedulerManager(manager.Manager): {'status': 'error'}) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to create consistency group " - "%(group_id)s."), + LOG.exception(_LE("Failed to create consistency group " + "%(group_id)s."), {'group_id': group_id}) db.consistencygroup_update(context, group_id, {'status': 'error'}) @@ -124,7 +124,8 @@ class SchedulerManager(manager.Manager): snapshot_id, image_id) except Exception: - LOG.exception(_("Failed to create scheduler manager volume flow")) + LOG.exception(_LE("Failed to create scheduler " + "manager volume flow")) raise exception.CinderException( _("Failed to create scheduler manager volume flow")) diff --git a/cinder/scheduler/scheduler_options.py b/cinder/scheduler/scheduler_options.py index 970bb6068..a95df1382 100644 --- a/cinder/scheduler/scheduler_options.py +++ b/cinder/scheduler/scheduler_options.py @@ -27,7 +27,7 @@ import os from oslo.config import cfg from oslo.utils import timeutils -from cinder.i18n import _ +from cinder.i18n import _LE from cinder.openstack.common import log as logging @@ -66,8 +66,8 @@ class SchedulerOptions(object): try: return os.path.getmtime(filename) except os.error as e: - LOG.exception(_("Could not stat scheduler options file " - "%(filename)s: '%(e)s'"), + LOG.exception(_LE("Could not stat scheduler options file " + "%(filename)s: '%(e)s'"), {'filename': filename, 'e': e}) raise @@ -76,7 +76,7 @@ class SchedulerOptions(object): try: return json.load(handle) except ValueError as e: - LOG.exception(_("Could not decode scheduler options: '%s'") % e) + LOG.exception(_LE("Could not decode scheduler options: '%s'") % e) return {} def _get_time_now(self): diff --git a/cinder/transfer/api.py b/cinder/transfer/api.py index 7065f3312..f6ff3de97 100644 --- a/cinder/transfer/api.py +++ b/cinder/transfer/api.py @@ -27,7 +27,7 @@ from oslo.utils import excutils from cinder.db import base from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE, _LI from cinder.openstack.common import log as logging from cinder import quota from cinder.volume import api as volume_api @@ -184,8 +184,8 @@ class API(base.Base): gigabytes=-vol_ref['size']) except Exception: donor_reservations = None - LOG.exception(_("Failed to update quota donating 
volume" - " transfer id %s") % transfer_id) + LOG.exception(_LE("Failed to update quota donating volume" + " transfer id %s") % transfer_id) try: # Transfer ownership of the volume now, must use an elevated @@ -201,7 +201,7 @@ class API(base.Base): QUOTAS.commit(context, reservations) if donor_reservations: QUOTAS.commit(context, donor_reservations, project_id=donor_id) - LOG.info(_("Volume %s has been transferred.") % volume_id) + LOG.info(_LI("Volume %s has been transferred.") % volume_id) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py b/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py index 7e00e0823..26c5a6d94 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py +++ b/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py @@ -22,7 +22,7 @@ from oslo.utils import excutils import paramiko from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder import utils from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts @@ -140,8 +140,8 @@ class BrcdFCSanLookupService(FCSanLookupService): nsinfo = self.get_nameserver_info() except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed collecting name server info from " - "fabric %s") % fabric_ip) + LOG.error(_LE("Failed collecting name server info from" + " fabric %s") % fabric_ip) except Exception as e: msg = _("SSH connection failed " "for %(fabric)s with error: %(err)s" @@ -199,14 +199,14 @@ class BrcdFCSanLookupService(FCSanLookupService): cli_output = self._get_switch_data(ZoneConstant.NS_SHOW) except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed collecting nsshow info for fabric")) + LOG.error(_LE("Failed collecting nsshow info for fabric")) if cli_output: nsinfo_list = self._parse_ns_output(cli_output) try: cli_output = self._get_switch_data(ZoneConstant.NS_CAM_SHOW) except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed collecting nscamshow")) + LOG.error(_LE("Failed collecting nscamshow")) if cli_output: nsinfo_list.extend(self._parse_ns_output(cli_output)) LOG.debug("Connector returning nsinfo-%s", nsinfo_list) diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py index fc9122f29..ecdded689 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py +++ b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py @@ -29,7 +29,7 @@ from oslo.concurrency import processutils from oslo.utils import excutils from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder import ssh_utils from cinder import utils @@ -79,8 +79,8 @@ class BrcdFCZoneClientCLI(object): [ZoneConstant.GET_ACTIVE_ZONE_CFG]) except exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed getting active zone set " - "from fabric %s"), self.switch_ip) + LOG.error(_LE("Failed getting active zone set " + "from fabric %s"), self.switch_ip) try: for line in switch_data: line_split = re.split('\\t', line) @@ -148,7 +148,7 @@ class BrcdFCZoneClientCLI(object): self.delete_zones(zone, 
activate, active_zone_set) except exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_("Deleting zone failed %s"), zone) + LOG.error(_LE("Deleting zone failed %s"), zone) LOG.debug("Deleted Zone before insert : %s", zone) zone_members_with_sep = ';'.join(str(member) for member in zones[zone]) @@ -257,8 +257,8 @@ class BrcdFCZoneClientCLI(object): cli_output = self._get_switch_info([cmd]) except exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed collecting nsshow " - "info for fabric %s"), self.switch_ip) + LOG.error(_LE("Failed collecting nsshow " + "info for fabric %s"), self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) cli_output = None @@ -329,7 +329,7 @@ class BrcdFCZoneClientCLI(object): firmware = int(ver[0] + ver[1]) return firmware > 63 else: - LOG.error(_("No CLI output for firmware version check")) + LOG.error(_LE("No CLI output for firmware version check")) return False except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " @@ -414,7 +414,7 @@ class BrcdFCZoneClientCLI(object): cmd=command) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_("Error running SSH command: %s") % command) + LOG.error(_LE("Error running SSH command: %s") % command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. @@ -479,7 +479,7 @@ class BrcdFCZoneClientCLI(object): cmd=command) except Exception as e: with excutils.save_and_reraise_exception(): - LOG.error(_("Error executing command via ssh: %s"), e) + LOG.error(_LE("Error executing command via ssh: %s"), e) finally: if stdin: stdin.flush() diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py index 6740433fb..41d8319eb 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py +++ b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py @@ -36,7 +36,7 @@ from oslo.utils import excutils from oslo.utils import importutils from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE, _LI from cinder.openstack.common import log as logging from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts from cinder.zonemanager.drivers.fc_zone_driver import FCZoneDriver @@ -134,15 +134,15 @@ class BrcdFCZoneDriver(FCZoneDriver): :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Add connection for Fabric:%s", fabric) - LOG.info(_("BrcdFCZoneDriver - Add connection " - "for I-T map: %s"), initiator_target_map) + LOG.info(_LI("BrcdFCZoneDriver - Add connection " + "for I-T map: %s"), initiator_target_map) zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'zoning_policy') if zoning_policy_fab: zoning_policy = zoning_policy_fab - LOG.info(_("Zoning policy for Fabric %s"), zoning_policy) + LOG.info(_LI("Zoning policy for Fabric %s"), zoning_policy) cli_client = self._get_cli_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(cli_client) @@ -169,8 +169,8 @@ class BrcdFCZoneDriver(FCZoneDriver): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone already exists. - LOG.info(_("Zone exists in I-T mode. " - "Skipping zone creation %s"), zone_name) + LOG.info(_LI("Zone exists in I-T mode. 
" + "Skipping zone creation %s"), zone_name) elif zoning_policy == 'initiator': zone_members = [self.get_formatted_wwn(initiator)] for t in t_list: @@ -192,7 +192,7 @@ class BrcdFCZoneDriver(FCZoneDriver): LOG.error(msg) raise exception.FCZoneDriverException(msg) - LOG.info(_("Zone map to add: %s"), zone_map) + LOG.info(_LI("Zone map to add: %s"), zone_map) if len(zone_map) > 0: try: @@ -220,7 +220,7 @@ class BrcdFCZoneDriver(FCZoneDriver): :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Delete connection for fabric:%s", fabric) - LOG.info(_("BrcdFCZoneDriver - Delete connection for I-T map: %s"), + LOG.info(_LI("BrcdFCZoneDriver - Delete connection for I-T map: %s"), initiator_target_map) zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( @@ -228,7 +228,7 @@ class BrcdFCZoneDriver(FCZoneDriver): if zoning_policy_fab: zoning_policy = zoning_policy_fab - LOG.info(_("Zoning policy for fabric %s"), zoning_policy) + LOG.info(_LI("Zoning policy for fabric %s"), zoning_policy) conn = self._get_cli_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(conn) @@ -292,8 +292,8 @@ class BrcdFCZoneDriver(FCZoneDriver): else: zones_to_delete.append(zone_name) else: - LOG.info(_("Zoning Policy: %s, not " - "recognized"), zoning_policy) + LOG.info(_LI("Zoning Policy: %s, not " + "recognized"), zoning_policy) LOG.debug("Final Zone map to update: %s", zone_map) LOG.debug("Final Zone list to delete: %s", zones_to_delete) try: @@ -360,8 +360,8 @@ class BrcdFCZoneDriver(FCZoneDriver): LOG.error(msg) raise exception.FCZoneDriverException(msg) with excutils.save_and_reraise_exception(): - LOG.error(_("Error getting name server " - "info: %s"), ex) + LOG.error(_LE("Error getting name server " + "info: %s"), ex) except Exception as e: msg = (_("Failed to get name server info:%s") % e) LOG.error(msg) @@ -371,7 +371,7 @@ class BrcdFCZoneDriver(FCZoneDriver): nsinfo) if visible_targets: - LOG.info(_("Filtered targets for SAN is: %s"), + LOG.info(_LI("Filtered targets for SAN is: %s"), {fabric_name: visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py index 419489030..48765ba2b 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py @@ -23,7 +23,7 @@ from oslo.utils import excutils import six from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder import ssh_utils from cinder import utils @@ -181,8 +181,8 @@ class CiscoFCSanLookupService(FCSanLookupService): cli_output = self._get_switch_info(cmd) except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed collecting show fcns database for" - " fabric")) + LOG.error(_LE("Failed collecting show fcns database for" + " fabric")) if cli_output: nsinfo_list = self._parse_ns_output(cli_output) @@ -266,7 +266,7 @@ class CiscoFCSanLookupService(FCSanLookupService): cmd=command) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_("Error running SSH command: %s") % command) + LOG.error(_LE("Error running SSH command: %s") % command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status 
update. diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py index 133485fa6..879a43e79 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py @@ -27,7 +27,7 @@ from oslo.utils import excutils import six from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE, _LI from cinder.openstack.common import log as logging from cinder import ssh_utils from cinder import utils @@ -87,8 +87,8 @@ class CiscoFCZoneClientCLI(object): ' | no-more']) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed getting active zone set " - "from fabric %s"), self.switch_ip) + LOG.error(_LE("Failed getting active zone set " + "from fabric %s"), self.switch_ip) try: for line in switch_data: # Split on non-word characters, @@ -173,7 +173,7 @@ class CiscoFCZoneClientCLI(object): active_zone_set, zone_status) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_("Deleting zone failed %s"), zone) + LOG.error(_LE("Deleting zone failed %s"), zone) LOG.debug("Deleted Zone before insert : %s", zone) zone_cmds.append(['zone', 'name', zone]) @@ -222,8 +222,8 @@ class CiscoFCZoneClientCLI(object): [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed getting zone status " - "from fabric %s"), self.switch_ip) + LOG.error(_LE("Failed getting zone status " + "from fabric %s"), self.switch_ip) try: for line in switch_data: # Split on non-word characters, @@ -303,13 +303,13 @@ class CiscoFCZoneClientCLI(object): self.fabric_vsan]) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_("Failed collecting fcns database " - "info for fabric %s"), self.switch_ip) + LOG.error(_LE("Failed collecting fcns database " + "info for fabric %s"), self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) - LOG.info(_("Connector returning fcnsinfo-%s"), return_list) + LOG.info(_LI("Connector returning fcnsinfo-%s"), return_list) return return_list @@ -394,7 +394,7 @@ class CiscoFCZoneClientCLI(object): cmd=command) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_("Error running SSH command: %s") % command) + LOG.error(_LE("Error running SSH command: %s") % command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. 
diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py index 04791c5ea..676cf98e6 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py @@ -34,7 +34,7 @@ from oslo.utils import importutils import six from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE, _LI from cinder.openstack.common import log as logging from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts from cinder.zonemanager.drivers.fc_zone_driver import FCZoneDriver @@ -123,8 +123,8 @@ class CiscoFCZoneDriver(FCZoneDriver): """ LOG.debug("Add connection for Fabric:%s", fabric) - LOG.info(_("CiscoFCZoneDriver - Add connection " - "for I-T map: %s"), initiator_target_map) + LOG.info(_LI("CiscoFCZoneDriver - Add connection " + "for I-T map: %s"), initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( @@ -141,7 +141,7 @@ class CiscoFCZoneDriver(FCZoneDriver): zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') - LOG.info(_("Zoning policy for Fabric %s"), zoning_policy) + LOG.info(_LI("Zoning policy for Fabric %s"), zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) @@ -173,8 +173,8 @@ class CiscoFCZoneDriver(FCZoneDriver): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone exists. - LOG.info(_("Zone exists in I-T mode. " - "Skipping zone creation %s"), + LOG.info(_LI("Zone exists in I-T mode. " + "Skipping zone creation %s"), zone_name) elif zoning_policy == 'initiator': zone_members = [get_formatted_wwn(initiator)] @@ -196,7 +196,7 @@ class CiscoFCZoneDriver(FCZoneDriver): LOG.error(msg) raise exception.FCZoneDriverException(msg) - LOG.info(_("Zone map to add: %s"), zone_map) + LOG.info(_LI("Zone map to add: %s"), zone_map) if len(zone_map) > 0: conn = None @@ -237,7 +237,7 @@ class CiscoFCZoneDriver(FCZoneDriver): :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Delete connection for fabric:%s", fabric) - LOG.info(_("CiscoFCZoneDriver - Delete connection for I-T map: %s"), + LOG.info(_LI("CiscoFCZoneDriver - Delete connection for I-T map: %s"), initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') @@ -256,7 +256,7 @@ class CiscoFCZoneDriver(FCZoneDriver): zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') - LOG.info(_("Zoning policy for fabric %s"), zoning_policy) + LOG.info(_LI("Zoning policy for fabric %s"), zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) @@ -327,7 +327,7 @@ class CiscoFCZoneDriver(FCZoneDriver): else: zones_to_delete.append(zone_name) else: - LOG.info(_("Zoning Policy: %s, not recognized"), + LOG.info(_LI("Zoning Policy: %s, not recognized"), zoning_policy) LOG.debug("Final Zone map to update: %s", zone_map) LOG.debug("Final Zone list to delete: %s", zones_to_delete) @@ -418,8 +418,8 @@ class CiscoFCZoneDriver(FCZoneDriver): conn.cleanup() except exception.CiscoZoningCliException as ex: with excutils.save_and_reraise_exception(): - LOG.error(_("Error getting show fcns database " - "info: %s"), six.text_type(ex)) + LOG.error(_LE("Error getting show fcns database " + "info: %s"), six.text_type(ex)) except Exception as e: 
msg = (_("Failed to get show fcns database info:%s") % six.text_type(e)) @@ -429,7 +429,7 @@ class CiscoFCZoneDriver(FCZoneDriver): lambda x: x in formatted_target_list, nsinfo) if visible_targets: - LOG.info(_("Filtered targets for SAN is: %s"), + LOG.info(_LI("Filtered targets for SAN is: %s"), {fabric_name: visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): diff --git a/cinder/zonemanager/fc_zone_manager.py b/cinder/zonemanager/fc_zone_manager.py index 42868da6e..46400a1ee 100644 --- a/cinder/zonemanager/fc_zone_manager.py +++ b/cinder/zonemanager/fc_zone_manager.py @@ -35,7 +35,7 @@ from oslo.config import cfg from oslo.utils import importutils from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LI from cinder.openstack.common import log as logging from cinder.volume import configuration as config from cinder.zonemanager import fc_common @@ -142,14 +142,14 @@ class ZoneManager(fc_common.FCCommon): i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, True) - LOG.info(_("Final filtered map for fabric: %s"), + LOG.info(_LI("Final filtered map for fabric: %s"), {fabric: valid_i_t_map}) # Call driver to add connection control self.driver.add_connection(fabric, valid_i_t_map) - LOG.info(_("Add Connection: Finished iterating " - "over all target list")) + LOG.info(_LI("Add Connection: Finished iterating " + "over all target list")) except Exception as e: msg = _("Failed adding connection for fabric=%(fabric)s: " "Error:%(err)s") % {'fabric': connected_fabric, @@ -172,7 +172,7 @@ class ZoneManager(fc_common.FCCommon): try: for initiator in initiator_target_map.keys(): target_list = initiator_target_map[initiator] - LOG.info(_("Delete connection Target List:%s"), + LOG.info(_LI("Delete connection Target List:%s"), {initiator: target_list}) # get SAN context for the target list @@ -188,8 +188,8 @@ class ZoneManager(fc_common.FCCommon): i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, False) - LOG.info(_("Final filtered map for delete " - "connection: %s"), valid_i_t_map) + LOG.info(_LI("Final filtered map for delete " + "connection: %s"), valid_i_t_map) # Call driver to delete connection control if len(valid_i_t_map) > 0: @@ -239,6 +239,6 @@ class ZoneManager(fc_common.FCCommon): if t_list: filtered_i_t_map[initiator] = t_list else: - LOG.info(_("No targets to add or remove connection for " - "I: %s"), initiator) + LOG.info(_LI("No targets to add or remove connection for " + "I: %s"), initiator) return filtered_i_t_map diff --git a/cinder/zonemanager/utils.py b/cinder/zonemanager/utils.py index ea12918d2..1dc8c4d36 100644 --- a/cinder/zonemanager/utils.py +++ b/cinder/zonemanager/utils.py @@ -19,7 +19,7 @@ Utility functions related to the Zone Manager. 
""" import logging -from cinder.i18n import _ +from cinder.i18n import _, _LI from cinder.openstack.common import log from cinder.volume.configuration import Configuration from cinder.volume import manager @@ -37,8 +37,8 @@ def create_zone_manager(): if config.safe_get('zoning_mode') == 'fabric': LOG.debug("FC Zone Manager enabled.") zm = fc_zone_manager.ZoneManager(configuration=config) - LOG.info(_("Using FC Zone Manager %(zm_version)s," - " Driver %(drv_name)s %(drv_version)s.") % + LOG.info(_LI("Using FC Zone Manager %(zm_version)s," + " Driver %(drv_name)s %(drv_version)s.") % {'zm_version': zm.get_version(), 'drv_name': zm.driver.__class__.__name__, 'drv_version': zm.driver.get_version()}) @@ -54,7 +54,7 @@ def create_lookup_service(): if config.safe_get('zoning_mode') == 'fabric': LOG.debug("FC Lookup Service enabled.") lookup = fc_san_lookup_service.FCSanLookupService(configuration=config) - LOG.info(_("Using FC lookup service %s") % lookup.lookup_service) + LOG.info(_LI("Using FC lookup service %s") % lookup.lookup_service) return lookup else: LOG.debug("FC Lookup Service not enabled in cinder.conf.")