From: Mike Mason
Date: Tue, 11 Nov 2014 09:11:29 +0000 (+0000)
Subject: Implementing the use of _L'x'/i18n markers
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=1fd4c55c85549ef066597b69c05c15dbb975405a;p=openstack-build%2Fcinder-build.git

Implementing the use of _L'x'/i18n markers

Placing the _Lx markers back into the code. No other cleaner solution has been implemented. Patches will be submitted as a series, one subdirectory at a time, so that each remains manageable. This is the fourth commit of this kind.

Change-Id: Ibbef7f06a391e9e6efca082d45caecdf60a9e811
Partial-Bug: #1384312
---
diff --git a/cinder/volume/api.py b/cinder/volume/api.py index fd7279a61..0abf67eea 100644 --- a/cinder/volume/api.py +++ b/cinder/volume/api.py @@ -32,7 +32,7 @@ from cinder import context from cinder.db import base from cinder import exception from cinder import flow_utils -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.image import glance from cinder import keymgr from cinder.openstack.common import log as logging @@ -237,7 +237,7 @@ class API(base.Base): availability_zones, create_what) except Exception: - LOG.exception(_("Failed to create api volume flow")) + LOG.exception(_LE("Failed to create api volume flow")) raise exception.CinderException( _("Failed to create api volume flow")) @@ -271,7 +271,8 @@ class API(base.Base): **reserve_opts) except Exception: reservations = None - LOG.exception(_("Failed to update quota for deleting volume")) + LOG.exception(_LE("Failed to update quota for " + "deleting volume")) self.db.volume_destroy(context.elevated(), volume_id) if reservations: diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index 6739dc1d5..4b8af246f 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -27,7 +27,7 @@ import six from cinder.db.sqlalchemy import api from cinder.db.sqlalchemy import models from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder import utils from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib @@ -718,7 +718,7 @@ class HBSDCommon(object): total_gb, free_gb = self.command.comm_get_dp_pool( self.configuration.hitachi_pool_id) except Exception as ex: - LOG.error(_('Failed to update volume status: %s') % + LOG.error(_LE('Failed to update volume status: %s') % six.text_type(ex)) return None diff --git a/cinder/volume/drivers/hitachi/hbsd_horcm.py b/cinder/volume/drivers/hitachi/hbsd_horcm.py index 17ba43ceb..108735a81 100644 --- a/cinder/volume/drivers/hitachi/hbsd_horcm.py +++ b/cinder/volume/drivers/hitachi/hbsd_horcm.py @@ -26,7 +26,7 @@ from oslo.utils import excutils import six from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder.openstack.common import loopingcall from cinder import utils @@ -214,7 +214,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib): raise loopingcall.LoopingCallDone() if self.shutdown_horcm(inst): - LOG.error(_("Failed to shutdown horcm.")) + LOG.error(_LE("Failed to shutdown horcm.")) raise loopingcall.LoopingCallDone() @horcm_synchronized @@ -275,14 +275,14 @@ class HBSDHORCM(basic_lib.HBSDBasicLib): raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if time.time() - start >= EXEC_MAX_WAITTIME: - LOG.error(_("horcm command timeout.")) + LOG.error(_LE("horcm command timeout.")) raise 
loopingcall.LoopingCallDone((ret, stdout, stderr)) if (ret == EX_ENAUTH and not re.search("-login %s %s" % (user, passwd), args)): _ret, _stdout, _stderr = self.comm_login() if _ret: - LOG.error(_("Failed to authenticate user.")) + LOG.error(_LE("Failed to authenticate user.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) elif ret in HORCM_ERROR: @@ -291,11 +291,11 @@ class HBSDHORCM(basic_lib.HBSDBasicLib): if self.check_horcm(inst) != HORCM_RUNNING: _ret, _stdout, _stderr = self.start_horcm(inst) if _ret and _ret != HORCM_RUNNING: - LOG.error(_("Failed to start horcm.")) + LOG.error(_LE("Failed to start horcm.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) elif ret not in COMMAND_IO_TO_RAID: - LOG.error(_("Unexpected error occurs in horcm.")) + LOG.error(_LE("Unexpected error occurs in horcm.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) def exec_raidcom(self, cmd, args, printflag=True): diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py index 8f2d3ccf4..1b1246f86 100644 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py @@ -24,7 +24,7 @@ from oslo.config import cfg import six from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _LE from cinder.openstack.common import log as logging from cinder import utils import cinder.volume.driver @@ -186,7 +186,7 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver): % {'port': port, 'gid': gid}) break if gid is None: - LOG.error(_('Failed to add target(port: %s)') % port) + LOG.error(_LE('Failed to add target(port: %s)') % port) continue try: if added_hostgroup: diff --git a/cinder/volume/drivers/hitachi/hbsd_snm2.py b/cinder/volume/drivers/hitachi/hbsd_snm2.py index 417918252..58426bac3 100644 --- a/cinder/volume/drivers/hitachi/hbsd_snm2.py +++ b/cinder/volume/drivers/hitachi/hbsd_snm2.py @@ -21,7 +21,7 @@ import time import six from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder.openstack.common import loopingcall from cinder import utils @@ -72,7 +72,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib): raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if time.time() - start >= timeout: - LOG.error(_("snm2 command timeout.")) + LOG.error(_LE("snm2 command timeout.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if (re.search('DMEC002047', stderr) @@ -86,7 +86,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib): or re.search('DMER0800CF', stderr) or re.search('DMER0800D[0-6D]', stderr) or re.search('DMES052602', stderr)): - LOG.error(_("Unexpected error occurs in snm2.")) + LOG.error(_LE("Unexpected error occurs in snm2.")) raise loopingcall.LoopingCallDone((ret, stdout, stderr)) def exec_hsnm(self, command, args, printflag=True, noretry=False, diff --git a/cinder/volume/drivers/huawei/huawei_utils.py b/cinder/volume/drivers/huawei/huawei_utils.py index 9c6b9e5cf..be3a38406 100644 --- a/cinder/volume/drivers/huawei/huawei_utils.py +++ b/cinder/volume/drivers/huawei/huawei_utils.py @@ -16,7 +16,7 @@ from xml.etree import ElementTree as ET -from cinder.i18n import _ +from cinder.i18n import _LE from cinder.openstack.common import log as logging LOG = logging.getLogger(__name__) @@ -38,7 +38,7 @@ def parse_xml_file(filepath): root = tree.getroot() return root except IOError as err: - LOG.error(_('parse_xml_file: %s') % err) + LOG.error(_LE('parse_xml_file: %s') % err) raise err 
diff --git a/cinder/volume/drivers/huawei/rest_common.py b/cinder/volume/drivers/huawei/rest_common.py index d7e56f83d..93a8a7a74 100644 --- a/cinder/volume/drivers/huawei/rest_common.py +++ b/cinder/volume/drivers/huawei/rest_common.py @@ -28,7 +28,7 @@ from oslo.utils import units from cinder import context from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder import utils from cinder.volume.drivers.huawei import huawei_utils @@ -80,7 +80,7 @@ class HVSCommon(): try: res_json = json.loads(res) except Exception as err: - LOG.error(_('JSON transfer error')) + LOG.error(_LE('JSON transfer error')) raise err return res_json diff --git a/cinder/volume/drivers/huawei/ssh_common.py b/cinder/volume/drivers/huawei/ssh_common.py index 0cf026f96..3e970d0e9 100644 --- a/cinder/volume/drivers/huawei/ssh_common.py +++ b/cinder/volume/drivers/huawei/ssh_common.py @@ -30,7 +30,7 @@ from oslo.utils import excutils from cinder import context from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE, _LI from cinder.openstack.common import log as logging from cinder import ssh_utils from cinder import utils @@ -168,7 +168,7 @@ class TseriesCommon(): try: tree.write(filename, 'UTF-8') except Exception as err: - LOG.info(_('_get_login_info: %s') % err) + LOG.info(_LI('_get_login_info: %s') % err) return logininfo @@ -464,9 +464,9 @@ class TseriesCommon(): self.ssh_pool.ip = ip1 old_ip = ip0 - LOG.info(_('_execute_cli: Can not connect to IP ' - '%(old)s, try to connect to the other ' - 'IP %(new)s.') + LOG.info(_LI('_execute_cli: Can not connect to IP ' + '%(old)s, try to connect to the other ' + 'IP %(new)s.') % {'old': old_ip, 'new': self.ssh_pool.ip}) if not ssh_client: @@ -499,7 +499,7 @@ class TseriesCommon(): else: if ssh_client: self.ssh_pool.remove(ssh_client) - LOG.error(_('_execute_cli: %s') % err) + LOG.error(_LE('_execute_cli: %s') % err) raise err def _reset_transport_timeout(self, ssh, time): @@ -931,7 +931,7 @@ class TseriesCommon(): """Map a volume to a host.""" # Map a LUN to a host if not mapped. 
if not self._check_volume_created(volume_id): - LOG.error(_('map_volume: Volume %s was not found.') % volume_id) + LOG.error(_LE('map_volume: Volume %s was not found.') % volume_id) raise exception.VolumeNotFound(volume_id=volume_id) hostlun_id = None @@ -1100,11 +1100,12 @@ class TseriesCommon(): host_name = HOST_NAME_PREFIX + host_name host_id = self._get_host_id(host_name, self.hostgroup_id) if host_id is None: - LOG.error(_('remove_map: Host %s does not exist.') % host_name) + LOG.error(_LE('remove_map: Host %s does ' + 'not exist.') % host_name) raise exception.HostNotFound(host=host_name) if not self._check_volume_created(volume_id): - LOG.error(_('remove_map: Volume %s does not exist.') % volume_id) + LOG.error(_LE('remove_map: Volume %s does not exist.') % volume_id) raise exception.VolumeNotFound(volume_id=volume_id) map_id = None @@ -1309,8 +1310,8 @@ class DoradoCommon(TseriesCommon): elif re.search('Dorado5100$', line): return 'Dorado5100' else: - LOG.error(_('_get_device_type: The driver only supports ' - 'Dorado5100 and Dorado 2100 G2 now.')) + LOG.error(_LE('_get_device_type: The driver only supports ' + 'Dorado5100 and Dorado 2100 G2 now.')) raise exception.InvalidResults() def _get_lun_distribution_info(self, luns): diff --git a/cinder/volume/drivers/ibm/gpfs.py b/cinder/volume/drivers/ibm/gpfs.py index 5025495c1..1a871d0a7 100644 --- a/cinder/volume/drivers/ibm/gpfs.py +++ b/cinder/volume/drivers/ibm/gpfs.py @@ -26,7 +26,7 @@ from oslo.config import cfg from oslo.utils import units from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.image import image_utils from cinder.openstack.common import fileutils from cinder.openstack.common import log as logging @@ -123,7 +123,7 @@ class GPFSDriver(driver.VolumeDriver): (out, err) = self._execute('mmgetstate', '-Y', run_as_root=True) return out except processutils.ProcessExecutionError as exc: - LOG.error(_('Failed to issue mmgetstate command, error: %s.') % + LOG.error(_LE('Failed to issue mmgetstate command, error: %s.') % exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -134,7 +134,7 @@ class GPFSDriver(driver.VolumeDriver): state_token = lines[0].split(':').index('state') gpfs_state = lines[1].split(':')[state_token] if gpfs_state != 'active': - LOG.error(_('GPFS is not active. Detailed output: %s.') % out) + LOG.error(_LE('GPFS is not active. 
Detailed output: %s.') % out) exception_message = (_('GPFS is not running, state: %s.') % gpfs_state) raise exception.VolumeBackendAPIException(data=exception_message) @@ -147,8 +147,8 @@ class GPFSDriver(driver.VolumeDriver): filesystem = lines[1].split()[0] return filesystem except processutils.ProcessExecutionError as exc: - LOG.error(_('Failed to issue df command for path %(path)s, ' - 'error: %(error)s.') % + LOG.error(_LE('Failed to issue df command for path %(path)s, ' + 'error: %(error)s.') % {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -163,7 +163,7 @@ class GPFSDriver(driver.VolumeDriver): cluster_id = lines[1].split(':')[value_token] return cluster_id except processutils.ProcessExecutionError as exc: - LOG.error(_('Failed to issue mmlsconfig command, error: %s.') % + LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.') % exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -174,8 +174,8 @@ class GPFSDriver(driver.VolumeDriver): (out, err) = self._execute('mmlsattr', '-L', path, run_as_root=True) except processutils.ProcessExecutionError as exc: - LOG.error(_('Failed to issue mmlsattr command on path %(path)s, ' - 'error: %(error)s') % + LOG.error(_LE('Failed to issue mmlsattr command on path %(path)s, ' + 'error: %(error)s') % {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -232,8 +232,8 @@ class GPFSDriver(driver.VolumeDriver): (out, err) = self._execute('mmlsfs', filesystem, '-V', '-Y', run_as_root=True) except processutils.ProcessExecutionError as exc: - LOG.error(_('Failed to issue mmlsfs command for path %(path)s, ' - 'error: %(error)s.') % + LOG.error(_LE('Failed to issue mmlsfs command for path %(path)s, ' + 'error: %(error)s.') % {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -252,7 +252,7 @@ class GPFSDriver(driver.VolumeDriver): (out, err) = self._execute('mmlsconfig', 'minreleaseLeveldaemon', '-Y', run_as_root=True) except processutils.ProcessExecutionError as exc: - LOG.error(_('Failed to issue mmlsconfig command, error: %s.') % + LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.') % exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -269,8 +269,9 @@ class GPFSDriver(driver.VolumeDriver): try: self._execute('mmlsattr', directory, run_as_root=True) except processutils.ProcessExecutionError as exc: - LOG.error(_('Failed to issue mmlsattr command for path %(path)s, ' - 'error: %(error)s.') % + LOG.error(_LE('Failed to issue mmlsattr command ' + 'for path %(path)s, ' + 'error: %(error)s.') % {'path': directory, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) diff --git a/cinder/volume/drivers/ibm/storwize_svc/__init__.py b/cinder/volume/drivers/ibm/storwize_svc/__init__.py index 7adbf4f63..8ff9cc5b8 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/__init__.py +++ b/cinder/volume/drivers/ibm/storwize_svc/__init__.py @@ -43,7 +43,7 @@ from oslo.utils import units from cinder import context from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder.openstack.common import loopingcall from cinder import utils @@ -289,7 +289,7 @@ class StorwizeSVCDriver(san.SanDriver): """ volume_defined = self._helpers.is_vdisk_defined(volume['name']) if not volume_defined: - LOG.error(_('ensure_export: Volume %s not found on storage') + 
LOG.error(_LE('ensure_export: Volume %s not found on storage') % volume['name']) def create_export(self, ctxt, volume): @@ -383,8 +383,8 @@ class StorwizeSVCDriver(san.SanDriver): preferred_node = volume_attributes['preferred_node_id'] IO_group = volume_attributes['IO_group_id'] except KeyError as e: - LOG.error(_('Did not find expected column name in ' - 'lsvdisk: %s') % e) + LOG.error(_LE('Did not find expected column name in ' + 'lsvdisk: %s') % e) msg = (_('initialize_connection: Missing volume ' 'attribute for volume %s') % volume_name) raise exception.VolumeBackendAPIException(data=msg) @@ -482,10 +482,11 @@ class StorwizeSVCDriver(san.SanDriver): except Exception: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) - LOG.error(_('initialize_connection: Failed to collect return ' - 'properties for volume %(vol)s and connector ' - '%(conn)s.\n') % {'vol': volume, - 'conn': connector}) + LOG.error(_LE('initialize_connection: Failed ' + 'to collect return ' + 'properties for volume %(vol)s and connector ' + '%(conn)s.\n') % {'vol': volume, + 'conn': connector}) LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector %(conn)s\n properties: %(prop)s' diff --git a/cinder/volume/drivers/ibm/storwize_svc/helpers.py b/cinder/volume/drivers/ibm/storwize_svc/helpers.py index 872ddc839..6d02b0893 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/helpers.py +++ b/cinder/volume/drivers/ibm/storwize_svc/helpers.py @@ -25,7 +25,7 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder.openstack.common import loopingcall from cinder.volume.drivers.ibm.storwize_svc import ssh as storwize_ssh @@ -471,8 +471,8 @@ class StorwizeHelpers(object): key = 'protocol' words = value.split() if not (words and len(words) == 2 and words[0] == ''): - LOG.error(_('Protocol must be specified as ' - '\' iSCSI\' or \' FC\'.')) + LOG.error(_LE('Protocol must be specified as ' + '\' iSCSI\' or \' FC\'.')) del words[0] value = words[0] @@ -486,8 +486,8 @@ class StorwizeHelpers(object): key = 'replication' words = value.split() if not (words and len(words) == 2 and words[0] == ''): - LOG.error(_('Replication must be specified as ' - '\' True\' or \' False\'.')) + LOG.error(_LE('Replication must be specified as ' + '\' True\' or \' False\'.')) del words[0] value = words[0] diff --git a/cinder/volume/drivers/ibm/storwize_svc/ssh.py b/cinder/volume/drivers/ibm/storwize_svc/ssh.py index 0e58d93be..2243a21d3 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/ssh.py +++ b/cinder/volume/drivers/ibm/storwize_svc/ssh.py @@ -19,7 +19,7 @@ import re from oslo.concurrency import processutils from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging LOG = logging.getLogger(__name__) @@ -164,8 +164,8 @@ class StorwizeSSH(object): raise exception.VolumeBackendAPIException(data=msg) if err.startswith('CMMVC6071E'): if not multihostmap: - LOG.error(_('storwize_svc_multihostmap_enabled is set ' - 'to False, not allowing multi host mapping.')) + LOG.error(_LE('storwize_svc_multihostmap_enabled is set ' + 'to False, not allowing multi host mapping.')) msg = 'CMMVC6071E The VDisk-to-host mapping '\ 'was not created because the VDisk is '\ 'already mapped to a host.\n"' diff --git a/cinder/volume/drivers/san/hp/hp_3par_common.py 
b/cinder/volume/drivers/san/hp/hp_3par_common.py index ed15db9ab..e1e7bc01a 100644 --- a/cinder/volume/drivers/san/hp/hp_3par_common.py +++ b/cinder/volume/drivers/san/hp/hp_3par_common.py @@ -57,7 +57,7 @@ from oslo.utils import units from cinder import context from cinder import exception from cinder import flow_utils -from cinder.i18n import _ +from cinder.i18n import _, _LE, _LI from cinder.openstack.common import log as logging from cinder.openstack.common import loopingcall from cinder.volume import qos_specs @@ -268,7 +268,7 @@ class HP3PARCommon(object): self.client = self._create_client() except hpexceptions.UnsupportedVersion as ex: raise exception.InvalidInput(ex) - LOG.info(_("HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s") + LOG.info(_LI("HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s") % {"common_ver": self.VERSION, "rest_ver": hp3parclient.get_version_string()}) if self.config.hp3par_debug: @@ -373,20 +373,21 @@ class HP3PARCommon(object): {'newName': new_vol_name, 'comment': json.dumps(new_comment)}) - LOG.info(_("Virtual volume '%(ref)s' renamed to '%(new)s'.") % - {'ref': target_vol_name, 'new': new_vol_name}) + LOG.info(_LI("Virtual volume '%(ref)s' renamed to '%(new)s'.") % + {'ref': existing_ref['source-name'], 'new': new_vol_name}) retyped = False model_update = None if volume_type: - LOG.info(_("Virtual volume %(disp)s '%(new)s' is being retyped.") % + LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " + "being retyped.") % {'disp': display_name, 'new': new_vol_name}) try: retyped, model_update = self._retype_from_no_type(volume, volume_type) - LOG.info(_("Virtual volume %(disp)s successfully retyped to " - "%(new_type)s.") % + LOG.info(_LI("Virtual volume %(disp)s successfully retyped to " + "%(new_type)s.") % {'disp': display_name, 'new_type': volume_type.get('name')}) except Exception: @@ -404,7 +405,8 @@ class HP3PARCommon(object): if retyped and model_update: updates.update(model_update) - LOG.info(_("Virtual volume %(disp)s '%(new)s' is now being managed.") % + LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " + "now being managed.") % {'disp': display_name, 'new': new_vol_name}) # Return display name to update the name displayed in the GUI and @@ -445,8 +447,8 @@ class HP3PARCommon(object): new_vol_name = self._get_3par_unm_name(volume['id']) self.client.modifyVolume(vol_name, {'newName': new_vol_name}) - LOG.info(_("Virtual volume %(disp)s '%(vol)s' is no longer managed. " - "Volume renamed to '%(new)s'.") % + LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no longer managed. " + "Volume renamed to '%(new)s'.") % {'disp': volume['display_name'], 'vol': vol_name, 'new': new_vol_name}) @@ -496,8 +498,8 @@ class HP3PARCommon(object): growth_size_mib, _convert_to_base=True) else: - LOG.error(_("Error extending volume: %(vol)s. " - "Exception: %(ex)s") % + LOG.error(_LE("Error extending volume: %(vol)s. " + "Exception: %(ex)s") % {'vol': volume_name, 'ex': ex}) return model_update @@ -872,7 +874,7 @@ class HP3PARCommon(object): self.client.createQoSRules(vvs_name, qosRule) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_("Error creating QOS rule %s") % qosRule) + LOG.error(_LE("Error creating QOS rule %s") % qosRule) def _add_volume_to_volume_set(self, volume, volume_name, cpg, vvs_name, qos): @@ -1320,7 +1322,8 @@ class HP3PARCommon(object): {'id': volume['id'], 'size': growth_size}) self.client.growVolume(volume_name, growth_size_mib) except Exception as ex: - LOG.error(_("Error extending volume %(id)s. 
Ex: %(ex)s") % + LOG.error(_LE("Error extending volume %(id)s. " + "Ex: %(ex)s") % {'id': volume['id'], 'ex': ex}) # Delete the volume if unable to grow it self.client.deleteVolume(volume_name) @@ -1431,7 +1434,7 @@ class HP3PARCommon(object): instance_uuid) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_("Error attaching volume %s") % volume) + LOG.error(_LE("Error attaching volume %s") % volume) def detach_volume(self, volume): LOG.debug("Detach Volume\n%s" % pprint.pformat(volume)) @@ -1439,7 +1442,7 @@ class HP3PARCommon(object): self.clear_volume_key_value_pair(volume, 'HPQ-CS-instance_uuid') except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_("Error detaching volume %s") % volume) + LOG.error(_LE("Error detaching volume %s") % volume) def migrate_volume(self, volume, host): """Migrate directly if source and dest are managed by same storage. @@ -1468,8 +1471,8 @@ class HP3PARCommon(object): try: ret = self.retype(volume, volume_type, None, host) except Exception as e: - LOG.info(_('3PAR driver cannot perform migration. ' - 'Retype exception: %s') % six.text_type(e)) + LOG.info(_LI('3PAR driver cannot perform migration. ' + 'Retype exception: %s') % six.text_type(e)) LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s.' % dbg) LOG.debug('migrate_volume result: %s, %s' % ret) @@ -1533,8 +1536,8 @@ class HP3PARCommon(object): # Rename the new volume to the original name self.client.modifyVolume(temp_vol_name, {'newName': volume_name}) - LOG.info(_('Completed: convert_to_base_volume: ' - 'id=%s.') % volume['id']) + LOG.info(_LI('Completed: convert_to_base_volume: ' + 'id=%s.') % volume['id']) except hpexceptions.HTTPConflict: msg = _("Volume (%s) already exists on array.") % volume_name LOG.error(msg) @@ -1647,8 +1650,9 @@ class HP3PARCommon(object): if old_tpvv == new_tpvv: if new_cpg != old_cpg: - LOG.info(_("Modifying %(volume_name)s userCPG from %(old_cpg)s" - " to %(new_cpg)s") % + LOG.info(_LI("Modifying %(volume_name)s userCPG " + "from %(old_cpg)s" + " to %(new_cpg)s") % {'volume_name': volume_name, 'old_cpg': old_cpg, 'new_cpg': new_cpg}) _response, body = self.client.modifyVolume( @@ -1667,13 +1671,13 @@ class HP3PARCommon(object): else: if old_tpvv: cop = self.CONVERT_TO_FULL - LOG.info(_("Converting %(volume_name)s to full provisioning " - "with userCPG=%(new_cpg)s") % + LOG.info(_LI("Converting %(volume_name)s to full provisioning " + "with userCPG=%(new_cpg)s") % {'volume_name': volume_name, 'new_cpg': new_cpg}) else: cop = self.CONVERT_TO_THIN - LOG.info(_("Converting %(volume_name)s to thin provisioning " - "with userCPG=%(new_cpg)s") % + LOG.info(_LI("Converting %(volume_name)s to thin provisioning " + "with userCPG=%(new_cpg)s") % {'volume_name': volume_name, 'new_cpg': new_cpg}) try: @@ -1688,8 +1692,8 @@ class HP3PARCommon(object): # Cannot retype with snapshots because we don't want to # use keepVV and have straggling volumes. Log additional # info and then raise. - LOG.info(_("tunevv failed because the volume '%s' " - "has snapshots.") % volume_name) + LOG.info(_LI("tunevv failed because the volume '%s' " + "has snapshots.") % volume_name) raise ex task_id = body['taskid'] @@ -1965,8 +1969,8 @@ class ModifyVolumeTask(flow_utils.CinderTask): if new_snap_cpg != old_snap_cpg: # Modify the snap_cpg. This will fail with snapshots. 
- LOG.info(_("Modifying %(volume_name)s snap_cpg from " - "%(old_snap_cpg)s to %(new_snap_cpg)s.") % + LOG.info(_LI("Modifying %(volume_name)s snap_cpg from " + "%(old_snap_cpg)s to %(new_snap_cpg)s.") % {'volume_name': volume_name, 'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg}) @@ -1976,7 +1980,7 @@ class ModifyVolumeTask(flow_utils.CinderTask): 'comment': json.dumps(comment_dict)}) self.needs_revert = True else: - LOG.info(_("Modifying %s comments.") % volume_name) + LOG.info(_LI("Modifying %s comments.") % volume_name) common.client.modifyVolume( volume_name, {'comment': json.dumps(comment_dict)}) @@ -1985,8 +1989,8 @@ class ModifyVolumeTask(flow_utils.CinderTask): def revert(self, common, volume_name, old_snap_cpg, new_snap_cpg, old_comment, **kwargs): if self.needs_revert: - LOG.info(_("Retype revert %(volume_name)s snap_cpg from " - "%(new_snap_cpg)s back to %(old_snap_cpg)s.") % + LOG.info(_LI("Retype revert %(volume_name)s snap_cpg from " + "%(new_snap_cpg)s back to %(old_snap_cpg)s.") % {'volume_name': volume_name, 'new_snap_cpg': new_snap_cpg, 'old_snap_cpg': old_snap_cpg}) @@ -1995,7 +1999,7 @@ class ModifyVolumeTask(flow_utils.CinderTask): volume_name, {'snapCPG': old_snap_cpg, 'comment': old_comment}) except Exception as ex: - LOG.error(_("Exception during snapCPG revert: %s") % ex) + LOG.error(_LE("Exception during snapCPG revert: %s") % ex) class TuneVolumeTask(flow_utils.CinderTask): @@ -2063,9 +2067,8 @@ class ModifySpecsTask(flow_utils.CinderTask): except hpexceptions.HTTPNotFound as ex: # HTTPNotFound(code=102) is OK. Set does not exist. if ex.get_code() != 102: - LOG.error( - _("Unexpected error when retype() tried to " - "deleteVolumeSet(%s)") % vvs_name) + LOG.error(_LE("Unexpected error when retype() tried to " + "deleteVolumeSet(%s)") % vvs_name) raise ex if new_vvs or new_qos: @@ -2085,36 +2088,32 @@ class ModifySpecsTask(flow_utils.CinderTask): except hpexceptions.HTTPNotFound as ex: # HTTPNotFound(code=102) is OK. Set does not exist. if ex.get_code() != 102: - LOG.error( - _("Unexpected error when retype() revert " - "tried to deleteVolumeSet(%s)") % vvs_name) + LOG.error(_LE("Unexpected error when retype() revert " + "tried to deleteVolumeSet(%s)") % vvs_name) except Exception: - LOG.error( - _("Unexpected error when retype() revert " - "tried to deleteVolumeSet(%s)") % vvs_name) + LOG.error(_LE("Unexpected error when retype() revert " + "tried to deleteVolumeSet(%s)") % vvs_name) if old_vvs is not None or old_qos is not None: try: common._add_volume_to_volume_set( volume, volume_name, old_cpg, old_vvs, old_qos) except Exception as ex: - LOG.error( - _("%(exception)s: Exception during revert of " - "retype for volume %(volume_name)s. " - "Original volume set/QOS settings may not " - "have been fully restored.") % - {'exception': ex, 'volume_name': volume_name}) + LOG.error(_LE("%(exception)s: Exception during revert of " + "retype for volume %(volume_name)s. " + "Original volume set/QOS settings may not " + "have been fully restored.") % + {'exception': ex, 'volume_name': volume_name}) if new_vvs is not None and old_vvs != new_vvs: try: common.client.removeVolumeFromVolumeSet( new_vvs, volume_name) except Exception as ex: - LOG.error( - _("%(exception)s: Exception during revert of " - "retype for volume %(volume_name)s. 
" - "Failed to remove from new volume set " - "%(new_vvs)s.") % - {'exception': ex, - 'volume_name': volume_name, - 'new_vvs': new_vvs}) + LOG.error(_LE("%(exception)s: Exception during revert of " + "retype for volume %(volume_name)s. " + "Failed to remove from new volume set " + "%(new_vvs)s.") % + {'exception': ex, + 'volume_name': volume_name, + 'new_vvs': new_vvs}) diff --git a/cinder/volume/drivers/san/hp/hp_3par_fc.py b/cinder/volume/drivers/san/hp/hp_3par_fc.py index 7d289604a..981dc4248 100644 --- a/cinder/volume/drivers/san/hp/hp_3par_fc.py +++ b/cinder/volume/drivers/san/hp/hp_3par_fc.py @@ -35,7 +35,7 @@ except ImportError: hpexceptions = None from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LI from cinder.openstack.common import log as logging from cinder import utils import cinder.volume.driver @@ -255,8 +255,8 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver): self.common.client.getHostVLUNs(hostname) except hpexceptions.HTTPNotFound: # No more exports for this host. - LOG.info(_("Need to remove FC Zone, building initiator " - "target map")) + LOG.info(_LI("Need to remove FC Zone, building initiator " + "target map")) target_wwns, init_targ_map, _numPaths = \ self._build_initiator_target_map(connector) diff --git a/cinder/volume/drivers/san/hp/hp_3par_iscsi.py b/cinder/volume/drivers/san/hp/hp_3par_iscsi.py index b2e7ae2eb..6f1d673bf 100644 --- a/cinder/volume/drivers/san/hp/hp_3par_iscsi.py +++ b/cinder/volume/drivers/san/hp/hp_3par_iscsi.py @@ -36,7 +36,7 @@ except ImportError: hpexceptions = None from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder import utils import cinder.volume.driver @@ -511,7 +511,7 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver): vol_name = self.common._get_3par_vol_name(volume['id']) self.common.client.getVolume(vol_name) except hpexceptions.HTTPNotFound: - LOG.error(_("Volume %s doesn't exist on array.") % vol_name) + LOG.error(_LE("Volume %s doesn't exist on array.") % vol_name) else: metadata = self.common.client.getAllVolumeMetaData(vol_name) diff --git a/cinder/volume/drivers/san/hp/hp_lefthand_cliq_proxy.py b/cinder/volume/drivers/san/hp/hp_lefthand_cliq_proxy.py index 487d39859..c75e0983a 100644 --- a/cinder/volume/drivers/san/hp/hp_lefthand_cliq_proxy.py +++ b/cinder/volume/drivers/san/hp/hp_lefthand_cliq_proxy.py @@ -25,7 +25,7 @@ from oslo.concurrency import processutils from oslo.utils import units from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder.volume.drivers.san.san import SanISCSIDriver @@ -317,7 +317,7 @@ class HPLeftHandCLIQProxy(SanISCSIDriver): try: self._cliq_get_volume_info(volume['name']) except processutils.ProcessExecutionError: - LOG.error(_("Volume did not exist. It will not be deleted")) + LOG.error(_LE("Volume did not exist. It will not be deleted")) return self._cliq_run_xml("deleteVolume", cliq_args) @@ -329,7 +329,7 @@ class HPLeftHandCLIQProxy(SanISCSIDriver): try: self._cliq_get_snapshot_info(snapshot['name']) except processutils.ProcessExecutionError: - LOG.error(_("Snapshot did not exist. It will not be deleted")) + LOG.error(_LE("Snapshot did not exist. 
It will not be deleted")) return try: self._cliq_run_xml("deleteSnapshot", cliq_args) diff --git a/cinder/volume/drivers/san/hp/hp_lefthand_rest_proxy.py b/cinder/volume/drivers/san/hp/hp_lefthand_rest_proxy.py index a916d4054..e6757bf28 100644 --- a/cinder/volume/drivers/san/hp/hp_lefthand_rest_proxy.py +++ b/cinder/volume/drivers/san/hp/hp_lefthand_rest_proxy.py @@ -20,7 +20,7 @@ from oslo.utils import units from cinder import context from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder.volume.driver import ISCSIDriver from cinder.volume import utils @@ -185,7 +185,7 @@ class HPLeftHandRESTProxy(ISCSIDriver): volume_info = self.client.getVolumeByName(volume['name']) self.client.deleteVolume(volume_info['id']) except hpexceptions.HTTPNotFound: - LOG.error(_("Volume did not exist. It will not be deleted")) + LOG.error(_LE("Volume did not exist. It will not be deleted")) except Exception as ex: raise exception.VolumeBackendAPIException(ex) @@ -218,7 +218,7 @@ class HPLeftHandRESTProxy(ISCSIDriver): snap_info = self.client.getSnapshotByName(snapshot['name']) self.client.deleteSnapshot(snap_info['id']) except hpexceptions.HTTPNotFound: - LOG.error(_("Snapshot did not exist. It will not be deleted")) + LOG.error(_LE("Snapshot did not exist. It will not be deleted")) except hpexceptions.HTTPServerError as ex: in_use_msg = 'cannot be deleted because it is a clone point' if in_use_msg in ex.get_description(): @@ -357,8 +357,8 @@ class HPLeftHandRESTProxy(ISCSIDriver): client_value = value_map[value] client_options[client_key] = client_value except KeyError: - LOG.error(_("'%(value)s' is an invalid value " - "for extra spec '%(key)s'") % + LOG.error(_LE("'%(value)s' is an invalid value " + "for extra spec '%(key)s'") % {'value': value, 'key': key}) return client_options diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py index 28cfdf62d..65eb11eb0 100644 --- a/cinder/volume/drivers/vmware/vmdk.py +++ b/cinder/volume/drivers/vmware/vmdk.py @@ -32,7 +32,7 @@ from oslo.utils import excutils from oslo.utils import units from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import fileutils from cinder.openstack.common import log as logging from cinder.openstack.common import uuidutils @@ -1887,7 +1887,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver): if vc_version and vc_version >= self.PBM_ENABLED_VC_VERSION: self.pbm_wsdl = self._get_pbm_wsdl_location(vc_version) if not self.pbm_wsdl: - LOG.error(_("Not able to configure PBM for VC server: %s"), + LOG.error(_LE("Not able to configure PBM for VC server: %s"), vc_version) raise error_util.VMwareDriverException() self._storage_policy_enabled = True diff --git a/cinder/volume/drivers/windows/remotefs.py b/cinder/volume/drivers/windows/remotefs.py index 0d9482e8a..0afa4d498 100644 --- a/cinder/volume/drivers/windows/remotefs.py +++ b/cinder/volume/drivers/windows/remotefs.py @@ -21,7 +21,7 @@ if sys.platform == 'win32': from cinder.brick.remotefs import remotefs from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE, _LI from cinder.openstack.common import log as logging LOG = logging.getLogger(__name__) @@ -112,7 +112,7 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient): options.get('pass')) try: - LOG.info(_('Mounting share: %s') % smbfs_share) + LOG.info(_LI('Mounting share: %s') % smbfs_share) 
self.smb_conn.Msft_SmbMapping.Create(**smb_opts) except wmi.x_wmi as exc: err_msg = (_( @@ -134,7 +134,7 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient): ctypes.pointer(total_bytes), ctypes.pointer(free_bytes)) if retcode == 0: - LOG.error(_("Could not get share %s capacity info.") % + LOG.error(_LE("Could not get share %s capacity info.") % smbfs_share) return 0, 0 return total_bytes.value, free_bytes.value diff --git a/cinder/volume/drivers/zfssa/restclient.py b/cinder/volume/drivers/zfssa/restclient.py index 4f98c22b5..48aa1400a 100644 --- a/cinder/volume/drivers/zfssa/restclient.py +++ b/cinder/volume/drivers/zfssa/restclient.py @@ -21,7 +21,7 @@ import StringIO import time import urllib2 -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log LOG = log.getLogger(__name__) @@ -279,21 +279,21 @@ class RestClientURL(object): if err.code == httplib.NOT_FOUND: LOG.debug('REST Not Found: %s' % err.code) else: - LOG.error(_('REST Not Available: %s') % err.code) + LOG.error(_LE('REST Not Available: %s') % err.code) if err.code == httplib.SERVICE_UNAVAILABLE and \ retry < maxreqretries: retry += 1 time.sleep(1) - LOG.error(_('Server Busy retry request: %s') % retry) + LOG.error(_LE('Server Busy retry request: %s') % retry) continue if (err.code == httplib.UNAUTHORIZED or err.code == httplib.INTERNAL_SERVER_ERROR) and \ '/access/v1' not in zfssaurl: try: - LOG.error(_('Authorizing request: ' - '%(zfssaurl)s' - 'retry: %(retry)d .') + LOG.error(_LE('Authorizing request: ' + '%(zfssaurl)s' + 'retry: %(retry)d .') % {'zfssaurl': zfssaurl, 'retry': retry}) self._authorize() @@ -308,7 +308,7 @@ class RestClientURL(object): return RestResult(err=err) except urllib2.URLError as err: - LOG.error(_('URLError: %s') % err.reason) + LOG.error(_LE('URLError: %s') % err.reason) raise RestClientError(-1, name="ERR_URLError", message=err.reason) diff --git a/cinder/volume/drivers/zfssa/zfssaiscsi.py b/cinder/volume/drivers/zfssa/zfssaiscsi.py index 1842c245f..882b28a12 100644 --- a/cinder/volume/drivers/zfssa/zfssaiscsi.py +++ b/cinder/volume/drivers/zfssa/zfssaiscsi.py @@ -20,7 +20,7 @@ from oslo.config import cfg from oslo.utils import units from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log from cinder.volume import driver from cinder.volume.drivers.san import san @@ -246,7 +246,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): snapshot['volume_name'], snapshot['name']) if has_clones: - LOG.error(_('Snapshot %s: has clones') % snapshot['name']) + LOG.error(_LE('Snapshot %s: has clones') % snapshot['name']) raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) self.zfssa.delete_snapshot(lcfg.zfssa_pool, @@ -360,9 +360,9 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): try: self.create_volume_from_snapshot(volume, zfssa_snapshot) except exception.VolumeBackendAPIException: - LOG.error(_('Clone Volume:' - '%(volume)s failed from source volume:' - '%(src_vref)s') + LOG.error(_LE('Clone Volume:' + '%(volume)s failed from source volume:' + '%(src_vref)s') % {'volume': volume['name'], 'src_vref': src_vref['name']}) # Cleanup snapshot diff --git a/cinder/volume/iscsi.py b/cinder/volume/iscsi.py index c424499f8..143f5fe8c 100644 --- a/cinder/volume/iscsi.py +++ b/cinder/volume/iscsi.py @@ -20,7 +20,7 @@ from oslo.concurrency import processutils as putils from cinder.brick.iscsi import iscsi from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LI from 
cinder.openstack.common import log as logging from cinder.volume import utils @@ -73,8 +73,8 @@ class _ExportMixin(object): try: iscsi_target = self._get_iscsi_target(context, volume['id']) except exception.NotFound: - LOG.info(_("Skipping remove_export. No iscsi_target " - "provisioned for volume: %s"), volume['id']) + LOG.info(_LI("Skipping remove_export. No iscsi_target " + "provisioned for volume: %s"), volume['id']) return try: @@ -88,8 +88,8 @@ class _ExportMixin(object): self.show_target(iscsi_target, iqn=iqn) except Exception: - LOG.info(_("Skipping remove_export. No iscsi_target " - "is presently exported for volume: %s"), volume['id']) + LOG.info(_LI("Skipping remove_export. No iscsi_target " + "is presently exported for volume: %s"), volume['id']) return self.remove_iscsi_target(iscsi_target, 0, volume['id'], volume['name']) @@ -99,8 +99,8 @@ class _ExportMixin(object): iscsi_target = self._get_target_for_ensure_export(context, volume['id']) if iscsi_target is None: - LOG.info(_("Skipping remove_export. No iscsi_target " - "provisioned for volume: %s"), volume['id']) + LOG.info(_LI("Skipping remove_export. No iscsi_target " + "provisioned for volume: %s"), volume['id']) return chap_auth = None # Check for https://bugs.launchpad.net/cinder/+bug/1065702 @@ -244,8 +244,8 @@ class LioAdm(_ExportMixin, iscsi.LioAdm): iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) except exception.NotFound: - LOG.info(_("Skipping remove_export. No iscsi_target " - "provisioned for volume: %s"), volume['id']) + LOG.info(_LI("Skipping remove_export. No iscsi_target " + "provisioned for volume: %s"), volume['id']) return self.remove_iscsi_target(iscsi_target, 0, volume['id'], volume['name']) @@ -255,8 +255,8 @@ class LioAdm(_ExportMixin, iscsi.LioAdm): try: volume_info = self.db.volume_get(context, volume['id']) except exception.NotFound: - LOG.info(_("Skipping ensure_export. No iscsi_target " - "provision for volume: %s"), volume['id']) + LOG.info(_LI("Skipping ensure_export. 
No iscsi_target " + "provision for volume: %s"), volume['id']) return (auth_method, diff --git a/cinder/volume/qos_specs.py b/cinder/volume/qos_specs.py index 64a84417f..e90ae13b2 100644 --- a/cinder/volume/qos_specs.py +++ b/cinder/volume/qos_specs.py @@ -22,7 +22,7 @@ from oslo.db import exception as db_exc from cinder import context from cinder import db from cinder import exception -from cinder.i18n import _ +from cinder.i18n import _, _LE from cinder.openstack.common import log as logging from cinder.volume import volume_types @@ -82,7 +82,7 @@ def create(context, name, specs=None): try: qos_specs_ref = db.qos_specs_create(context, values) except db_exc.DBError as e: - LOG.exception(_('DB error: %s') % e) + LOG.exception(_LE('DB error: %s') % e) raise exception.QoSSpecsCreateFailed(name=name, qos_specs=specs) return qos_specs_ref @@ -103,7 +103,7 @@ def update(context, qos_specs_id, specs): try: res = db.qos_specs_update(context, qos_specs_id, specs) except db_exc.DBError as e: - LOG.exception(_('DB error: %s') % e) + LOG.exception(_LE('DB error: %s') % e) raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id, qos_specs=specs) @@ -153,7 +153,7 @@ def get_associations(context, specs_id): # query returns a list of volume types associated with qos specs associates = db.qos_specs_associations_get(context, specs_id) except db_exc.DBError as e: - LOG.exception(_('DB error: %s') % e) + LOG.exception(_LE('DB error: %s') % e) msg = _('Failed to get all associations of ' 'qos specs %s') % specs_id LOG.warn(msg) @@ -195,7 +195,7 @@ def associate_qos_with_type(context, specs_id, type_id): else: db.qos_specs_associate(context, specs_id, type_id) except db_exc.DBError as e: - LOG.exception(_('DB error: %s') % e) + LOG.exception(_LE('DB error: %s') % e) LOG.warn(_('Failed to associate qos specs ' '%(id)s with type: %(vol_type_id)s') % dict(id=specs_id, vol_type_id=type_id)) @@ -209,7 +209,7 @@ def disassociate_qos_specs(context, specs_id, type_id): get_qos_specs(context, specs_id) db.qos_specs_disassociate(context, specs_id, type_id) except db_exc.DBError as e: - LOG.exception(_('DB error: %s') % e) + LOG.exception(_LE('DB error: %s') % e) LOG.warn(_('Failed to disassociate qos specs ' '%(id)s with type: %(vol_type_id)s') % dict(id=specs_id, vol_type_id=type_id)) @@ -223,7 +223,7 @@ def disassociate_all(context, specs_id): get_qos_specs(context, specs_id) db.qos_specs_disassociate_all(context, specs_id) except db_exc.DBError as e: - LOG.exception(_('DB error: %s') % e) + LOG.exception(_LE('DB error: %s') % e) LOG.warn(_('Failed to disassociate qos specs %s.') % specs_id) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=None)
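
For reference, the _LE/_LI markers imported from cinder.i18n throughout this patch are the per-log-level lazy translation hooks provided by oslo.i18n. The sketch below (not part of this patch; it assumes cinder/i18n.py follows the standard oslo.i18n TranslatorFactory pattern of this release) shows roughly how those names are wired up:

    # Illustrative sketch of cinder/i18n.py, assuming the standard
    # oslo.i18n TranslatorFactory pattern; not taken from this patch.
    from oslo import i18n

    _translators = i18n.TranslatorFactory(domain='cinder')

    # Primary translation function, kept as "_" for user-facing text
    # such as exception messages.
    _ = _translators.primary

    # Per-level translators for log messages; "L" stands for "log" and
    # the second letter matches the log level.
    _LI = _translators.log_info      # LOG.info(...)
    _LW = _translators.log_warning   # LOG.warn(...)
    _LE = _translators.log_error     # LOG.error(...) / LOG.exception(...)
    _LC = _translators.log_critical  # LOG.critical(...)

The convention applied in the hunks above is that each log call uses the marker matching its level (LOG.error/LOG.exception with _LE, LOG.info with _LI), while messages raised back to the caller, such as the CinderException text in cinder/volume/api.py, keep the plain _() translator.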