review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Implementing the use of _L'x'/i18n markers
author    Mike Mason <mikemason010@gmail.com>
          Mon, 17 Nov 2014 09:58:10 +0000 (09:58 +0000)
committer Mike Mason <mikemason010@gmail.com>
          Wed, 19 Nov 2014 10:07:29 +0000 (10:07 +0000)
Placing the _Lx markers back into the code. No other cleaner solution
has been implemented. Patches will be submitted in a series of
subdirectories and in a manageable fashion.
This is the sixth commit of this kind.

Partial-Bug: #1384312

Change-Id: I42b4e168deec9930571c1869fe1a181d4aad1112
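
The _LE/_LI/_LW names added below come from cinder.i18n. As a rough
sketch of how such level-specific markers are typically defined
(assuming the oslo.i18n TranslatorFactory API; the actual
cinder/i18n.py of this era may differ in module layout):

    import oslo.i18n

    _translators = oslo.i18n.TranslatorFactory(domain='cinder')

    # Primary translation function, reserved for user-facing messages.
    _ = _translators.primary

    # Level-specific translators for log messages, so operators can
    # maintain separate translation catalogues per log level.
    _LI = _translators.log_info      # pair with LOG.info()
    _LW = _translators.log_warning   # pair with LOG.warn()/LOG.warning()
    _LE = _translators.log_error     # pair with LOG.error()/LOG.exception()
    _LC = _translators.log_critical  # pair with LOG.critical()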

23 files changed:
cinder/volume/drivers/datera.py
cinder/volume/drivers/emc/emc_vmax_fast.py
cinder/volume/drivers/emc/emc_vmax_utils.py
cinder/volume/drivers/emc/emc_vnx_cli.py
cinder/volume/drivers/emc/xtremio.py
cinder/volume/drivers/eqlx.py
cinder/volume/drivers/fujitsu_eternus_dx_common.py
cinder/volume/drivers/glusterfs.py
cinder/volume/drivers/ibm/ibmnas.py
cinder/volume/drivers/lvm.py
cinder/volume/drivers/nimble.py
cinder/volume/drivers/prophetstor/dpl_iscsi.py
cinder/volume/drivers/prophetstor/dplcommon.py
cinder/volume/drivers/pure.py
cinder/volume/drivers/rbd.py
cinder/volume/drivers/sheepdog.py
cinder/volume/drivers/solidfire.py
cinder/volume/drivers/vmware/read_write_util.py
cinder/volume/flows/api/create_volume.py
cinder/volume/flows/common.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/flows/manager/manage_existing.py
cinder/volume/manager.py
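
Every hunk below applies the same mechanical substitution. A minimal
before/after sketch (the wrapper function is hypothetical, for
illustration only; the message is taken verbatim from the datera.py
hunk):

    from cinder.i18n import _, _LE
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def report_cluster_stats_failure():
        # Hypothetical wrapper, for illustration only.
        # Before this patch, the log message used the primary translator:
        #     LOG.error(_('Failed to get updated stats from Datera Cluster.'))
        # After, the level-matched marker replaces it, while plain _()
        # stays reserved for user-facing exception text:
        LOG.error(_LE('Failed to get updated stats from Datera Cluster.'))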

index 04dd3f7ec21f30519f0906abcab19f952424d60f..6c28072b2bbe01b205ff25022fb9a40f541021ba 100644 (file)
--- a/cinder/volume/drivers/datera.py
+++ b/cinder/volume/drivers/datera.py
@@ -19,7 +19,7 @@ from oslo.config import cfg
 import requests
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import units
 from cinder.volume.drivers.san import san
@@ -178,7 +178,7 @@ class DateraDriver(san.SanISCSIDriver):
         results = self._issue_api_request('cluster')
 
         if 'uuid' not in results:
-            LOG.error(_('Failed to get updated stats from Datera Cluster.'))
+            LOG.error(_LE('Failed to get updated stats from Datera Cluster.'))
 
         backend_name = self.configuration.safe_get('volume_backend_name')
         stats = {
index 9a2312cf947a2a664e273bce95699209141bdd99..677a2c7a2479806ab63467bd6fe2eb07d0f0ace3 100644 (file)
--- a/cinder/volume/drivers/emc/emc_vmax_fast.py
+++ b/cinder/volume/drivers/emc/emc_vmax_fast.py
@@ -15,7 +15,7 @@
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.emc import emc_vmax_provision
 from cinder.volume.drivers.emc import emc_vmax_utils
@@ -401,7 +401,8 @@ class EMCVMAXFast(object):
 
         if len(storageTierInstanceNames) == 0:
             storageTierInstanceNames = None
-            LOG.warn(_("Unable to get storage tiers from tier policy rule  "))
+            LOG.warn(_LW("Unable to get storage tiers "
+                         "from tier policy rule  "))
 
         return storageTierInstanceNames
 
@@ -509,7 +510,7 @@ class EMCVMAXFast(object):
                     storageGroupInstanceName, tierPolicyRuleInstanceName,
                     storageGroupName, fastPolicyName)
             except Exception as ex:
-                LOG.error(_("Exception: %s") % six.text_type(ex))
+                LOG.error(_LE("Exception: %s") % six.text_type(ex))
                 errorMessage = (_(
                     "Failed to add storage group %(storageGroupInstanceName)s "
                     " to tier policy rule %(tierPolicyRuleInstanceName)s")
@@ -576,14 +577,14 @@ class EMCVMAXFast(object):
             if rc != 0L:
                 rc, errordesc = self.utils.wait_for_job_complete(conn, job)
                 if rc != 0L:
-                    LOG.error(_("Error disassociating storage group from "
+                    LOG.error(_LE("Error disassociating storage group from "
                               "policy: %s") % errordesc)
                 else:
                     LOG.debug("Disassociated storage group from policy %s")
             else:
                 LOG.debug("ModifyStorageTierPolicyRule completed")
         except Exception as e:
-            LOG.info(_("Storage group not associated with the policy %s")
+            LOG.info(_LI("Storage group not associated with the policy %s")
                      % six.text_type(e))
 
     def get_pool_associated_to_policy(
@@ -651,7 +652,7 @@ class EMCVMAXFast(object):
             isTieringPolicySupported = self.is_tiering_policy_enabled(
                 conn, tierPolicyServiceInstanceName)
         except Exception as e:
-            LOG.error(_("Exception: %s") % six.text_type(e))
+            LOG.error(_LE("Exception: %s") % six.text_type(e))
             return False
 
         return isTieringPolicySupported
index ab7e1c4101d6b02736c52ecd8a3fb9685749f95d..59ba6db4ec71c5c5dd62ee171c65f7626863ace4 100644 (file)
--- a/cinder/volume/drivers/emc/emc_vmax_utils.py
+++ b/cinder/volume/drivers/emc/emc_vmax_utils.py
@@ -21,7 +21,7 @@ import six
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.volume import volume_types
@@ -57,7 +57,7 @@ class EMCVMAXUtils(object):
 
     def __init__(self, prtcl):
         if not pywbemAvailable:
-            LOG.info(_(
+            LOG.info(_LI(
                 'Module PyWBEM not installed.  '
                 'Install PyWBEM using the python-pywbem package.'))
         self.protocol = prtcl
@@ -290,8 +290,9 @@ class EMCVMAXUtils(object):
             if self._is_job_finished(conn, job):
                 raise loopingcall.LoopingCallDone()
             if self.retries > JOB_RETRIES:
-                LOG.error(_("_wait_for_job_complete failed after %(retries)d "
-                          "tries") % {'retries': self.retries})
+                LOG.error(_LE("_wait_for_job_complete "
+                              "failed after %(retries)d "
+                              "tries") % {'retries': self.retries})
 
                 raise loopingcall.LoopingCallDone()
             try:
@@ -300,7 +301,7 @@ class EMCVMAXUtils(object):
                     if self._is_job_finished(conn, job):
                         self.wait_for_job_called = True
             except Exception as e:
-                LOG.error(_("Exception: %s") % six.text_type(e))
+                LOG.error(_LE("Exception: %s") % six.text_type(e))
                 exceptionMessage = (_("Issue encountered waiting for job."))
                 LOG.error(exceptionMessage)
                 raise exception.VolumeBackendAPIException(exceptionMessage)
@@ -349,7 +350,7 @@ class EMCVMAXUtils(object):
             if self._is_sync_complete(conn, syncName):
                 raise loopingcall.LoopingCallDone()
             if self.retries > JOB_RETRIES:
-                LOG.error(_("_wait_for_sync failed after %(retries)d tries")
+                LOG.error(_LE("_wait_for_sync failed after %(retries)d tries")
                           % {'retries': self.retries})
                 raise loopingcall.LoopingCallDone()
             try:
@@ -358,7 +359,7 @@ class EMCVMAXUtils(object):
                     if self._is_sync_complete(conn, syncName):
                         self.wait_for_sync_called = True
             except Exception as e:
-                LOG.error(_("Exception: %s") % six.text_type(e))
+                LOG.error(_LE("Exception: %s") % six.text_type(e))
                 exceptionMessage = (_("Issue encountered waiting for "
                                       "synchronization."))
                 LOG.error(exceptionMessage)
@@ -661,7 +662,7 @@ class EMCVMAXUtils(object):
                       % {'fileName': fileName,
                          'fastPolicyName': fastPolicyName})
         else:
-            LOG.info(_("Fast Policy not found."))
+            LOG.info(_LI("Fast Policy not found."))
         return fastPolicyName
 
     def parse_array_name_from_file(self, fileName):
index 903d2161a5c602577eca48ebbbad87f4697e6dd4..a8abba9472b0aaedd3589e95df8ecd5757835009 100644 (file)
--- a/cinder/volume/drivers/emc/emc_vnx_cli.py
+++ b/cinder/volume/drivers/emc/emc_vnx_cli.py
@@ -26,7 +26,7 @@ import six
 
 from cinder import exception
 from cinder.exception import EMCVnxCLICmdError
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import excutils
 from cinder.openstack.common import jsonutils as json
 from cinder.openstack.common import lockutils
@@ -210,8 +210,8 @@ class CommandLineHelper(object):
         self.primary_storage_ip = self.active_storage_ip
         self.secondary_storage_ip = configuration.san_secondary_ip
         if self.secondary_storage_ip == self.primary_storage_ip:
-            LOG.warn(_("san_secondary_ip is configured as "
-                       "the same value as san_ip."))
+            LOG.warn(_LE("san_secondary_ip is configured as "
+                         "the same value as san_ip."))
             self.secondary_storage_ip = None
         if not configuration.san_ip:
             err_msg = _('san_ip: Mandatory field configuration. '
@@ -235,7 +235,7 @@ class CommandLineHelper(object):
         # if there is security file path provided, use this security file
         if storage_vnx_security_file:
             self.credentials = ('-secfilepath', storage_vnx_security_file)
-            LOG.info(_("Using security file in %s for authentication") %
+            LOG.info(_LI("Using security file in %s for authentication") %
                      storage_vnx_security_file)
         # if there is a username/password provided, use those in the cmd line
         elif storage_username is not None and len(storage_username) > 0 and\
@@ -243,19 +243,19 @@ class CommandLineHelper(object):
             self.credentials = ('-user', storage_username,
                                 '-password', storage_password,
                                 '-scope', storage_auth_type)
-            LOG.info(_("Plain text credentials are being used for "
-                       "authentication"))
+            LOG.info(_LI("Plain text credentials are being used for "
+                         "authentication"))
         else:
-            LOG.info(_("Neither security file nor plain "
-                       "text credentials are specified. Security file under "
-                       "home directory will be used for authentication "
-                       "if present."))
+            LOG.info(_LI("Neither security file nor plain "
+                         "text credentials are specified. Security file under "
+                         "home directory will be used for authentication "
+                         "if present."))
 
         self.iscsi_initiator_map = None
         if configuration.iscsi_initiators:
             self.iscsi_initiator_map = \
                 json.loads(configuration.iscsi_initiators)
-            LOG.info(_("iscsi_initiators: %s"), self.iscsi_initiator_map)
+            LOG.info(_LI("iscsi_initiators: %s"), self.iscsi_initiator_map)
 
         # extra spec constants
         self.pool_spec = 'storagetype:pool'
@@ -310,7 +310,7 @@ class CommandLineHelper(object):
         except EMCVnxCLICmdError as ex:
             with excutils.save_and_reraise_exception():
                 self.delete_lun(name)
-                LOG.error(_("Error on enable compression on lun %s.")
+                LOG.error(_LE("Error on enable compression on lun %s.")
                           % six.text_type(ex))
 
         # handle consistency group
@@ -321,8 +321,8 @@ class CommandLineHelper(object):
         except EMCVnxCLICmdError as ex:
             with excutils.save_and_reraise_exception():
                 self.delete_lun(name)
-                LOG.error(_("Error on adding lun to consistency"
-                            " group. %s") % six.text_type(ex))
+                LOG.error(_LE("Error on adding lun to consistency"
+                              " group. %s") % six.text_type(ex))
         return data
 
     @log_enter_exit
@@ -331,8 +331,8 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 4 and out.find('(0x712d8d04)') >= 0:
-                LOG.warn(_('LUN already exists, LUN name %(name)s. '
-                           'Message: %(msg)s') %
+                LOG.warn(_LW('LUN already exists, LUN name %(name)s. '
+                             'Message: %(msg)s') %
                          {'name': name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(cmd, rc, out)
@@ -359,8 +359,8 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 9 and out.find("not exist") >= 0:
-                LOG.warn(_("LUN is already deleted, LUN name %(name)s. "
-                           "Message: %(msg)s") %
+                LOG.warn(_LW("LUN is already deleted, LUN name %(name)s. "
+                             "Message: %(msg)s") %
                          {'name': name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_delete_lun, rc, out)
@@ -406,8 +406,8 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 4 and out.find("(0x712d8e04)") >= 0:
-                LOG.warn(_("LUN %(name)s is already expanded. "
-                           "Message: %(msg)s") %
+                LOG.warn(_LW("LUN %(name)s is already expanded. "
+                             "Message: %(msg)s") %
                          {'name': name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_expand_lun, rc, out)
@@ -463,8 +463,8 @@ class CommandLineHelper(object):
             # Ignore the error if consistency group already exists
             if (rc == 33 and
                     out.find("(0x716d8021)") >= 0):
-                LOG.warn(_('Consistency group %(name)s already '
-                           'exists. Message: %(msg)s') %
+                LOG.warn(_LW('Consistency group %(name)s already '
+                             'exists. Message: %(msg)s') %
                          {'name': cg_name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_create_cg, rc, out)
@@ -532,18 +532,18 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error if CG doesn't exist
             if rc == 13 and out.find(self.CLI_RESP_PATTERN_CG_NOT_FOUND) >= 0:
-                LOG.warn(_("CG %(cg_name)s does not exist. "
-                           "Message: %(msg)s") %
+                LOG.warn(_LW("CG %(cg_name)s does not exist. "
+                             "Message: %(msg)s") %
                          {'cg_name': cg_name, 'msg': out})
             elif rc == 1 and out.find("0x712d8801") >= 0:
-                LOG.warn(_("CG %(cg_name)s is deleting. "
-                           "Message: %(msg)s") %
+                LOG.warn(_LW("CG %(cg_name)s is deleting. "
+                             "Message: %(msg)s") %
                          {'cg_name': cg_name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(delete_cg_cmd, rc, out)
         else:
-            LOG.info(_('Consistency group %s was deleted '
-                       'successfully.') % cg_name)
+            LOG.info(_LI('Consistency group %s was deleted '
+                         'successfully.') % cg_name)
 
     @log_enter_exit
     def create_cgsnapshot(self, cgsnapshot):
@@ -562,8 +562,8 @@ class CommandLineHelper(object):
             # Ignore the error if cgsnapshot already exists
             if (rc == 5 and
                     out.find("(0x716d8005)") >= 0):
-                LOG.warn(_('Cgsnapshot name %(name)s already '
-                           'exists. Message: %(msg)s') %
+                LOG.warn(_LW('Cgsnapshot name %(name)s already '
+                             'exists. Message: %(msg)s') %
                          {'name': snap_name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(create_cg_snap_cmd, rc, out)
@@ -580,8 +580,8 @@ class CommandLineHelper(object):
             # Ignore the error if cgsnapshot does not exist.
             if (rc == 5 and
                     out.find(self.CLI_RESP_PATTERN_SNAP_NOT_FOUND) >= 0):
-                LOG.warn(_('Snapshot %(name)s for consistency group '
-                           'does not exist. Message: %(msg)s') %
+                LOG.warn(_LW('Snapshot %(name)s for consistency group '
+                             'does not exist. Message: %(msg)s') %
                          {'name': snap_name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(delete_cg_snap_cmd, rc, out)
@@ -601,8 +601,8 @@ class CommandLineHelper(object):
                 # Ignore the error that due to retry
                 if (rc == 5 and
                         out.find("(0x716d8005)") >= 0):
-                    LOG.warn(_('Snapshot %(name)s already exists. '
-                               'Message: %(msg)s') %
+                    LOG.warn(_LW('Snapshot %(name)s already exists. '
+                                 'Message: %(msg)s') %
                              {'name': name, 'msg': out})
                 else:
                     raise EMCVnxCLICmdError(command_create_snapshot, rc, out)
@@ -621,21 +621,21 @@ class CommandLineHelper(object):
             if rc != 0:
                 # Ignore the error that due to retry
                 if rc == 5 and out.find("not exist") >= 0:
-                    LOG.warn(_("Snapshot %(name)s may deleted already. "
-                               "Message: %(msg)s") %
+                    LOG.warn(_LW("Snapshot %(name)s may deleted already. "
+                                 "Message: %(msg)s") %
                              {'name': name, 'msg': out})
                     return True
                 # The snapshot cannot be destroyed because it is
                 # attached to a snapshot mount point. Wait
                 elif rc == 3 and out.find("(0x716d8003)") >= 0:
-                    LOG.warn(_("Snapshot %(name)s is in use, retry. "
-                               "Message: %(msg)s") %
+                    LOG.warn(_LW("Snapshot %(name)s is in use, retry. "
+                                 "Message: %(msg)s") %
                              {'name': name, 'msg': out})
                     return False
                 else:
                     raise EMCVnxCLICmdError(command_delete_snapshot, rc, out)
             else:
-                LOG.info(_('Snapshot %s was deleted successfully.') %
+                LOG.info(_LI('Snapshot %s was deleted successfully.') %
                          name)
                 return True
 
@@ -655,8 +655,8 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 4 and out.find("(0x712d8d04)") >= 0:
-                LOG.warn(_("Mount point %(name)s already exists. "
-                           "Message: %(msg)s") %
+                LOG.warn(_LW("Mount point %(name)s already exists. "
+                             "Message: %(msg)s") %
                          {'name': name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_create_mount_point, rc, out)
@@ -674,9 +674,9 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 85 and out.find('(0x716d8055)') >= 0:
-                LOG.warn(_("Snapshot %(snapname)s is attached to snapshot "
-                           "mount point %(mpname)s already. "
-                           "Message: %(msg)s") %
+                LOG.warn(_LW("Snapshot %(snapname)s is attached to snapshot "
+                             "mount point %(mpname)s already. "
+                             "Message: %(msg)s") %
                          {'snapname': snapshot_name,
                           'mpname': name,
                           'msg': out})
@@ -728,9 +728,9 @@ class CommandLineHelper(object):
         except EMCVnxCLICmdError as ex:
             migration_succeed = False
             if self._is_sp_unavailable_error(ex.out):
-                LOG.warn(_("Migration command may get network timeout. "
-                           "Double check whether migration in fact "
-                           "started successfully. Message: %(msg)s") %
+                LOG.warn(_LW("Migration command may get network timeout. "
+                             "Double check whether migration in fact "
+                             "started successfully. Message: %(msg)s") %
                          {'msg': ex.out})
                 command_migrate_list = ('migrate', '-list',
                                         '-source', src_id)
@@ -739,7 +739,7 @@ class CommandLineHelper(object):
                     migration_succeed = True
 
             if not migration_succeed:
-                LOG.warn(_("Start migration failed. Message: %s") %
+                LOG.warn(_LW("Start migration failed. Message: %s") %
                          ex.out)
                 LOG.debug("Delete temp LUN after migration "
                           "start failed. LUN: %s" % dst_name)
@@ -822,8 +822,8 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 66 and out.find("name already in use") >= 0:
-                LOG.warn(_('Storage group %(name)s already exists. '
-                           'Message: %(msg)s') %
+                LOG.warn(_LW('Storage group %(name)s already exists. '
+                             'Message: %(msg)s') %
                          {'name': name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_create_storage_group, rc, out)
@@ -839,9 +839,9 @@ class CommandLineHelper(object):
             # Ignore the error that due to retry
             if rc == 83 and out.find("group name or UID does not "
                                      "match any storage groups") >= 0:
-                LOG.warn(_("Storage group %(name)s doesn't exist, "
-                           "may have already been deleted. "
-                           "Message: %(msg)s") %
+                LOG.warn(_LW("Storage group %(name)s doesn't exist, "
+                             "may have already been deleted. "
+                             "Message: %(msg)s") %
                          {'name': name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_delete_storage_group, rc, out)
@@ -871,8 +871,8 @@ class CommandLineHelper(object):
             if rc == 116 and \
                 re.search("host is not.*connected to.*storage group",
                           out) is not None:
-                LOG.warn(_("Host %(host)s has already disconnected from "
-                           "storage group %(sgname)s. Message: %(msg)s") %
+                LOG.warn(_LW("Host %(host)s has already disconnected from "
+                             "storage group %(sgname)s. Message: %(msg)s") %
                          {'host': hostname, 'sgname': sg_name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_host_disconnect, rc, out)
@@ -891,9 +891,9 @@ class CommandLineHelper(object):
             if rc == 66 and \
                     re.search("LUN.*already.*added to.*Storage Group",
                               out) is not None:
-                LOG.warn(_("LUN %(lun)s has already added to "
-                           "Storage Group %(sgname)s. "
-                           "Message: %(msg)s") %
+                LOG.warn(_LW("LUN %(lun)s has already added to "
+                             "Storage Group %(sgname)s. "
+                             "Message: %(msg)s") %
                          {'lun': alu, 'sgname': sg_name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_add_hlu, rc, out)
@@ -911,8 +911,8 @@ class CommandLineHelper(object):
             # Ignore the error that due to retry
             if rc == 66 and\
                     out.find("No such Host LUN in this Storage Group") >= 0:
-                LOG.warn(_("HLU %(hlu)s has already been removed from "
-                           "%(sgname)s. Message: %(msg)s") %
+                LOG.warn(_LW("HLU %(hlu)s has already been removed from "
+                             "%(sgname)s. Message: %(msg)s") %
                          {'hlu': hlu, 'sgname': sg_name, 'msg': out})
             else:
                 raise EMCVnxCLICmdError(command_remove_hlu, rc, out)
@@ -1002,8 +1002,8 @@ class CommandLineHelper(object):
                 try:
                     return propertyDescriptor.converter(m.group(1))
                 except ValueError:
-                    LOG.error(_("Invalid value for %(key)s, "
-                                "value is %(value)s.") %
+                    LOG.error(_LE("Invalid value for %(key)s, "
+                                  "value is %(value)s.") %
                               {'key': propertyDescriptor.key,
                                'value': m.group(1)})
                     return None
@@ -1045,7 +1045,7 @@ class CommandLineHelper(object):
                     pool, self.POOL_FREE_CAPACITY)
                 temp_cache.append(obj)
         except Exception as ex:
-            LOG.error(_("Error happened during storage pool querying, %s.")
+            LOG.error(_LE("Error happened during storage pool querying, %s.")
                       % ex)
             # NOTE: Do not want to continue raise the exception
             # as the pools may temporarly unavailable
@@ -1066,8 +1066,8 @@ class CommandLineHelper(object):
             if m:
                 data['array_serial'] = m.group(1)
             else:
-                LOG.warn(_("No array serial number returned, "
-                           "set as unknown."))
+                LOG.warn(_LW("No array serial number returned, "
+                             "set as unknown."))
         else:
             raise EMCVnxCLICmdError(command_get_array_serial, rc, out)
 
@@ -1245,7 +1245,7 @@ class CommandLineHelper(object):
                 LOG.debug("See available iSCSI target: %s",
                           connection_pingnode)
                 return True
-        LOG.warn(_("See unavailable iSCSI target: %s"), connection_pingnode)
+        LOG.warn(_LW("See unavailable iSCSI target: %s"), connection_pingnode)
         return False
 
     @log_enter_exit
@@ -1372,8 +1372,8 @@ class CommandLineHelper(object):
             self.active_storage_ip == self.primary_storage_ip else\
             self.primary_storage_ip
 
-        LOG.info(_('Toggle storage_vnx_ip_address from %(old)s to '
-                   '%(new)s.') %
+        LOG.info(_LI('Toggle storage_vnx_ip_address from %(old)s to '
+                     '%(new)s.') %
                  {'old': old_ip,
                   'new': self.primary_storage_ip})
         return True
@@ -1451,13 +1451,13 @@ class EMCVnxCliBase(object):
                 FCSanLookupService(configuration=configuration)
         self.max_retries = 5
         if self.destroy_empty_sg:
-            LOG.warn(_("destroy_empty_storage_group: True. "
-                       "Empty storage group will be deleted "
-                       "after volume is detached."))
+            LOG.warn(_LW("destroy_empty_storage_group: True. "
+                         "Empty storage group will be deleted "
+                         "after volume is detached."))
         if not self.itor_auto_reg:
-            LOG.info(_("initiator_auto_registration: False. "
-                       "Initiator auto registration is not enabled. "
-                       "Please register initiator manually."))
+            LOG.info(_LI("initiator_auto_registration: False. "
+                         "Initiator auto registration is not enabled. "
+                         "Please register initiator manually."))
         self.hlu_set = set(xrange(1, self.max_luns_per_sg + 1))
         self._client = CommandLineHelper(self.configuration)
         self.array_serial = None
@@ -1488,10 +1488,10 @@ class EMCVnxCliBase(object):
         if not provisioning:
             provisioning = 'thick'
 
-        LOG.info(_('Create Volume: %(volume)s  Size: %(size)s '
-                   'pool: %(pool)s '
-                   'provisioning: %(provisioning)s '
-                   'tiering: %(tiering)s.')
+        LOG.info(_LI('Create Volume: %(volume)s  Size: %(size)s '
+                     'pool: %(pool)s '
+                     'provisioning: %(provisioning)s '
+                     'tiering: %(tiering)s.')
                  % {'volume': volumename,
                     'size': volumesize,
                     'pool': pool,
@@ -1536,7 +1536,7 @@ class EMCVnxCliBase(object):
         """check whether an extra spec's value is valid."""
 
         if not extra_spec or not valid_values:
-            LOG.error(_('The given extra_spec or valid_values is None.'))
+            LOG.error(_LE('The given extra_spec or valid_values is None.'))
         elif extra_spec not in valid_values:
             msg = _("The extra_spec: %s is invalid.") % extra_spec
             LOG.error(msg)
@@ -1614,9 +1614,9 @@ class EMCVnxCliBase(object):
         false_ret = (False, None)
 
         if 'location_info' not in host['capabilities']:
-            LOG.warn(_("Failed to get target_pool_name and "
-                       "target_array_serial. 'location_info' "
-                       "is not in host['capabilities']."))
+            LOG.warn(_LW("Failed to get target_pool_name and "
+                         "target_array_serial. 'location_info' "
+                         "is not in host['capabilities']."))
             return false_ret
 
         # mandatory info should be ok
@@ -1627,8 +1627,8 @@ class EMCVnxCliBase(object):
             target_pool_name = info_detail[0]
             target_array_serial = info_detail[1]
         except AttributeError:
-            LOG.warn(_("Error on parsing target_pool_name/"
-                       "target_array_serial."))
+            LOG.warn(_LW("Error on parsing target_pool_name/"
+                         "target_array_serial."))
             return false_ret
 
         if len(target_pool_name) == 0:
@@ -1745,8 +1745,8 @@ class EMCVnxCliBase(object):
                         volume, target_pool_name, new_type)[0]:
                     return True
                 else:
-                    LOG.warn(_('Storage-assisted migration failed during '
-                               'retype.'))
+                    LOG.warn(_LW('Storage-assisted migration failed during '
+                                 'retype.'))
                     return False
             else:
                 # migration is invalid
@@ -1860,7 +1860,7 @@ class EMCVnxCliBase(object):
         snapshotname = snapshot['name']
         volumename = snapshot['volume_name']
 
-        LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s')
+        LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s')
                  % {'snapshot': snapshotname,
                     'volume': volumename})
 
@@ -1872,7 +1872,7 @@ class EMCVnxCliBase(object):
 
         snapshotname = snapshot['name']
 
-        LOG.info(_('Delete Snapshot: %(snapshot)s')
+        LOG.info(_LI('Delete Snapshot: %(snapshot)s')
                  % {'snapshot': snapshotname})
 
         self._client.delete_snapshot(snapshotname)
@@ -1965,8 +1965,8 @@ class EMCVnxCliBase(object):
     @log_enter_exit
     def create_consistencygroup(self, context, group):
         """Create a consistency group."""
-        LOG.info(_('Start to create consistency group: %(group_name)s '
-                   'id: %(id)s') %
+        LOG.info(_LI('Start to create consistency group: %(group_name)s '
+                     'id: %(id)s') %
                  {'group_name': group['name'], 'id': group['id']})
 
         model_update = {'status': 'available'}
@@ -1988,7 +1988,7 @@ class EMCVnxCliBase(object):
 
         model_update = {}
         model_update['status'] = group['status']
-        LOG.info(_('Start to delete consistency group: %(cg_name)s')
+        LOG.info(_LI('Start to delete consistency group: %(cg_name)s')
                  % {'cg_name': cg_name})
         try:
             self._client.delete_consistencygroup(cg_name)
@@ -2016,8 +2016,8 @@ class EMCVnxCliBase(object):
             context, cgsnapshot_id)
 
         model_update = {}
-        LOG.info(_('Start to create cgsnapshot for consistency group'
-                   ': %(group_name)s') %
+        LOG.info(_LI('Start to create cgsnapshot for consistency group'
+                     ': %(group_name)s') %
                  {'group_name': cgsnapshot['consistencygroup_id']})
 
         try:
@@ -2043,8 +2043,8 @@ class EMCVnxCliBase(object):
 
         model_update = {}
         model_update['status'] = cgsnapshot['status']
-        LOG.info(_('Delete cgsnapshot %(snap_name)s for consistency group: '
-                   '%(group_name)s') % {'snap_name': cgsnapshot['id'],
+        LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
+                     '%(group_name)s') % {'snap_name': cgsnapshot['id'],
                  'group_name': cgsnapshot['consistencygroup_id']})
 
         try:
@@ -2102,7 +2102,7 @@ class EMCVnxCliBase(object):
                 # SG was not created or was destroyed by another concurrent
                 # operation before connected.
                 # Create SG and try to connect again
-                LOG.warn(_('Storage Group %s is not found. Create it.'),
+                LOG.warn(_LW('Storage Group %s is not found. Create it.'),
                          storage_group)
                 self.assure_storage_group(storage_group)
                 self._client.connect_host_to_storage_group(
@@ -2192,8 +2192,8 @@ class EMCVnxCliBase(object):
     def _register_iscsi_initiator(self, ip, host, initiator_uids):
         for initiator_uid in initiator_uids:
             iscsi_targets = self._client.get_iscsi_targets()
-            LOG.info(_('Get ISCSI targets %(tg)s to register '
-                       'initiator %(in)s.')
+            LOG.info(_LI('Get ISCSI targets %(tg)s to register '
+                         'initiator %(in)s.')
                      % ({'tg': iscsi_targets,
                          'in': initiator_uid}))
 
@@ -2217,7 +2217,7 @@ class EMCVnxCliBase(object):
     def _register_fc_initiator(self, ip, host, initiator_uids):
         for initiator_uid in initiator_uids:
             fc_targets = self._client.get_fc_targets()
-            LOG.info(_('Get FC targets %(tg)s to register initiator %(in)s.')
+            LOG.info(_LI('Get FC targets %(tg)s to register initiator %(in)s.')
                      % ({'tg': fc_targets,
                          'in': initiator_uid}))
 
@@ -2432,23 +2432,23 @@ class EMCVnxCliBase(object):
                 lun_map = self.get_lun_map(hostname)
             except EMCVnxCLICmdError as ex:
                 if ex.rc == 83:
-                    LOG.warn(_("Storage Group %s is not found. "
-                               "terminate_connection() is unnecessary."),
+                    LOG.warn(_LW("Storage Group %s is not found. "
+                                 "terminate_connection() is unnecessary."),
                              hostname)
                     return True
             try:
                 lun_id = self.get_lun_id(volume)
             except EMCVnxCLICmdError as ex:
                 if ex.rc == 9:
-                    LOG.warn(_("Volume %s is not found. "
-                               "It has probably been removed in VNX.")
+                    LOG.warn(_LW("Volume %s is not found. "
+                                 "It has probably been removed in VNX.")
                              % volume_name)
 
             if lun_id in lun_map:
                 self._client.remove_hlu_from_storagegroup(
                     lun_map[lun_id], hostname)
             else:
-                LOG.warn(_("Volume %(vol)s was not in Storage Group %(sg)s.")
+                LOG.warn(_LW("Volume %(vol)s was not in Storage Group %(sg)s.")
                          % {'vol': volume_name, 'sg': hostname})
             if self.destroy_empty_sg or self.zonemanager_lookup_service:
                 try:
@@ -2456,8 +2456,8 @@ class EMCVnxCliBase(object):
                     if not lun_map:
                         LOG.debug("Storage Group %s was empty.", hostname)
                         if self.destroy_empty_sg:
-                            LOG.info(_("Storage Group %s was empty, "
-                                       "destroy it."), hostname)
+                            LOG.info(_LI("Storage Group %s was empty, "
+                                         "destroy it."), hostname)
                             self._client.disconnect_host_from_storage_group(
                                 hostname, hostname)
                             self._client.delete_storage_group(hostname)
@@ -2466,7 +2466,7 @@ class EMCVnxCliBase(object):
                         LOG.debug("Storage Group %s not empty,", hostname)
                         return False
                 except Exception:
-                    LOG.warn(_("Failed to destroy Storage Group %s."),
+                    LOG.warn(_LW("Failed to destroy Storage Group %s."),
                              hostname)
             else:
                 return False
@@ -2585,7 +2585,7 @@ class EMCVnxCliPool(EMCVnxCliBase):
             if m is not None:
                 result = True if 'Enabled' == m.group(1) else False
             else:
-                LOG.error(_("Error parsing output for FastCache Command."))
+                LOG.error(_LE("Error parsing output for FastCache Command."))
         return result
 
     @log_enter_exit
index 438472a63afe2e4690712da0d832f4ca555de6e6..8888b6393dcb1f6a52b45d7c700da45f5ebe46a0 100644 (file)
--- a/cinder/volume/drivers/emc/xtremio.py
+++ b/cinder/volume/drivers/emc/xtremio.py
@@ -33,7 +33,7 @@ import urllib2
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 from cinder.volume.drivers.san import san
@@ -85,18 +85,19 @@ class XtremIOVolumeDriver(san.SanDriver):
             if exc.code == 400 and hasattr(exc, 'read'):
                 error = json.load(exc)
                 if error['message'].endswith('obj_not_found'):
-                    LOG.warning(_("object %(key)s of type %(typ)s not found"),
+                    LOG.warning(_LW("object %(key)s of "
+                                    "type %(typ)s not found"),
                                 {'key': key, 'typ': object_type})
                     raise exception.NotFound()
                 elif error['message'] == 'vol_obj_name_not_unique':
-                    LOG.error(_("can't create 2 volumes with the same name"))
+                    LOG.error(_LE("can't create 2 volumes with the same name"))
                     msg = (_('Volume by this name already exists'))
                     raise exception.VolumeBackendAPIException(data=msg)
-            LOG.error(_('Bad response from XMS, %s'), exc.read())
+            LOG.error(_LE('Bad response from XMS, %s'), exc.read())
             msg = (_('Exception: %s') % six.text_type(exc))
             raise exception.VolumeDriverException(message=msg)
         if response.code >= 300:
-            LOG.error(_('bad API response, %s'), response.msg)
+            LOG.error(_LE('bad API response, %s'), response.msg)
             msg = (_('bad response from XMS got http code %(code)d, %(msg)s') %
                    {'code': response.code, 'msg': response.msg})
             raise exception.VolumeBackendAPIException(data=msg)
@@ -148,7 +149,7 @@ class XtremIOVolumeDriver(san.SanDriver):
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
         else:
-            LOG.info(_('XtremIO SW version %s'), sys['sys-sw-version'])
+            LOG.info(_LI('XtremIO SW version %s'), sys['sys-sw-version'])
 
     def create_volume(self, volume):
         "Creates a volume"
@@ -177,7 +178,7 @@ class XtremIOVolumeDriver(san.SanDriver):
         try:
             self.req('volumes', 'DELETE', name=volume['id'])
         except exception.NotFound:
-            LOG.info(_("volume %s doesn't exist"), volume['id'])
+            LOG.info(_LI("volume %s doesn't exist"), volume['id'])
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
@@ -191,7 +192,7 @@ class XtremIOVolumeDriver(san.SanDriver):
         try:
             self.req('volumes', 'DELETE', name=snapshot.id)
         except exception.NotFound:
-            LOG.info(_("snapshot %s doesn't exist"), snapshot.id)
+            LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id)
 
     def _update_volume_stats(self):
         self._stats = {'volume_backend_name': self.backend_name,
@@ -235,10 +236,10 @@ class XtremIOVolumeDriver(san.SanDriver):
             lm_name = '%s_%s_%s' % (str(vol['index']),
                                     str(ig['index']) if ig else 'any',
                                     str(tg['index']))
-            LOG.info(_('removing lun map %s'), lm_name)
+            LOG.info(_LI('removing lun map %s'), lm_name)
             self.req('lun-maps', 'DELETE', name=lm_name)
         except exception.NotFound:
-            LOG.warning(_("terminate_connection: lun map not found"))
+            LOG.warning(_LW("terminate_connection: lun map not found"))
 
     def _find_lunmap(self, ig_name, vol_name):
         try:
@@ -271,17 +272,17 @@ class XtremIOVolumeDriver(san.SanDriver):
             res = self.req('lun-maps', 'POST', {'ig-id': ig['ig-id'][2],
                                                 'vol-id': volume['id']})
             lunmap = self._obj_from_result(res)
-            LOG.info(_('created lunmap\n%s'), lunmap)
+            LOG.info(_LI('created lunmap\n%s'), lunmap)
         except urllib2.HTTPError as exc:
             if exc.code == 400:
                 error = json.load(exc)
                 if 'already_mapped' in error.message:
-                    LOG.info(_('volume already mapped,'
-                               ' trying to retrieve it %(ig)s, %(vol)d'),
+                    LOG.info(_LI('volume already mapped,'
+                                 ' trying to retrieve it %(ig)s, %(vol)d'),
                              {'ig': ig['ig-id'][1], 'vol': volume['id']})
                     lunmap = self._find_lunmap(ig['ig-id'][1], volume['id'])
                 elif error.message == 'vol_obj_not_found':
-                    LOG.error(_("Can't find volume to map %s"), volume['id'])
+                    LOG.error(_LE("Can't find volume to map %s"), volume['id'])
                     raise exception.VolumeNotFound(volume_id=volume['id'])
                 else:
                     raise
@@ -350,8 +351,8 @@ class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver):
                                    'password']
                 # delete the initiator to create a new one with password
                 if not chap_passwd:
-                    LOG.info(_('initiator has no password while using chap,'
-                             'removing it'))
+                    LOG.info(_LI('initiator has no password while using chap,'
+                                 'removing it'))
                     self.req('initiators', 'DELETE', name=initiator)
                     # check if the initiator already exists
                     raise exception.NotFound()
index 357aed87d1ee5b159a9f93cc32da9c9586d0ad90..94be9d68c5f61b0d428c02b4e3f92c090d746e55 100644 (file)
--- a/cinder/volume/drivers/eqlx.py
+++ b/cinder/volume/drivers/eqlx.py
@@ -367,8 +367,8 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
             self._eql_execute(*cmd)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Failed to add multihost-access'
-                          for volume "%s".'),
+                LOG.error(_LE('Failed to add multihost-access '
+                              'for volume "%s".'),
                           volume['name'])
 
     def delete_volume(self, volume):
@@ -382,7 +382,8 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
                      volume['name'])
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Failed to delete volume "%s".'), volume['name'])
+                LOG.error(_LE('Failed to delete '
+                              'volume "%s".'), volume['name'])
 
     def create_snapshot(self, snapshot):
         """"Create snapshot of existing volume on appliance."""
@@ -435,7 +436,7 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE('Failed to delete snapshot %(snap)s of '
-                          'volume %(vol)s.'),
+                              'volume %(vol)s.'),
                           {'snap': snapshot['name'],
                            'vol': snapshot['volume_name']})
 
@@ -455,8 +456,8 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
             }
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Failed to initialize connection'
-                          to volume "%s".'),
+                LOG.error(_LE('Failed to initialize connection '
+                              'to volume "%s".'),
                           volume['name'])
 
     def terminate_connection(self, volume, connector, force=False, **kwargs):
@@ -470,8 +471,8 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
                                   'access', 'delete', connection_id)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Failed to terminate connection'
-                          to volume "%s".'),
+                LOG.error(_LE('Failed to terminate connection '
+                              'to volume "%s".'),
                           volume['name'])
 
     def create_export(self, context, volume):
@@ -516,9 +517,9 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE('Failed to extend_volume %(name)s from '
-                          '%(current_size)sGB to %(new_size)sGB.'),
+                              '%(current_size)sGB to %(new_size)sGB.'),
                           {'name': volume['name'],
-                          'current_size': volume['size'],
+                           'current_size': volume['size'],
                            'new_size': new_size})
 
     def local_path(self, volume):
index 5cd3d51fe8d471f00280dab9f4e2da16985b41d4..53495f364a87b6464c3de6b1a227f8777e1ebca7 100644 (file)
--- a/cinder/volume/drivers/fujitsu_eternus_dx_common.py
+++ b/cinder/volume/drivers/fujitsu_eternus_dx_common.py
@@ -29,7 +29,7 @@ from oslo.config import cfg
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.openstack.common import units
@@ -244,8 +244,8 @@ class FJDXCommon(object):
                 errordesc = RETCODE_dic[six.text_type(rc)]
 
             if rc != 0L:
-                LOG.error(_('Error Create Volume: %(volumename)s.  '
-                          'Return code: %(rc)lu.  Error: %(error)s')
+                LOG.error(_LE('Error Create Volume: %(volumename)s.  '
+                              'Return code: %(rc)lu.  Error: %(error)s')
                           % {'volumename': volumename,
                              'rc': rc,
                              'error': errordesc})
@@ -516,8 +516,8 @@ class FJDXCommon(object):
 
         vol_instance = self._find_lun(volume)
         if vol_instance is None:
-            LOG.error(_('Volume %(name)s not found on the array. '
-                        'No volume to delete.')
+            LOG.error(_LE('Volume %(name)s not found on the array. '
+                          'No volume to delete.')
                       % {'name': volumename})
             return
 
@@ -593,8 +593,8 @@ class FJDXCommon(object):
 
         repservice = self._find_replication_service(storage_system)
         if repservice is None:
-            LOG.error(_("Cannot find Replication Service to create snapshot "
-                        "for volume %s.") % volumename)
+            LOG.error(_LE("Cannot find Replication Service to create snapshot "
+                          "for volume %s.") % volumename)
             exception_message = (_("Cannot find Replication Service to "
                                    "create snapshot for volume %s.")
                                  % volumename)
@@ -716,8 +716,8 @@ class FJDXCommon(object):
         sync_name, storage_system =\
             self._find_storage_sync_sv_sv(snapshot, volume, False)
         if sync_name is None:
-            LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s '
-                        'not found on the array. No snapshot to delete.')
+            LOG.error(_LE('Snapshot: %(snapshot)s: volume: %(volume)s '
+                          'not found on the array. No snapshot to delete.')
                       % {'snapshot': snapshotname,
                          'volume': volumename})
             return
@@ -789,8 +789,8 @@ class FJDXCommon(object):
                                 'volume': volumename})
                     raise loopingcall.LoopingCallDone()
                 if int(time.time()) - start >= wait_timeout:
-                    LOG.warn(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
-                               'Snapshot deleted but cleanup timed out.')
+                    LOG.warn(_LW('Snapshot: %(snapshot)s: volume: %(volume)s. '
+                                 'Snapshot deleted but cleanup timed out.')
                              % {'snapshot': snapshotname,
                                 'volume': volumename})
                     raise loopingcall.LoopingCallDone()
@@ -802,9 +802,9 @@ class FJDXCommon(object):
                              % {'snapshot': snapshotname,
                                 'volume': volumename})
                 else:
-                    LOG.warn(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
-                               'Snapshot deleted but error during cleanup. '
-                               'Error: %(error)s')
+                    LOG.warn(_LW('Snapshot: %(snapshot)s: volume: %(volume)s. '
+                                 'Snapshot deleted but error during cleanup. '
+                                 'Error: %(error)s')
                              % {'snapshot': snapshotname,
                                 'volume': volumename,
                                 'error': six.text_type(ex.args)})
index ed6404e066c629471c6acb3f8f8125db0045c967..9e98c1af46a3d6c174fde76a611da675907684ec 100644 (file)
--- a/cinder/volume/drivers/glusterfs.py
+++ b/cinder/volume/drivers/glusterfs.py
@@ -24,7 +24,7 @@ from cinder.brick.remotefs import remotefs as remotefs_brick
 from cinder import compute
 from cinder import db
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import log as logging
@@ -125,7 +125,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
             try:
                 self._do_umount(True, share)
             except Exception as exc:
-                LOG.warning(_('Exception during unmounting %s') % (exc))
+                LOG.warning(_LE('Exception during unmounting %s') % (exc))
 
     def _do_umount(self, ignore_not_mounted, share):
         mount_path = self._get_mount_point_for_share(share)
@@ -134,9 +134,9 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
             self._execute(*command, run_as_root=True)
         except processutils.ProcessExecutionError as exc:
             if ignore_not_mounted and 'not mounted' in exc.stderr:
-                LOG.info(_("%s is already umounted"), share)
+                LOG.info(_LI("%s is already umounted"), share)
             else:
-                LOG.error(_("Failed to umount %(share)s, reason=%(stderr)s"),
+                LOG.error(_LE("Failed to umount %(share)s, reason=%(stderr)s"),
                           {'share': share, 'stderr': exc.stderr})
                 raise
 
@@ -145,7 +145,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
             self._unmount_shares()
         except processutils.ProcessExecutionError as exc:
             if 'target is busy' in exc.stderr:
-                LOG.warn(_("Failed to refresh mounts, reason=%s") %
+                LOG.warn(_LW("Failed to refresh mounts, reason=%s") %
                          exc.stderr)
             else:
                 raise
@@ -179,7 +179,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
 
         volume['provider_location'] = self._find_share(volume['size'])
 
-        LOG.info(_('casted to %s') % volume['provider_location'])
+        LOG.info(_LI('casted to %s') % volume['provider_location'])
 
         self._do_create_volume(volume)
 
@@ -234,8 +234,9 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
         """Deletes a logical volume."""
 
         if not volume['provider_location']:
-            LOG.warn(_('Volume %s does not have provider_location specified, '
-                     'skipping'), volume['name'])
+            LOG.warn(_LW('Volume %s does not have '
+                         'provider_location specified, '
+                         'skipping'), volume['name'])
             return
 
         self._ensure_share_mounted(volume['provider_location'])
@@ -313,7 +314,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
                 snapshot['id'],
                 delete_info)
         except Exception as e:
-            LOG.error(_('Call to Nova delete snapshot failed'))
+            LOG.error(_LE('Call to Nova delete snapshot failed'))
             LOG.exception(e)
             raise e
 
@@ -477,7 +478,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
                 self._ensure_share_mounted(share)
                 self._mounted_shares.append(share)
             except Exception as exc:
-                LOG.error(_('Exception during mounting %s') % (exc,))
+                LOG.error(_LE('Exception during mounting %s') % (exc,))
 
         LOG.debug('Available shares: %s' % self._mounted_shares)
 
@@ -601,7 +602,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
                 connection_info)
             LOG.debug('nova call result: %s' % result)
         except Exception as e:
-            LOG.error(_('Call to Nova to create snapshot failed'))
+            LOG.error(_LE('Call to Nova to create snapshot failed'))
             LOG.exception(e)
             raise e
 
index 99e8fc7f39cfaa96da6da21e21ef0ec189c6e443..102d8e7f3aebe10488f520ca2ec1b14ca7e9ab23 100644 (file)
--- a/cinder/volume/drivers/ibm/ibmnas.py
+++ b/cinder/volume/drivers/ibm/ibmnas.py
@@ -35,7 +35,7 @@ import re
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import processutils
@@ -88,7 +88,7 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
         self.configuration.san_ssh_port = self.configuration.nas_ssh_port
         self.configuration.ibmnas_platform_type = \
             self.configuration.ibmnas_platform_type.lower()
-        LOG.info(_('Initialized driver for IBMNAS Platform: %s.'),
+        LOG.info(_LI('Initialized driver for IBMNAS Platform: %s.'),
                  self.configuration.ibmnas_platform_type)
 
     def set_execute(self, execute):
@@ -284,8 +284,9 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
     def delete_volume(self, volume):
         """Deletes a logical volume."""
         if not volume['provider_location']:
-            LOG.warn(_('Volume %s does not have provider_location specified, '
-                     'skipping.'), volume['name'])
+            LOG.warn(_LW('Volume %s does not have '
+                         'provider_location specified, '
+                         'skipping.'), volume['name'])
             return
 
         export_path = self._get_export_path(volume['id'])
index 8f6c6ae7ad5040e4206d75c8adf2b20d2e11ce78..a51b1e7323e81000531835121f4324cecfee9ee9 100644 (file)
--- a/cinder/volume/drivers/lvm.py
+++ b/cinder/volume/drivers/lvm.py
@@ -27,7 +27,7 @@ from oslo.config import cfg
 from cinder.brick import exception as brick_exception
 from cinder.brick.local_dev import lvm as lvm
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import log as logging
@@ -227,8 +227,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
             return True
 
         if self.vg.lv_has_snapshot(volume['name']):
-            LOG.error(_('Unabled to delete due to existing snapshot '
-                        'for volume: %s') % volume['name'])
+            LOG.error(_LE('Unable to delete due to existing snapshot '
+                          'for volume: %s') % volume['name'])
             raise exception.VolumeIsBusy(volume_name=volume['name'])
 
         self._delete_volume(volume)
@@ -244,8 +244,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
         """Deletes a snapshot."""
         if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
             # If the snapshot isn't present, then don't attempt to delete
-            LOG.warning(_("snapshot: %s not found, "
-                          "skipping delete operations") % snapshot['name'])
+            LOG.warning(_LW("snapshot: %s not found, "
+                            "skipping delete operations") % snapshot['name'])
             return True
 
         # TODO(yamahata): zeroing out the whole snapshot triggers COW.
@@ -282,7 +282,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
         mirror_count = 0
         if self.configuration.lvm_mirrors:
             mirror_count = self.configuration.lvm_mirrors
-        LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
+        LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
         volume_name = src_vref['name']
         temp_id = 'tmp-snap-%s' % volume['id']
         temp_snapshot = {'volume_name': volume_name,
@@ -346,8 +346,9 @@ class LVMVolumeDriver(driver.VolumeDriver):
 
         LOG.debug("Updating volume stats")
         if self.vg is None:
-            LOG.warning(_('Unable to update stats on non-initialized '
-                          'Volume Group: %s'), self.configuration.volume_group)
+            LOG.warning(_LW('Unable to update stats on non-initialized '
+                            'Volume Group: %s'),
+                        self.configuration.volume_group)
             return
 
         self.vg.update_volume_group_info()
@@ -523,8 +524,8 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
                 if attempts == 0:
                     raise
                 else:
-                    LOG.warning(_('Error creating iSCSI target, retrying '
-                                  'creation for target: %s') % iscsi_name)
+                    LOG.warning(_LW('Error creating iSCSI target, retrying '
+                                    'creation for target: %s') % iscsi_name)
         return tid
 
     def ensure_export(self, context, volume):
index d9bf96dc79054ff89112c25b4a29f24ec31b0944..8b497ad8387b2b54ab877bd538ccb332ebe68579 100644 (file)
@@ -28,7 +28,7 @@ from oslo.config import cfg
 from suds import client
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import units
 from cinder.volume.drivers.san.san import SanISCSIDriver
@@ -97,31 +97,31 @@ class NimbleISCSIDriver(SanISCSIDriver):
                   % {'netlabel': subnet_label, 'netconf': netconfig})
         ret_discovery_ip = None
         for subnet in netconfig['subnet-list']:
-            LOG.info(_('Exploring array subnet label %s') % subnet['label'])
+            LOG.info(_LI('Exploring array subnet label %s') % subnet['label'])
             if subnet_label == '*':
                 # Use the first data subnet, save mgmt+data for later
                 if (subnet['subnet-id']['type'] == SM_SUBNET_DATA):
-                    LOG.info(_('Discovery ip %(disc_ip)s is used '
-                               'on data subnet %(net_label)s')
+                    LOG.info(_LI('Discovery ip %(disc_ip)s is used '
+                                 'on data subnet %(net_label)s')
                              % {'disc_ip': subnet['discovery-ip'],
                                 'net_label': subnet['label']})
                     return subnet['discovery-ip']
                 elif (subnet['subnet-id']['type'] ==
                         SM_SUBNET_MGMT_PLUS_DATA):
-                    LOG.info(_('Discovery ip %(disc_ip)s is found'
-                               ' on mgmt+data subnet %(net_label)s')
+                    LOG.info(_LI('Discovery ip %(disc_ip)s is found'
+                                 ' on mgmt+data subnet %(net_label)s')
                              % {'disc_ip': subnet['discovery-ip'],
                                 'net_label': subnet['label']})
                     ret_discovery_ip = subnet['discovery-ip']
             # If subnet is specified and found, use the subnet
             elif subnet_label == subnet['label']:
-                LOG.info(_('Discovery ip %(disc_ip)s is used'
-                           ' on subnet %(net_label)s')
+                LOG.info(_LI('Discovery ip %(disc_ip)s is used'
+                             ' on subnet %(net_label)s')
                          % {'disc_ip': subnet['discovery-ip'],
                             'net_label': subnet['label']})
                 return subnet['discovery-ip']
         if ret_discovery_ip:
-            LOG.info(_('Discovery ip %s is used on mgmt+data subnet')
+            LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet')
                      % ret_discovery_ip)
             return ret_discovery_ip
         else:
@@ -137,9 +137,9 @@ class NimbleISCSIDriver(SanISCSIDriver):
                 password=self.configuration.san_password,
                 ip=self.configuration.san_ip)
         except Exception:
-            LOG.error(_('Failed to create SOAP client.'
-                        'Check san_ip, username, password'
-                        ' and make sure the array version is compatible'))
+            LOG.error(_LE('Failed to create SOAP client. '
+                          'Check san_ip, username, password '
+                          'and make sure the array version is compatible'))
             raise
 
     def _get_provider_location(self, volume_name):
@@ -150,7 +150,7 @@ class NimbleISCSIDriver(SanISCSIDriver):
         target_ipaddr = self._get_discovery_ip(netconfig)
         iscsi_portal = target_ipaddr + ':3260'
         provider_location = '%s %s %s' % (iscsi_portal, iqn, LUN_ID)
-        LOG.info(_('vol_name=%(name)s provider_location=%(loc)s')
+        LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s')
                  % {'name': volume_name, 'loc': provider_location})
         return provider_location
 
@@ -274,7 +274,7 @@ class NimbleISCSIDriver(SanISCSIDriver):
     def extend_volume(self, volume, new_size):
         """Extend an existing volume."""
         volume_name = volume['name']
-        LOG.info(_('Entering extend_volume volume=%(vol)s new_size=%(size)s')
+        LOG.info(_LI('Entering extend_volume volume=%(vol)s new_size=%(size)s')
                  % {'vol': volume_name, 'size': new_size})
         vol_size = int(new_size) * units.Gi
         reserve = not self.configuration.san_thin_provision
@@ -291,7 +291,8 @@ class NimbleISCSIDriver(SanISCSIDriver):
     def _create_igroup_for_initiator(self, initiator_name):
         """Creates igroup for an initiator and returns the igroup name."""
         igrp_name = 'openstack-' + self._generate_random_string(12)
-        LOG.info(_('Creating initiator group %(grp)s with initiator %(iname)s')
+        LOG.info(_LI('Creating initiator group %(grp)s '
+                     'with initiator %(iname)s')
                  % {'grp': igrp_name, 'iname': initiator_name})
         self.APIExecutor.create_initiator_group(igrp_name, initiator_name)
         return igrp_name
@@ -303,17 +304,18 @@ class NimbleISCSIDriver(SanISCSIDriver):
                 if (len(initiator_group['initiator-list']) == 1 and
                     initiator_group['initiator-list'][0]['name'] ==
                         initiator_name):
-                    LOG.info(_('igroup %(grp)s found for initiator %(iname)s')
+                    LOG.info(_LI('igroup %(grp)s found for '
+                                 'initiator %(iname)s')
                              % {'grp': initiator_group['name'],
                                 'iname': initiator_name})
                     return initiator_group['name']
-        LOG.info(_('No igroup found for initiator %s') % initiator_name)
+        LOG.info(_LI('No igroup found for initiator %s') % initiator_name)
         return None
 
     def initialize_connection(self, volume, connector):
         """Driver entry point to attach a volume to an instance."""
-        LOG.info(_('Entering initialize_connection volume=%(vol)s'
-                   ' connector=%(conn)s location=%(loc)s')
+        LOG.info(_LI('Entering initialize_connection volume=%(vol)s'
+                     ' connector=%(conn)s location=%(loc)s')
                  % {'vol': volume,
                     'conn': connector,
                     'loc': volume['provider_location']})
@@ -323,7 +325,7 @@ class NimbleISCSIDriver(SanISCSIDriver):
         if not initiator_group_name:
             initiator_group_name = self._create_igroup_for_initiator(
                 initiator_name)
-        LOG.info(_('Initiator group name is %(grp)s for initiator %(iname)s')
+        LOG.info(_LI('Initiator group name is %(grp)s for initiator %(iname)s')
                  % {'grp': initiator_group_name, 'iname': initiator_name})
         self.APIExecutor.add_acl(volume, initiator_group_name)
         (iscsi_portal, iqn, lun_num) = volume['provider_location'].split()
@@ -340,8 +342,8 @@ class NimbleISCSIDriver(SanISCSIDriver):
 
     def terminate_connection(self, volume, connector, **kwargs):
         """Driver entry point to unattach a volume from an instance."""
-        LOG.info(_('Entering terminate_connection volume=%(vol)s'
-                   ' connector=%(conn)s location=%(loc)s.')
+        LOG.info(_LI('Entering terminate_connection volume=%(vol)s'
+                     ' connector=%(conn)s location=%(loc)s.')
                  % {'vol': volume,
                     'conn': connector,
                     'loc': volume['provider_location']})
@@ -386,12 +388,12 @@ def _connection_checker(func):
                 return func(self, *args, **kwargs)
             except NimbleAPIException as e:
                 if attempts < 1 and (re.search('SM-eaccess', str(e))):
-                    LOG.info(_('Session might have expired.'
-                               ' Trying to relogin'))
+                    LOG.info(_LI('Session might have expired.'
+                                 ' Trying to relogin'))
                     self.login()
                     continue
                 else:
-                    LOG.error(_('Re-throwing Exception %s') % e)
+                    LOG.error(_LE('Re-throwing Exception %s') % e)
                     raise
     return inner_connection_checker
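
[Reviewer note] The hunk above touches _connection_checker, which retries an API call once after re-login when the session has expired. A standalone sketch of the same retry shape; SessionExpired and login() are illustrative stand-ins, not the driver's real names:

    import functools
    import re

    class SessionExpired(Exception):
        """Stand-in for the driver's session-expiry error (hypothetical)."""

    def connection_checker(func):
        @functools.wraps(func)
        def inner(self, *args, **kwargs):
            for attempt in range(2):
                try:
                    return func(self, *args, **kwargs)
                except SessionExpired as e:
                    # Re-login and retry once when the session has expired.
                    if attempt < 1 and re.search('SM-eaccess', str(e)):
                        self.login()
                        continue
                    raise
        return inner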
 
@@ -475,8 +477,8 @@ class NimbleAPIExecutor:
         # Limit description size to 254 characters
         description = description[:254]
 
-        LOG.info(_('Creating a new volume=%(vol)s size=%(size)s'
-                   ' reserve=%(reserve)s in pool=%(pool)s')
+        LOG.info(_LI('Creating a new volume=%(vol)s size=%(size)s'
+                     ' reserve=%(reserve)s in pool=%(pool)s')
                  % {'vol': volume['name'],
                     'size': volume_size,
                     'reserve': reserve,
@@ -497,7 +499,7 @@ class NimbleAPIExecutor:
     def create_vol(self, volume, pool_name, reserve):
         """Execute createVol API."""
         response = self._execute_create_vol(volume, pool_name, reserve)
-        LOG.info(_('Successfully create volume %s') % response['name'])
+        LOG.info(_LI('Successfully created volume %s') % response['name'])
         return response['name']
 
     @_connection_checker
@@ -516,8 +518,8 @@ class NimbleAPIExecutor:
     @_response_checker
     def add_acl(self, volume, initiator_group_name):
         """Execute addAcl API."""
-        LOG.info(_('Adding ACL to volume=%(vol)s with'
-                   ' initiator group name %(igrp)s')
+        LOG.info(_LI('Adding ACL to volume=%(vol)s with'
+                     ' initiator group name %(igrp)s')
                  % {'vol': volume['name'],
                     'igrp': initiator_group_name})
         return self.client.service.addVolAcl(
@@ -531,8 +533,8 @@ class NimbleAPIExecutor:
     @_response_checker
     def remove_acl(self, volume, initiator_group_name):
         """Execute removeVolAcl API."""
-        LOG.info(_('Removing ACL from volume=%(vol)s'
-                   ' for initiator group %(igrp)s')
+        LOG.info(_LI('Removing ACL from volume=%(vol)s'
+                     ' for initiator group %(igrp)s')
                  % {'vol': volume['name'],
                     'igrp': initiator_group_name})
         return self.client.service.removeVolAcl(
@@ -545,14 +547,15 @@ class NimbleAPIExecutor:
     @_connection_checker
     @_response_checker
     def _execute_get_vol_info(self, vol_name):
-        LOG.info(_('Getting volume information for vol_name=%s') % (vol_name))
+        LOG.info(_LI('Getting volume information '
+                     'for vol_name=%s') % (vol_name))
         return self.client.service.getVolInfo(request={'sid': self.sid,
                                                        'name': vol_name})
 
     def get_vol_info(self, vol_name):
         """Execute getVolInfo API."""
         response = self._execute_get_vol_info(vol_name)
-        LOG.info(_('Successfully got volume information for volume %s')
+        LOG.info(_LI('Successfully got volume information for volume %s')
                  % vol_name)
         return response['vol']
 
@@ -560,7 +563,7 @@ class NimbleAPIExecutor:
     @_response_checker
     def online_vol(self, vol_name, online_flag, *args, **kwargs):
         """Execute onlineVol API."""
-        LOG.info(_('Setting volume %(vol)s to online_flag %(flag)s')
+        LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s')
                  % {'vol': vol_name, 'flag': online_flag})
         return self.client.service.onlineVol(request={'sid': self.sid,
                                                       'name': vol_name,
@@ -581,7 +584,7 @@ class NimbleAPIExecutor:
     @_response_checker
     def dissociate_volcoll(self, vol_name, *args, **kwargs):
         """Execute dissocProtPol API."""
-        LOG.info(_('Dissociating volume %s ') % vol_name)
+        LOG.info(_LI('Dissociating volume %s') % vol_name)
         return self.client.service.dissocProtPol(
             request={'sid': self.sid,
                      'vol-name': vol_name})
@@ -590,7 +593,7 @@ class NimbleAPIExecutor:
     @_response_checker
     def delete_vol(self, vol_name, *args, **kwargs):
         """Execute deleteVol API."""
-        LOG.info(_('Deleting volume %s ') % vol_name)
+        LOG.info(_LI('Deleting volume %s') % vol_name)
         return self.client.service.deleteVol(request={'sid': self.sid,
                                                       'name': vol_name})
 
@@ -609,8 +612,8 @@ class NimbleAPIExecutor:
         snap_description = snap_display_name + snap_display_description
         # Limit to 254 characters
         snap_description = snap_description[:254]
-        LOG.info(_('Creating snapshot for volume_name=%(vol)s'
-                   ' snap_name=%(name)s snap_description=%(desc)s')
+        LOG.info(_LI('Creating snapshot for volume_name=%(vol)s'
+                     ' snap_name=%(name)s snap_description=%(desc)s')
                  % {'vol': volume_name,
                     'name': snap_name,
                     'desc': snap_description})
@@ -624,7 +627,7 @@ class NimbleAPIExecutor:
     @_response_checker
     def delete_snap(self, vol_name, snap_name, *args, **kwargs):
         """Execute deleteSnap API."""
-        LOG.info(_('Deleting snapshot %s ') % snap_name)
+        LOG.info(_LI('Deleting snapshot %s') % snap_name)
         return self.client.service.deleteSnap(request={'sid': self.sid,
                                                        'vol': vol_name,
                                                        'name': snap_name})
@@ -638,9 +641,9 @@ class NimbleAPIExecutor:
         clone_name = volume['name']
         snap_size = snapshot['volume_size']
         reserve_size = snap_size * units.Gi if reserve else 0
-        LOG.info(_('Cloning volume from snapshot volume=%(vol)s '
-                   'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s'
-                   'reserve=%(reserve)s')
+        LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s '
+                     'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s '
+                     'reserve=%(reserve)s')
                  % {'vol': volume_name,
                     'snap': snap_name,
                     'clone': clone_name,
@@ -663,7 +666,7 @@ class NimbleAPIExecutor:
     @_response_checker
     def edit_vol(self, vol_name, mask, attr):
         """Execute editVol API."""
-        LOG.info(_('Editing Volume %(vol)s with mask %(mask)s')
+        LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s')
                  % {'vol': vol_name, 'mask': str(mask)})
         return self.client.service.editVol(request={'sid': self.sid,
                                                     'name': vol_name,
@@ -673,14 +676,14 @@ class NimbleAPIExecutor:
     @_connection_checker
     @_response_checker
     def _execute_get_initiator_grp_list(self):
-        LOG.info(_('Getting getInitiatorGrpList'))
+        LOG.info(_LI('Calling getInitiatorGrpList'))
         return (self.client.service.getInitiatorGrpList(
             request={'sid': self.sid}))
 
     def get_initiator_grp_list(self):
         """Execute getInitiatorGrpList API."""
         response = self._execute_get_initiator_grp_list()
-        LOG.info(_('Successfully retrieved InitiatorGrpList'))
+        LOG.info(_LI('Successfully retrieved InitiatorGrpList'))
         return (response['initiatorgrp-list']
                 if 'initiatorgrp-list' in response else [])
 
@@ -688,8 +691,8 @@ class NimbleAPIExecutor:
     @_response_checker
     def create_initiator_group(self, initiator_group_name, initiator_name):
         """Execute createInitiatorGrp API."""
-        LOG.info(_('Creating initiator group %(igrp)s'
-                   ' with one initiator %(iname)s')
+        LOG.info(_LI('Creating initiator group %(igrp)s'
+                     ' with one initiator %(iname)s')
                  % {'igrp': initiator_group_name, 'iname': initiator_name})
         return self.client.service.createInitiatorGrp(
             request={'sid': self.sid,
@@ -701,7 +704,7 @@ class NimbleAPIExecutor:
     @_response_checker
     def delete_initiator_group(self, initiator_group_name, *args, **kwargs):
         """Execute deleteInitiatorGrp API."""
-        LOG.info(_('Deleting deleteInitiatorGrp %s ') % initiator_group_name)
+        LOG.info(_LI('Deleting initiator group %s') % initiator_group_name)
         return self.client.service.deleteInitiatorGrp(
             request={'sid': self.sid,
                      'name': initiator_group_name})
index cd53317c160690a99936d2937076060ea79e4677..37f690a63d000ee0f85e7bb3addc541779992e6d 100644 (file)
@@ -16,7 +16,7 @@
 import errno
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 import cinder.volume.driver
 from cinder.volume.drivers.prophetstor import dplcommon
@@ -156,6 +156,6 @@ class DPLISCSIDriver(dplcommon.DPLCOMMONDriver,
                         (backend_name or 'DPLISCSIDriver')
                     self._stats = data
             except Exception as exc:
-                LOG.warning(_('Cannot get volume status '
-                              '%(exc)%s.') % {'exc': exc})
+                LOG.warning(_LW('Cannot get volume status '
+                                '%(exc)s.') % {'exc': exc})
         return self._stats
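
[Reviewer note] Note the placeholder spelling in the message above: the correct named-placeholder form is '%(exc)s'; the original '%(exc)%s' mis-spells the conversion, so the caught exception text never lands in the log. A quick check of the corrected form:

    # Named-placeholder interpolation, as used throughout these drivers.
    msg = 'Cannot get volume status %(exc)s.' % {'exc': 'connection timed out'}
    print(msg)  # Cannot get volume status connection timed out.
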
index 92557a557fc9a12db11ec4b4fa524f608a2f7efc..5b7995e25b3f1168b61b10546583dc72fc55f4df 100644 (file)
@@ -27,7 +27,7 @@ import time
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.openstack.common import units
@@ -88,7 +88,7 @@ class DPLCommand(object):
                 payload = json.dumps(params, ensure_ascii=False)
                 payload.encode('utf-8')
             except Exception:
-                LOG.error(_('JSON encode params error: %s.'),
+                LOG.error(_LE('JSON encode params error: %s.'),
                           six.text_type(params))
                 retcode = errno.EINVAL
         for i in range(CONNECTION_RETRY):
@@ -100,11 +100,11 @@ class DPLCommand(object):
                     retcode = 0
                     break
             except IOError as ioerr:
-                LOG.error(_('Connect to Flexvisor error: %s.'),
+                LOG.error(_LE('Connect to Flexvisor error: %s.'),
                           six.text_type(ioerr))
                 retcode = errno.ENOTCONN
             except Exception as e:
-                LOG.error(_('Connect to Flexvisor failed: %s.'),
+                LOG.error(_LE('Connect to Flexvisor failed: %s.'),
                           six.text_type(e))
                 retcode = errno.EFAULT
 
@@ -128,7 +128,7 @@ class DPLCommand(object):
                     retcode = errno.ENOTCONN
                 continue
             except Exception as e:
-                LOG.error(_('Failed to send request: %s.'),
+                LOG.error(_LE('Failed to send request: %s.'),
                           six.text_type(e))
                 retcode = errno.EFAULT
                 break
@@ -137,7 +137,7 @@ class DPLCommand(object):
                 try:
                     response = connection.getresponse()
                     if response.status == httplib.SERVICE_UNAVAILABLE:
-                        LOG.error(_('The Flexvisor service is unavailable.'))
+                        LOG.error(_LE('The Flexvisor service is unavailable.'))
                         time.sleep(1)
                         retry -= 1
                         retcode = errno.ENOPROTOOPT
@@ -151,7 +151,7 @@ class DPLCommand(object):
                     retcode = errno.EFAULT
                     continue
                 except Exception as e:
-                    LOG.error(_('Failed to get response: %s.'),
+                    LOG.error(_LE('Failed to get response: %s.'),
                               six.text_type(e.message))
                     retcode = errno.EFAULT
                     break
@@ -160,8 +160,8 @@ class DPLCommand(object):
                 response.status == httplib.NOT_FOUND:
             retcode = errno.ENODATA
         elif retcode == 0 and response.status not in expected_status:
-            LOG.error(_('%(method)s %(url)s unexpected response status: '
-                        '%(response)s (expects: %(expects)s).')
+            LOG.error(_LE('%(method)s %(url)s unexpected response status: '
+                          '%(response)s (expects: %(expects)s).')
                       % {'method': method,
                          'url': url,
                          'response': httplib.responses[response.status],
@@ -179,11 +179,11 @@ class DPLCommand(object):
                 data = response.read()
                 data = json.loads(data)
             except (TypeError, ValueError) as e:
-                LOG.error(_('Call to json.loads() raised an exception: %s.'),
+                LOG.error(_LE('Call to json.loads() raised an exception: %s.'),
                           six.text_type(e))
                 retcode = errno.ENOEXEC
             except Exception as e:
-                LOG.error(_('Read response raised an exception: %s.'),
+                LOG.error(_LE('Read response raised an exception: %s.'),
                           six.text_type(e))
                 retcode = errno.ENOEXEC
         elif retcode == 0 and \
@@ -193,11 +193,11 @@ class DPLCommand(object):
                 data = response.read()
                 data = json.loads(data)
             except (TypeError, ValueError) as e:
-                LOG.error(_('Call to json.loads() raised an exception: %s.'),
+                LOG.error(_LE('Call to json.loads() raised an exception: %s.'),
                           six.text_type(e))
                 retcode = errno.ENOEXEC
             except Exception as e:
-                LOG.error(_('Read response raised an exception: %s.'),
+                LOG.error(_LE('Read response raised an exception: %s.'),
                           six.text_type(e))
                 retcode = errno.ENOEXEC
 
@@ -826,8 +826,8 @@ class DPLCOMMONDriver(driver.VolumeDriver):
 
     def create_consistencygroup(self, context, group):
         """Creates a consistencygroup."""
-        LOG.info(_('Start to create consistency group: %(group_name)s '
-                   'id: %(id)s') %
+        LOG.info(_LI('Start to create consistency group: %(group_name)s '
+                     'id: %(id)s') %
                  {'group_name': group['name'], 'id': group['id']})
         model_update = {'status': 'available'}
         try:
@@ -857,7 +857,7 @@ class DPLCOMMONDriver(driver.VolumeDriver):
             context, group['id'])
         model_update = {}
         model_update['status'] = group['status']
-        LOG.info(_('Start to delete consistency group: %(cg_name)s')
+        LOG.info(_LI('Start to delete consistency group: %(cg_name)s')
                  % {'cg_name': group['id']})
         try:
             self.dpl.delete_vg(self._conver_uuid2hex(group['id']))
@@ -888,8 +888,8 @@ class DPLCOMMONDriver(driver.VolumeDriver):
             context, cgsnapshot_id)
 
         model_update = {}
-        LOG.info(_('Start to create cgsnapshot for consistency group'
-                   ': %(group_name)s') %
+        LOG.info(_LI('Start to create cgsnapshot for consistency group'
+                     ': %(group_name)s') %
                  {'group_name': cgId})
 
         try:
@@ -920,8 +920,8 @@ class DPLCOMMONDriver(driver.VolumeDriver):
 
         model_update = {}
         model_update['status'] = cgsnapshot['status']
-        LOG.info(_('Delete cgsnapshot %(snap_name)s for consistency group: '
-                   '%(group_name)s') % {'snap_name': cgsnapshot['id'],
+        LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
+                     '%(group_name)s') % {'snap_name': cgsnapshot['id'],
                  'group_name': cgsnapshot['consistencygroup_id']})
 
         try:
@@ -1364,7 +1364,7 @@ class DPLCOMMONDriver(driver.VolumeDriver):
     def do_setup(self, context):
         """Any initialization the volume driver does while starting."""
         self.context = context
-        LOG.info(_('Activate Flexvisor cinder volume driver.'))
+        LOG.info(_LI('Activate Flexvisor cinder volume driver.'))
 
     def check_for_setup_error(self):
         """Check DPL can connect properly."""
@@ -1387,8 +1387,8 @@ class DPLCOMMONDriver(driver.VolumeDriver):
                     ret = 0
                     output = status.get('output', {})
             else:
-                LOG.error(_('Flexvisor failed to get pool info '
-                          '(failed to get event)%s.') % (poolid))
+                LOG.error(_LE('Flexvisor failed to get pool info '
+                              '(failed to get event) %s.') % (poolid))
                 raise exception.VolumeBackendAPIException(
                     data="failed to get event")
         elif ret != 0:
index 09030c12a9077785bc05665015ff8aa2d1d3639d..683634b2ecc28b61af812dc61159e521b80b573d 100644 (file)
@@ -140,8 +140,8 @@ class PureISCSIDriver(san.SanISCSIDriver):
                 if err.kwargs["code"] == 400:
                     # Happens if the volume does not exist.
                     ctxt.reraise = False
-                    LOG.error(_LE("Volume deletion failed with message: %s") %
-                              err.msg)
+                    LOG.error(_LE("Volume deletion failed with message: {0}"
+                                  ).format(err.msg))
         LOG.debug("Leave PureISCSIDriver.delete_volume.")
 
     def create_snapshot(self, snapshot):
@@ -162,8 +162,8 @@ class PureISCSIDriver(san.SanISCSIDriver):
                 if err.kwargs["code"] == 400:
                     # Happens if the snapshot does not exist.
                     ctxt.reraise = False
-                    LOG.error(_LE("Snapshot deletion failed with message:"
-                                  " %s") % err.msg)
+                    LOG.error(_LE("Snapshot deletion failed with message: {0}"
+                                  ).format(err.msg))
         LOG.debug("Leave PureISCSIDriver.delete_snapshot.")
 
     def initialize_connection(self, volume, connector):
@@ -191,11 +191,9 @@ class PureISCSIDriver(san.SanISCSIDriver):
             self._run_iscsiadm_bare(["-m", "discovery", "-t", "sendtargets",
                                      "-p", self._iscsi_port["portal"]])
         except processutils.ProcessExecutionError as err:
-            LOG.warn(_LW("iSCSI discovery of port %(port_name)s at "
-                         "%(port_portal)s failed with error: %(err_msg)s") %
-                     {"port_name": self._iscsi_port["name"],
-                      "port_portal": self._iscsi_port["portal"],
-                      "err_msg": err.stderr})
+            LOG.warn(_LW("iSCSI discovery of port {0[name]} at {0[portal]} "
+                         "failed with error: {1}").format(self._iscsi_port,
+                                                          err.stderr))
             self._iscsi_port = self._choose_target_iscsi_port()
         return self._iscsi_port
 
@@ -261,8 +259,9 @@ class PureISCSIDriver(san.SanISCSIDriver):
                     if err.kwargs["code"] == 400:
                         # Happens if the host and volume are not connected.
                         ctxt.reraise = False
-                        LOG.error(_LE("Disconnection failed with message: "
-                                      "%(msg)s.") % {"msg": err.msg})
+                        LOG.error(_LE("Disconnection failed "
+                                      "with message: {msg}."
+                                      ).format(msg=err.msg))
             if (GENERATED_NAME.match(host_name) and not host["hgroup"] and
                 not self._array.list_host_connections(host_name,
                                                       private=True)):
@@ -271,7 +270,7 @@ class PureISCSIDriver(san.SanISCSIDriver):
                 self._array.delete_host(host_name)
         else:
             LOG.error(_LE("Unable to find host object in Purity with IQN: "
-                          "%(iqn)s.") % {"iqn": connector["initiator"]})
+                          "{iqn}.").format(iqn=connector["initiator"]))
         LOG.debug("Leave PureISCSIDriver.terminate_connection.")
 
     def get_volume_stats(self, refresh=False):
@@ -448,4 +447,4 @@ class FlashArray(object):
 
     def list_ports(self, **kwargs):
         """Return a list of dictionaries describing ports."""
-        return self._http_request("GET", "port", kwargs)
\ No newline at end of file
+        return self._http_request("GET", "port", kwargs)
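
[Reviewer note] The pure.py hunks switch these messages from %-interpolation to str.format(), including the '{0[name]}' item-access form, which indexes into the positional argument. A minimal illustration with stand-in values (the dict keys match the driver's port dict; the values are invented):

    iscsi_port = {'name': 'ct0.eth2', 'portal': '10.0.0.5:3260'}
    msg = ('iSCSI discovery of port {0[name]} at {0[portal]} '
           'failed with error: {1}').format(iscsi_port, 'connection refused')
    print(msg)
    # iSCSI discovery of port ct0.eth2 at 10.0.0.5:3260 failed with
    # error: connection refused
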
index a55c1f666683e5ed545916e7933a8002f4828790..e575f731e6d3f138cfef591033e4f36dff519e69 100644 (file)
@@ -25,7 +25,7 @@ from oslo.config import cfg
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import log as logging
@@ -174,7 +174,8 @@ class RBDImageIOWrapper(io.RawIOBase):
         try:
             self._rbd_meta.image.flush()
         except AttributeError:
-            LOG.warning(_("flush() not supported in this version of librbd"))
+            LOG.warning(_LW("flush() not supported in "
+                            "this version of librbd"))
 
     def fileno(self):
         """RBD does not have support for fileno() so we raise IOError.
@@ -212,7 +213,7 @@ class RBDVolumeProxy(object):
                                            snapshot=snapshot,
                                            read_only=read_only)
         except driver.rbd.Error:
-            LOG.exception(_("error opening rbd image %s"), name)
+            LOG.exception(_LE("error opening rbd image %s"), name)
             driver._disconnect_from_rados(client, ioctx)
             raise
         self.driver = driver
@@ -306,7 +307,7 @@ class RBDDriver(driver.VolumeDriver):
             ioctx = client.open_ioctx(pool)
             return client, ioctx
         except self.rados.Error as exc:
-            LOG.error("error connecting to ceph cluster.")
+            LOG.error(_LE("error connecting to ceph cluster."))
             # shutdown cannot raise an exception
             client.shutdown()
             raise exception.VolumeBackendAPIException(data=str(exc))
@@ -366,7 +367,7 @@ class RBDDriver(driver.VolumeDriver):
             stats['free_capacity_gb'] = new_stats['kb_avail'] / units.Mi
         except self.rados.Error:
             # just log and return unknown capacities
-            LOG.exception(_('error refreshing volume stats'))
+            LOG.exception(_LE('error refreshing volume stats'))
         self._stats = stats
 
     def get_volume_stats(self, refresh=False):
@@ -614,7 +615,7 @@ class RBDDriver(driver.VolumeDriver):
             try:
                 rbd_image = self.rbd.Image(client.ioctx, volume_name)
             except self.rbd.ImageNotFound:
-                LOG.info(_("volume %s no longer exists in backend")
+                LOG.info(_LI("volume %s no longer exists in backend")
                          % (volume_name))
                 return
 
index e05796ee5b4132e1af757bfc20baf08d190d5e93..3c5d670a0c999e6f50bcdfdd54a9f7bd1590e472 100644 (file)
@@ -25,7 +25,7 @@ import tempfile
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import processutils
@@ -176,7 +176,7 @@ class SheepdogDriver(driver.VolumeDriver):
             stats['total_capacity_gb'] = total / units.Gi
             stats['free_capacity_gb'] = (total - used) / units.Gi
         except processutils.ProcessExecutionError:
-            LOG.exception(_('error refreshing volume stats'))
+            LOG.exception(_LE('error refreshing volume stats'))
 
         self._stats = stats
 
index fac5167b4e998893250071cc4190ccdd399154d0..48c53a8534bc326667b0490695ead52ae505c571 100644 (file)
@@ -25,7 +25,7 @@ from six import wraps
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder.openstack.common import units
@@ -120,7 +120,7 @@ class SolidFireDriver(SanISCSIDriver):
     cluster_stats = {}
     retry_exc_tuple = (exception.SolidFireRetryableException,
                        requests.exceptions.ConnectionError)
-    retryable_errors = ['xDBVersionMisMatch',
+    retryable_errors = ['xDBVersionMismatch',
                         'xMaxSnapshotsPerVolumeExceeded',
                         'xMaxClonesPerVolumeExceeded',
                         'xMaxSnapshotsPerNodeExceeded',
@@ -295,8 +295,8 @@ class SolidFireDriver(SanISCSIDriver):
             iteration_count += 1
 
         if not found_volume:
-            LOG.error(_('Failed to retrieve volume SolidFire-'
-                        'ID: %s in get_by_account!') % sf_volume_id)
+            LOG.error(_LE('Failed to retrieve volume SolidFire-'
+                          'ID: %s in get_by_account!') % sf_volume_id)
             raise exception.VolumeNotFound(volume_id=sf_volume_id)
 
         model_update = {}
@@ -405,8 +405,8 @@ class SolidFireDriver(SanISCSIDriver):
                    if i.key == 'sf-qos' and i.value in valid_presets]
         if len(presets) > 0:
             if len(presets) > 1:
-                LOG.warning(_('More than one valid preset was '
-                              'detected, using %s') % presets[0])
+                LOG.warning(_LW('More than one valid preset was '
+                                'detected, using %s') % presets[0])
             qos = self.sf_qos_dict[presets[0]]
         else:
             # look for explicit settings
@@ -467,10 +467,10 @@ class SolidFireDriver(SanISCSIDriver):
             # NOTE(jdg): Previously we would raise here, but there are cases
             # where this might be a cleanup for a failed delete.
             # Until we get better states we'll just log an error
-            LOG.error(_("Volume %s, not found on SF Cluster."), uuid)
+            LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
 
         if found_count > 1:
-            LOG.error(_("Found %(count)s volumes mapped to id: %(uuid)s.") %
+            LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s.") %
                       {'count': found_count,
                        'uuid': uuid})
             raise exception.DuplicateSfVolumeNames(vol_name=uuid)
@@ -552,11 +552,11 @@ class SolidFireDriver(SanISCSIDriver):
 
         sfaccount = self._get_sfaccount(volume['project_id'])
         if sfaccount is None:
-            LOG.error(_("Account for Volume ID %s was not found on "
-                        "the SolidFire Cluster while attempting "
-                        "delete_volume operation!") % volume['id'])
-            LOG.error(_("This usually means the volume was never "
-                        "successfully created."))
+            LOG.error(_LE("Account for Volume ID %s was not found on "
+                          "the SolidFire Cluster while attempting "
+                          "delete_volume operation!") % volume['id'])
+            LOG.error(_LE("This usually means the volume was never "
+                          "successfully created."))
             return
 
         params = {'accountID': sfaccount['accountID']}
@@ -571,9 +571,9 @@ class SolidFireDriver(SanISCSIDriver):
                 msg = _("Failed to delete SolidFire Volume: %s") % data
                 raise exception.SolidFireAPIException(msg)
         else:
-            LOG.error(_("Volume ID %s was not found on "
-                        "the SolidFire Cluster while attempting "
-                        "delete_volume operation!"), volume['id'])
+            LOG.error(_LE("Volume ID %s was not found on "
+                          "the SolidFire Cluster while attempting "
+                          "delete_volume operation!"), volume['id'])
 
         LOG.debug("Leaving SolidFire delete_volume")
 
@@ -646,9 +646,9 @@ class SolidFireDriver(SanISCSIDriver):
         sf_vol = self._get_sf_volume(volume['id'], params)
 
         if sf_vol is None:
-            LOG.error(_("Volume ID %s was not found on "
-                        "the SolidFire Cluster while attempting "
-                        "extend_volume operation!"), volume['id'])
+            LOG.error(_LE("Volume ID %s was not found on "
+                          "the SolidFire Cluster while attempting "
+                          "extend_volume operation!"), volume['id'])
             raise exception.VolumeNotFound(volume_id=volume['id'])
 
         params = {
@@ -674,7 +674,7 @@ class SolidFireDriver(SanISCSIDriver):
         # of stats data, this is just one of the calls
         results = self._issue_api_request('GetClusterCapacity', params)
         if 'result' not in results:
-            LOG.error(_('Failed to get updated stats'))
+            LOG.error(_LE('Failed to get updated stats'))
 
         results = results['result']['clusterCapacity']
         free_capacity =\
@@ -711,9 +711,9 @@ class SolidFireDriver(SanISCSIDriver):
 
         sf_vol = self._get_sf_volume(volume['id'], params)
         if sf_vol is None:
-            LOG.error(_("Volume ID %s was not found on "
-                        "the SolidFire Cluster while attempting "
-                        "attach_volume operation!"), volume['id'])
+            LOG.error(_LE("Volume ID %s was not found on "
+                          "the SolidFire Cluster while attempting "
+                          "attach_volume operation!"), volume['id'])
             raise exception.VolumeNotFound(volume_id=volume['id'])
 
         attributes = sf_vol['attributes']
@@ -737,9 +737,9 @@ class SolidFireDriver(SanISCSIDriver):
 
         sf_vol = self._get_sf_volume(volume['id'], params)
         if sf_vol is None:
-            LOG.error(_("Volume ID %s was not found on "
-                        "the SolidFire Cluster while attempting "
-                        "detach_volume operation!"), volume['id'])
+            LOG.error(_LE("Volume ID %s was not found on "
+                          "the SolidFire Cluster while attempting "
+                          "detach_volume operation!"), volume['id'])
             raise exception.VolumeNotFound(volume_id=volume['id'])
 
         attributes = sf_vol['attributes']
@@ -762,9 +762,9 @@ class SolidFireDriver(SanISCSIDriver):
         params = {'accountID': sfaccount['accountID']}
         sf_vol = self._get_sf_volume(volume['id'], params)
         if sf_vol is None:
-            LOG.error(_("Volume ID %s was not found on "
-                        "the SolidFire Cluster while attempting "
-                        "accept_transfer operation!"), volume['id'])
+            LOG.error(_LE("Volume ID %s was not found on "
+                          "the SolidFire Cluster while attempting "
+                          "accept_transfer operation!"), volume['id'])
             raise exception.VolumeNotFound(volume_id=volume['id'])
         if new_project != volume['project_id']:
             # do a create_sfaccount here as this tenant
@@ -908,9 +908,9 @@ class SolidFireDriver(SanISCSIDriver):
         LOG.debug("Enter SolidFire unmanage...")
         sfaccount = self._get_sfaccount(volume['project_id'])
         if sfaccount is None:
-            LOG.error(_("Account for Volume ID %s was not found on "
-                        "the SolidFire Cluster while attempting "
-                        "unmanage operation!") % volume['id'])
+            LOG.error(_LE("Account for Volume ID %s was not found on "
+                          "the SolidFire Cluster while attempting "
+                          "unmanage operation!") % volume['id'])
             raise exception.SolidFireAPIException("Failed to find account "
                                                   "for volume.")
 
index a43489b67c09fee7b400467456fa87a826c271ad..ca18380248529f4936dfc65440a0ac34505ec3c8 100644 (file)
@@ -26,7 +26,7 @@ import urllib2
 import netaddr
 import six.moves.urllib.parse as urlparse
 
-from cinder.i18n import _
+from cinder.i18n import _, _LI
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.vmware import error_util
 from cinder.volume.drivers.vmware import vim_util
@@ -206,7 +206,7 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile):
             msg = _("Could not retrieve URL from lease.")
             LOG.exception(msg)
             raise error_util.VimException(msg)
-        LOG.info(_("Opening vmdk url: %s for write.") % url)
+        LOG.info(_LI("Opening vmdk url: %s for write.") % url)
 
         # Prepare the http connection to the vmdk url
         cookies = session.vim.client.options.transport.cookiejar
@@ -299,7 +299,7 @@ class VMwareHTTPReadVmdk(VMwareHTTPFile):
             msg = _("Could not retrieve URL from lease.")
             LOG.exception(msg)
             raise error_util.VimException(msg)
-        LOG.info(_("Opening vmdk url: %s for read.") % url)
+        LOG.info(_LI("Opening vmdk url: %s for read.") % url)
 
         cookies = session.vim.client.options.transport.cookiejar
         headers = {'User-Agent': USER_AGENT,
index 5a135740407123a68f5a6de89e788f7a96d3fff7..f915bdc1b425d0f55f7099c2dca6d108e72dbbcc 100644 (file)
@@ -18,7 +18,7 @@ from taskflow.utils import misc
 
 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder.openstack.common import units
@@ -533,7 +533,7 @@ class EntryCreateTask(flow_utils.CinderTask):
             #
             # NOTE(harlowja): Being unable to destroy a volume is pretty
             # bad though!!
-            LOG.exception(_("Failed destroying volume entry %s"), vol_id)
+            LOG.exception(_LE("Failed destroying volume entry %s"), vol_id)
 
 
 class QuotaReserveTask(flow_utils.CinderTask):
@@ -617,8 +617,8 @@ class QuotaReserveTask(flow_utils.CinderTask):
         except exception.CinderException:
             # We are already reverting, therefore we should silence this
             # exception since a second exception being active will be bad.
-            LOG.exception(_("Failed rolling back quota for"
-                            " %s reservations"), reservations)
+            LOG.exception(_LE("Failed rolling back quota for"
+                              " %s reservations"), reservations)
 
 
 class QuotaCommitTask(flow_utils.CinderTask):
@@ -663,8 +663,8 @@ class QuotaCommitTask(flow_utils.CinderTask):
                 QUOTAS.commit(context, reservations,
                               project_id=context.project_id)
         except Exception:
-            LOG.exception(_("Failed to update quota for deleting volume: %s"),
-                          volume['id'])
+            LOG.exception(_LE("Failed to update quota for deleting "
+                              "volume: %s"), volume['id'])
 
 
 class VolumeCastTask(flow_utils.CinderTask):
@@ -763,11 +763,11 @@ class VolumeCastTask(flow_utils.CinderTask):
         volume_id = kwargs['volume_id']
         common.restore_source_status(context, self.db, kwargs)
         common.error_out_volume(context, self.db, volume_id)
-        LOG.error(_("Volume %s: create failed"), volume_id)
+        LOG.error(_LE("Volume %s: create failed"), volume_id)
         exc_info = False
         if all(flow_failures[-1].exc_info):
             exc_info = flow_failures[-1].exc_info
-        LOG.error(_('Unexpected build error:'), exc_info=exc_info)
+        LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
 
 
 def get_flow(scheduler_rpcapi, volume_rpcapi, db_api,
index 6841dd101e1ea71f4ec68a70bcfcdc54cb8b62c6..d0e7e773344820870225ee035336a7deae408219 100644 (file)
@@ -19,7 +19,7 @@
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LE
 from cinder.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
@@ -57,8 +57,9 @@ def restore_source_status(context, db, volume_spec):
     except exception.CinderException:
         # NOTE(harlowja): Don't let this cause further exceptions since this is
         # a non-critical failure.
-        LOG.exception(_("Failed setting source volume %(source_volid)s back to"
-                        " its initial %(source_status)s status") %
+        LOG.exception(_LE("Failed setting source "
+                          "volume %(source_volid)s back to"
+                          " its initial %(source_status)s status") %
                       {'source_status': source_status,
                        'source_volid': source_volid})
 
@@ -89,6 +90,6 @@ def error_out_volume(context, db, volume_id, reason=None):
         db.volume_update(context, volume_id, update)
     except exception.CinderException:
         # Don't let this cause further exceptions.
-        LOG.exception(_("Failed updating volume %(volume_id)s with"
-                        " %(update)s") % {'volume_id': volume_id,
-                                          'update': update})
+        LOG.exception(_LE("Failed updating volume %(volume_id)s with"
+                          " %(update)s") % {'volume_id': volume_id,
+                                            'update': update})
index 69b3d82876242dd68a48adcd7c0035927a284ee7..f26892a5958f37cb18a4517cf0c58c73abba5b2e 100644 (file)
@@ -19,7 +19,7 @@ from taskflow.utils import misc
 
 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.image import glance
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import processutils
@@ -187,7 +187,7 @@ class ExtractVolumeRefTask(flow_utils.CinderTask):
             return
 
         common.error_out_volume(context, self.db, volume_id)
-        LOG.error(_("Volume %s: create failed"), volume_id)
+        LOG.error(_LE("Volume %s: create failed"), volume_id)
 
 
 class ExtractVolumeSpecTask(flow_utils.CinderTask):
@@ -485,19 +485,19 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         try:
             copy_image_to_volume(context, volume_ref, image_service, image_id)
         except processutils.ProcessExecutionError as ex:
-            LOG.error(_("Failed to copy image %(image_id)s to volume: "
-                        "%(volume_id)s, error: %(error)s") %
+            LOG.error(_LE("Failed to copy image %(image_id)s to volume: "
+                          "%(volume_id)s, error: %(error)s") %
                       {'volume_id': volume_id,
                        'error': ex.stderr, 'image_id': image_id})
             raise exception.ImageCopyFailure(reason=ex.stderr)
         except exception.ImageUnacceptable as ex:
-            LOG.error(_("Failed to copy image to volume: %(volume_id)s, "
-                        "error: %(error)s") % {'volume_id': volume_id,
-                                               'error': ex})
+            LOG.error(_LE("Failed to copy image to volume: %(volume_id)s, "
+                          "error: %(error)s") %
+                      {'volume_id': volume_id, 'error': ex})
             raise exception.ImageUnacceptable(ex)
         except Exception as ex:
-            LOG.error(_("Failed to copy image %(image_id)s to "
-                        "volume: %(volume_id)s, error: %(error)s") %
+            LOG.error(_LE("Failed to copy image %(image_id)s to "
+                          "volume: %(volume_id)s, error: %(error)s") %
                       {'volume_id': volume_id, 'error': ex,
                        'image_id': image_id})
             if not isinstance(ex, exception.ImageCopyFailure):
@@ -606,16 +606,16 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         # we can't do anything if the driver didn't init
         if not self.driver.initialized:
             driver_name = self.driver.__class__.__name__
-            LOG.error(_("Unable to create volume. "
-                        "Volume driver %s not initialized") % driver_name)
+            LOG.error(_LE("Unable to create volume. "
+                          "Volume driver %s not initialized") % driver_name)
             # NOTE(flaper87): Set the error status before
             # raising any exception.
             self.db.volume_update(context, volume_id, dict(status='error'))
             raise exception.DriverNotInitialized()
 
         create_type = volume_spec.pop('type', None)
-        LOG.info(_("Volume %(volume_id)s: being created as %(create_type)s "
-                   "with specification: %(volume_spec)s") %
+        LOG.info(_LI("Volume %(volume_id)s: being created as %(create_type)s "
+                     "with specification: %(volume_spec)s") %
                  {'volume_spec': volume_spec, 'volume_id': volume_id,
                   'create_type': create_type})
         if create_type == 'raw':
index 01d6328f3198c5cd67741b3dfbb576f228438c4f..80627537c46c697bfafb64cae6cd4bf68a03c73d 100644 (file)
@@ -17,7 +17,7 @@ from taskflow.patterns import linear_flow
 
 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.openstack.common import log as logging
 from cinder.volume.flows.api import create_volume as create_api
 from cinder.volume.flows import common as flow_common
@@ -43,8 +43,8 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask):
         volume_id = volume_ref['id']
         if not self.driver.initialized:
             driver_name = self.driver.__class__.__name__
-            LOG.error(_("Unable to manage existing volume. "
-                        "Volume driver %s not initialized.") % driver_name)
+            LOG.error(_LE("Unable to manage existing volume. "
+                          "Volume driver %s not initialized.") % driver_name)
             flow_common.error_out_volume(context, self.db, volume_id,
                                          reason=_("Volume driver %s "
                                                   "not initialized.") %
index 05c18fe0ed146676e79e9183dbc85847135ba18e..4bbd24f225a733ea0afb19b2417cd17bf3dc43e2 100644 (file)
@@ -47,7 +47,7 @@ from cinder import compute
 from cinder import context
 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import glance
 from cinder import manager
 from cinder.openstack.common import excutils
@@ -174,8 +174,8 @@ class VolumeManager(manager.SchedulerDependentManager):
             # if its not using the multi backend
             volume_driver = self.configuration.volume_driver
         if volume_driver in MAPPING:
-            LOG.warn(_("Driver path %s is deprecated, update your "
-                       "configuration to the new path."), volume_driver)
+            LOG.warn(_LW("Driver path %s is deprecated, update your "
+                         "configuration to the new path."), volume_driver)
             volume_driver = MAPPING[volume_driver]
 
         vol_db_empty = self._set_voldb_empty_at_startup_indicator(
@@ -213,7 +213,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             try:
                 pool = self.driver.get_pool(volume)
             except Exception as err:
-                LOG.error(_('Failed to fetch pool name for volume: %s'),
+                LOG.error(_LE('Failed to fetch pool name for volume: %s'),
                           volume['id'])
                 LOG.exception(err)
                 return
@@ -256,25 +256,25 @@ class VolumeManager(manager.SchedulerDependentManager):
                                              None, filters=None)
 
         if len(vol_entries) == 0:
-            LOG.info(_("Determined volume DB was empty at startup."))
+            LOG.info(_LI("Determined volume DB was empty at startup."))
             return True
         else:
-            LOG.info(_("Determined volume DB was not empty at startup."))
+            LOG.info(_LI("Determined volume DB was not empty at startup."))
             return False
 
     def init_host(self):
         """Perform any required initialization."""
 
         ctxt = context.get_admin_context()
-        LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
+        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
                  {'driver_name': self.driver.__class__.__name__,
                   'version': self.driver.get_version()})
         try:
             self.driver.do_setup(ctxt)
             self.driver.check_for_setup_error()
         except Exception as ex:
-            LOG.error(_("Error encountered during "
-                        "initialization of driver: %(name)s") %
+            LOG.error(_LE("Error encountered during "
+                          "initialization of driver: %(name)s") %
                       {'name': self.driver.__class__.__name__})
             LOG.exception(ex)
             # we don't want to continue since we failed
@@ -298,25 +298,25 @@ class VolumeManager(manager.SchedulerDependentManager):
                         if volume['status'] in ['in-use']:
                             self.driver.ensure_export(ctxt, volume)
                     except Exception as export_ex:
-                        LOG.error(_("Failed to re-export volume %s: "
-                                    "setting to error state"), volume['id'])
+                        LOG.error(_LE("Failed to re-export volume %s: "
+                                      "setting to error state"), volume['id'])
                         LOG.exception(export_ex)
                         self.db.volume_update(ctxt,
                                               volume['id'],
                                               {'status': 'error'})
                 elif volume['status'] == 'downloading':
-                    LOG.info(_("volume %s stuck in a downloading state"),
+                    LOG.info(_LI("volume %s stuck in a downloading state"),
                              volume['id'])
                     self.driver.clear_download(ctxt, volume)
                     self.db.volume_update(ctxt,
                                           volume['id'],
                                           {'status': 'error'})
                 else:
-                    LOG.info(_("volume %s: skipping export"), volume['id'])
+                    LOG.info(_LI("volume %s: skipping export"), volume['id'])
         except Exception as ex:
-            LOG.error(_("Error encountered during "
-                        "re-exporting phase of driver initialization: "
-                        " %(name)s") %
+            LOG.error(_LE("Error encountered during "
+                          "re-exporting phase of driver initialization: "
+                          "%(name)s") %
                       {'name': self.driver.__class__.__name__})
             LOG.exception(ex)
             return
@@ -327,7 +327,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         LOG.debug('Resuming any in progress delete operations')
         for volume in volumes:
             if volume['status'] == 'deleting':
-                LOG.info(_('Resuming delete on volume: %s') % volume['id'])
+                LOG.info(_LI('Resuming delete on volume: %s') % volume['id'])
                 if CONF.volume_service_inithost_offload:
                     # Offload all the pending volume delete operations to the
                     # threadpool to prevent the main volume service thread
@@ -372,7 +372,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                 request_spec=request_spec,
                 filter_properties=filter_properties)
         except Exception:
-            LOG.exception(_("Failed to create manager volume flow"))
+            LOG.exception(_LE("Failed to create manager volume flow"))
             raise exception.CinderException(
                 _("Failed to create manager volume flow."))
 
@@ -434,8 +434,8 @@ class VolumeManager(manager.SchedulerDependentManager):
         except exception.VolumeNotFound:
             # NOTE(thingee): It could be possible for a volume to
             # be deleted when resuming deletes from init_host().
-            LOG.info(_("Tried to delete volume %s, but it no longer exists, "
-                       "moving on") % (volume_id))
+            LOG.info(_LI("Tried to delete volume %s, but it no longer exists, "
+                         "moving on") % (volume_id))
             return True
 
         if context.project_id != volume_ref['project_id']:
@@ -443,7 +443,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         else:
             project_id = context.project_id
 
-        LOG.info(_("volume %s: deleting"), volume_ref['id'])
+        LOG.info(_LI("volume %s: deleting"), volume_ref['id'])
         if volume_ref['attach_status'] == "attached":
             # Volume is still attached, need to detach first
             raise exception.VolumeAttached(volume_id=volume_id)
@@ -466,7 +466,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             else:
                 self.driver.delete_volume(volume_ref)
         except exception.VolumeIsBusy:
-            LOG.error(_("Cannot delete volume %s: volume is busy"),
+            LOG.error(_LE("Cannot delete volume %s: volume is busy"),
                       volume_ref['id'])
             self.db.volume_update(context, volume_ref['id'],
                                   {'status': 'available'})
@@ -493,13 +493,13 @@ class VolumeManager(manager.SchedulerDependentManager):
                                           **reserve_opts)
         except Exception:
             reservations = None
-            LOG.exception(_("Failed to update usages deleting volume"))
+            LOG.exception(_LE("Failed to update usages deleting volume"))
 
         # Delete glance metadata if it exists
         self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
 
         self.db.volume_destroy(context, volume_id)
-        LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
+        LOG.info(_LI("volume %s: deleted successfully"), volume_ref['id'])
         self._notify_about_volume_usage(context, volume_ref, "delete.end")
 
         # Commit the reservations
@@ -529,7 +529,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         caller_context = context
         context = context.elevated()
         snapshot_ref = self.db.snapshot_get(context, snapshot_id)
-        LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
+        LOG.info(_LI("snapshot %s: creating"), snapshot_ref['id'])
 
         self._notify_about_snapshot_usage(
             context, snapshot_ref, "create.start")
@@ -564,9 +564,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                 self.db.volume_glance_metadata_copy_to_snapshot(
                     context, snapshot_ref['id'], volume_id)
             except exception.CinderException as ex:
-                LOG.exception(_("Failed updating %(snapshot_id)s"
-                                " metadata using the provided volumes"
-                                " %(volume_id)s metadata") %
+                LOG.exception(_LE("Failed updating %(snapshot_id)s"
+                                  " metadata using the provided volume's"
+                                  " %(volume_id)s metadata") %
                               {'volume_id': volume_id,
                                'snapshot_id': snapshot_id})
                 self.db.snapshot_update(context,
@@ -579,7 +579,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                                {'status': 'available',
                                                 'progress': '100%'})
 
-        LOG.info(_("snapshot %s: created successfully"), snapshot_ref['id'])
+        LOG.info(_LI("snapshot %s: created successfully"), snapshot_ref['id'])
         self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
         return snapshot_id
 
@@ -591,7 +591,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         snapshot_ref = self.db.snapshot_get(context, snapshot_id)
         project_id = snapshot_ref['project_id']
 
-        LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
+        LOG.info(_LI("snapshot %s: deleting"), snapshot_ref['id'])
         self._notify_about_snapshot_usage(
             context, snapshot_ref, "delete.start")
 
@@ -609,7 +609,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
             self.driver.delete_snapshot(snapshot_ref)
         except exception.SnapshotIsBusy:
-            LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
+            LOG.error(_LE("Cannot delete snapshot %s: snapshot is busy"),
                       snapshot_ref['id'])
             self.db.snapshot_update(context,
                                     snapshot_ref['id'],
@@ -639,10 +639,10 @@ class VolumeManager(manager.SchedulerDependentManager):
                                           **reserve_opts)
         except Exception:
             reservations = None
-            LOG.exception(_("Failed to update usages deleting snapshot"))
+            LOG.exception(_LE("Failed to update usages deleting snapshot"))
         self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
         self.db.snapshot_destroy(context, snapshot_id)
-        LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
+        LOG.info(_LI("snapshot %s: deleted successfully"), snapshot_ref['id'])
         self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")
 
         # Commit the reservations
@@ -772,12 +772,12 @@ class VolumeManager(manager.SchedulerDependentManager):
             self.driver.remove_export(context.elevated(), volume)
         except exception.DriverNotInitialized:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Error detaching volume %(volume)s, "
-                                "due to uninitialized driver."),
+                LOG.exception(_LE("Error detaching volume %(volume)s, "
+                                  "due to uninitialized driver."),
                               {"volume": volume_id})
         except Exception as ex:
-            LOG.exception(_("Error detaching volume %(volume)s, "
-                            "due to remove export failure."),
+            LOG.exception(_LE("Error detaching volume %(volume)s, "
+                              "due to remove export failure."),
                           {"volume": volume_id})
             raise exception.RemoveExportException(volume=volume_id, reason=ex)
 
@@ -808,8 +808,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                       "image (%(image_id)s) successfully",
                       {'volume_id': volume_id, 'image_id': image_id})
         except Exception as error:
-            LOG.error(_("Error occurred while uploading volume %(volume_id)s "
-                        "to image %(image_id)s."),
+            LOG.error(_LE("Error occurred while uploading "
+                          "volume %(volume_id)s "
+                          "to image %(image_id)s."),
                       {'volume_id': volume_id, 'image_id': image_meta['id']})
             if image_service is not None:
                 # Deletes the image if it is in queued or saving state
@@ -832,13 +833,13 @@ class VolumeManager(manager.SchedulerDependentManager):
             image_meta = image_service.show(context, image_id)
             image_status = image_meta.get('status')
             if image_status == 'queued' or image_status == 'saving':
-                LOG.warn("Deleting image %(image_id)s in %(image_status)s "
-                         "state.",
+                LOG.warn(_LW("Deleting image %(image_id)s in %(image_status)s "
+                             "state."),
                          {'image_id': image_id,
                           'image_status': image_status})
                 image_service.delete(context, image_id)
         except Exception:
-            LOG.warn(_("Error occurred while deleting image %s."),
+            LOG.warn(_LW("Error occurred while deleting image %s."),
                      image_id, exc_info=True)
 
     def initialize_connection(self, context, volume_id, connector):
@@ -908,8 +909,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                                                volume_id,
                                                model_update)
         except exception.CinderException as ex:
-            LOG.exception(_("Failed updating model of volume %(volume_id)s"
-                          " with driver provided model %(model)s") %
+            LOG.exception(_LE("Failed updating model of volume %(volume_id)s"
+                              " with driver provided model %(model)s") %
                           {'volume_id': volume_id, 'model': model_update})
             raise exception.ExportFailure(reason=ex)
 
@@ -996,10 +997,10 @@ class VolumeManager(manager.SchedulerDependentManager):
                                       model_update)
             except exception.CinderException:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_("Failed updating model of "
-                                    "volume %(volume_id)s "
-                                    "with drivers update %(model)s "
-                                    "during xfr.") %
+                    LOG.exception(_LE("Failed updating model of "
+                                      "volume %(volume_id)s "
+                                      "with drivers update %(model)s "
+                                      "during xfr.") %
                                   {'volume_id': volume_id,
                                    'model': model_update})
                     self.db.volume_update(context.elevated(),
@@ -1227,7 +1228,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
     @periodic_task.periodic_task
     def _report_driver_status(self, context):
-        LOG.info(_("Updating volume status"))
+        LOG.info(_LI("Updating volume status"))
         if not self.driver.initialized:
             if self.driver.configuration.config_group is None:
                 config_group = ''
@@ -1235,9 +1236,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                 config_group = ('(config name %s)' %
                                 self.driver.configuration.config_group)
 
-            LOG.warning(_('Unable to update stats, %(driver_name)s '
-                          '-%(driver_version)s '
-                          '%(config_group)s driver is uninitialized.') %
+            LOG.warning(_LW('Unable to update stats, %(driver_name)s '
+                            '-%(driver_version)s '
+                            '%(config_group)s driver is uninitialized.') %
                         {'driver_name': self.driver.__class__.__name__,
                          'driver_version': self.driver.get_version(),
                          'config_group': config_group})
@@ -1271,7 +1272,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         self._publish_service_capabilities(context)
 
     def notification(self, context, event):
-        LOG.info(_("Notification {%s} received"), event)
+        LOG.info(_LI("Notification {%s} received"), event)
 
     def _notify_about_volume_usage(self,
                                    context,
@@ -1339,11 +1340,11 @@ class VolumeManager(manager.SchedulerDependentManager):
         size_increase = (int(new_size)) - volume['size']
         self._notify_about_volume_usage(context, volume, "resize.start")
         try:
-            LOG.info(_("volume %s: extending"), volume['id'])
+            LOG.info(_LI("volume %s: extending"), volume['id'])
             self.driver.extend_volume(volume, new_size)
-            LOG.info(_("volume %s: extended successfully"), volume['id'])
+            LOG.info(_LI("volume %s: extended successfully"), volume['id'])
         except Exception:
-            LOG.exception(_("volume %s: Error trying to extend volume"),
+            LOG.exception(_LE("volume %s: Error trying to extend volume"),
                           volume_id)
             try:
                 self.db.volume_update(context, volume['id'],
@@ -1422,7 +1423,8 @@ class VolumeManager(manager.SchedulerDependentManager):
         except Exception:
             old_reservations = None
             self.db.volume_update(context, volume_id, status_update)
-            LOG.exception(_("Failed to update usages while retyping volume."))
+            LOG.exception(_LE("Failed to update usages "
+                              "while retyping volume."))
             raise exception.CinderException(_("Failed to get old volume type"
                                               " quota reservations"))
 
@@ -1454,11 +1456,11 @@ class VolumeManager(manager.SchedulerDependentManager):
                     retyped = ret
 
                 if retyped:
-                    LOG.info(_("Volume %s: retyped successfully"), volume_id)
+                    LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
             except Exception as ex:
                 retyped = False
-                LOG.error(_("Volume %s: driver error when trying to retype, "
-                            "falling back to generic mechanism."),
+                LOG.error(_LE("Volume %s: driver error when trying to retype, "
+                              "falling back to generic mechanism."),
                           volume_ref['id'])
                 LOG.exception(ex)
 
@@ -1524,7 +1526,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                 volume_id,
                 ref)
         except Exception:
-            LOG.exception(_("Failed to create manage_existing flow."))
+            LOG.exception(_LE("Failed to create manage_existing flow."))
             raise exception.CinderException(
                 _("Failed to create manage existing flow."))
 
@@ -1556,7 +1558,8 @@ class VolumeManager(manager.SchedulerDependentManager):
             utils.require_driver_initialized(self.driver)
         except exception.DriverNotInitialized:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to promote replica for volume %(id)s.")
+                LOG.exception(_LE("Failed to promote replica "
+                                  "for volume %(id)s.")
                               % {'id': volume_id})
 
         volume = self.db.volume_get(ctxt, volume_id)
@@ -1587,7 +1590,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             utils.require_driver_initialized(self.driver)
         except exception.DriverNotInitialized:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Failed to sync replica for volume %(id)s.")
+                LOG.exception(_LE("Failed to sync replica for volume %(id)s.")
                               % {'id': volume_id})
 
         volume = self.db.volume_get(ctxt, volume_id)
@@ -1614,7 +1617,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
     @periodic_task.periodic_task
     def _update_replication_relationship_status(self, ctxt):
-        LOG.info(_('Updating volume replication status.'))
+        LOG.info(_LI('Updating volume replication status.'))
         if not self.driver.initialized:
             if self.driver.configuration.config_group is None:
                 config_group = ''
@@ -1622,9 +1625,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                 config_group = ('(config name %s)' %
                                 self.driver.configuration.config_group)
 
-            LOG.warning(_('Unable to update volume replication status, '
-                          '%(driver_name)s -%(driver_version)s '
-                          '%(config_group)s driver is uninitialized.') %
+            LOG.warning(_LW('Unable to update volume replication status, '
+                            '%(driver_name)s -%(driver_version)s '
+                            '%(config_group)s driver is uninitialized.') %
                         {'driver_name': self.driver.__class__.__name__,
                          'driver_version': self.driver.get_version(),
                          'config_group': config_group})
@@ -1640,8 +1643,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                                               vol['id'],
                                               model_update)
                 except Exception:
-                    LOG.exception(_("Error checking replication status for "
-                                    "volume %s") % vol['id'])
+                    LOG.exception(_LE("Error checking replication status for "
+                                      "volume %s") % vol['id'])
 
     def create_consistencygroup(self, context, group_id):
         """Creates the consistency group."""
@@ -1658,7 +1661,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         try:
             utils.require_driver_initialized(self.driver)
 
-            LOG.info(_("Consistency group %s: creating"), group_ref['name'])
+            LOG.info(_LI("Consistency group %s: creating"), group_ref['name'])
             model_update = self.driver.create_consistencygroup(context,
                                                                group_ref)
 
@@ -1672,7 +1675,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                     context,
                     group_ref['id'],
                     {'status': 'error'})
-                LOG.error(_("Consistency group %s: create failed"),
+                LOG.error(_LE("Consistency group %s: create failed"),
                           group_ref['name'])
 
         now = timeutils.utcnow()
@@ -1680,7 +1683,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                         group_ref['id'],
                                         {'status': status,
                                          'created_at': now})
-        LOG.info(_("Consistency group %s: created successfully"),
+        LOG.info(_LI("Consistency group %s: created successfully"),
                  group_ref['name'])
 
         self._notify_about_consistencygroup_usage(
@@ -1699,7 +1702,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         else:
             project_id = context.project_id
 
-        LOG.info(_("Consistency group %s: deleting"), group_ref['id'])
+        LOG.info(_LI("Consistency group %s: deleting"), group_ref['id'])
 
         volumes = self.db.volume_get_all_by_group(context, group_id)
 
@@ -1764,8 +1767,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                                               **reserve_opts)
         except Exception:
             cgreservations = None
-            LOG.exception(_("Failed to update usages deleting "
-                          "consistency groups."))
+            LOG.exception(_LE("Failed to update usages deleting "
+                              "consistency groups."))
 
         for volume_ref in volumes:
             # Get reservations for volume
@@ -1781,7 +1784,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                               **reserve_opts)
             except Exception:
                 reservations = None
-                LOG.exception(_("Failed to update usages deleting volume."))
+                LOG.exception(_LE("Failed to update usages deleting volume."))
 
             # Delete glance metadata if it exists
             self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
@@ -1799,7 +1802,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                             project_id=project_id)
 
         self.db.consistencygroup_destroy(context, group_id)
-        LOG.info(_("Consistency group %s: deleted successfully."),
+        LOG.info(_LI("Consistency group %s: deleted successfully."),
                  group_id)
         self._notify_about_consistencygroup_usage(
             context, group_ref, "delete.end")
@@ -1812,7 +1815,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         caller_context = context
         context = context.elevated()
         cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
-        LOG.info(_("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
+        LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
 
         snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
                                                             cgsnapshot_id)
@@ -1869,9 +1872,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                     self.db.volume_glance_metadata_copy_to_snapshot(
                         context, snapshot['id'], volume_id)
                 except exception.CinderException as ex:
-                    LOG.error(_("Failed updating %(snapshot_id)s"
-                                " metadata using the provided volumes"
-                                " %(volume_id)s metadata") %
+                    LOG.error(_LE("Failed updating %(snapshot_id)s"
+                                  " metadata using the provided volume's"
+                                  " %(volume_id)s metadata") %
                               {'volume_id': volume_id,
                                'snapshot_id': snapshot_id})
                     self.db.snapshot_update(context,
@@ -1887,7 +1890,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                   cgsnapshot_ref['id'],
                                   {'status': 'available'})
 
-        LOG.info(_("cgsnapshot %s: created successfully"),
+        LOG.info(_LI("cgsnapshot %s: created successfully"),
                  cgsnapshot_ref['id'])
         self._notify_about_cgsnapshot_usage(
             context, cgsnapshot_ref, "create.end")
@@ -1900,7 +1903,7 @@ class VolumeManager(manager.SchedulerDependentManager):
         cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
         project_id = cgsnapshot_ref['project_id']
 
-        LOG.info(_("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
+        LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
 
         snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
                                                             cgsnapshot_id)
@@ -1969,7 +1972,7 @@ class VolumeManager(manager.SchedulerDependentManager):
 
             except Exception:
                 reservations = None
-                LOG.exception(_("Failed to update usages deleting snapshot"))
+                LOG.exception(_LE("Failed to update usages deleting snapshot"))
 
             self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                               snapshot['id'])
@@ -1980,7 +1983,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                 QUOTAS.commit(context, reservations, project_id=project_id)
 
         self.db.cgsnapshot_destroy(context, cgsnapshot_id)
-        LOG.info(_("cgsnapshot %s: deleted successfully"),
+        LOG.info(_LI("cgsnapshot %s: deleted successfully"),
                  cgsnapshot_ref['id'])
         self._notify_about_cgsnapshot_usage(
             context, cgsnapshot_ref, "delete.end")
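
Taken together, these manager.py hunks apply a single rule: each log level gets its own marker so messages can be collated into per-level translation catalogs, while plain _ stays reserved for user-facing strings such as exception text. A minimal sketch of the pairing, reusing messages from the hunks above (the logger setup and the example values are hypothetical):

from cinder.i18n import _LE, _LI, _LW
from cinder.openstack.common import log as logging

LOG = logging.getLogger(__name__)

volume_id = 'volume-1234'  # hypothetical value, for illustration only
volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver'  # likewise

# _LI pairs with LOG.info, _LW with LOG.warn/LOG.warning, and _LE with
# LOG.error/LOG.exception.  Passing the argument to the logger, as these
# hunks mostly do, defers %-interpolation until the record is emitted.
LOG.info(_LI("volume %s: deleting"), volume_id)
LOG.warn(_LW("Driver path %s is deprecated, update your "
             "configuration to the new path."), volume_driver)
LOG.error(_LE("Cannot delete volume %s: volume is busy"), volume_id)

Some hunks still interpolate eagerly with % before the call; both forms translate the same way, but the deferred form skips the formatting work when the level is filtered out.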