review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Implementing the use of _L'x'/i18n markers
author Mike Mason <mikemason010@gmail.com>
Tue, 11 Nov 2014 14:05:24 +0000 (14:05 +0000)
committer Mike Mason <mikemason010@gmail.com>
Fri, 14 Nov 2014 10:24:49 +0000 (10:24 +0000)
Placing the _Lx markers back into the code. No cleaner solution has
been implemented. Patches will be submitted in a series covering
subdirectories, in a manageable fashion.
This is the fifth commit of this kind.

Change-Id: I476750bf05ddee85dd5bf25c7bd00ee103d23115
Partial-Bug: #1384312
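
For context, _ translates user-facing strings (e.g. exception messages),
while the _L* variants mark log messages for level-specific translation
catalogs: _LI for info, _LW for warning, _LE for error. A minimal sketch
of the convention these patches apply (the logger setup and volume_name
are illustrative placeholders, not code from this commit):

    from cinder import exception
    from cinder.i18n import _, _LE, _LI, _LW
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    LOG.info(_LI("Created volume: %(name)s"), {'name': volume_name})
    LOG.warn(_LW("Volume %(name)s is already mapped."), {'name': volume_name})
    LOG.error(_LE("Failed to delete volume %(name)s"), {'name': volume_name})
    # _() stays reserved for messages surfaced to the API user:
    raise exception.VolumeBackendAPIException(
        data=_("Failed to delete volume %(name)s") % {'name': volume_name})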

15 files changed:
cinder/volume/drivers/emc/emc_vmax_common.py
cinder/volume/drivers/emc/emc_vmax_masking.py
cinder/volume/drivers/emc/emc_vmax_provision.py
cinder/volume/drivers/hds/hds.py
cinder/volume/drivers/hds/iscsi.py
cinder/volume/drivers/hds/nfs.py
cinder/volume/drivers/netapp/eseries/iscsi.py
cinder/volume/drivers/netapp/iscsi.py
cinder/volume/drivers/netapp/nfs.py
cinder/volume/drivers/nexenta/iscsi.py
cinder/volume/drivers/nexenta/jsonrpc.py
cinder/volume/drivers/nexenta/nfs.py
cinder/volume/drivers/remotefs.py
cinder/volume/drivers/vmware/datastore.py
cinder/volume/drivers/vmware/volumeops.py
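
A recurring detail in the hunks below: some call sites are also converted
from %-interpolating the message before logging to passing the mapping as
a logging argument, which defers interpolation until the record is
actually emitted. A minimal sketch of the two forms (volume_name is a
placeholder):

    # Interpolated eagerly, even when INFO logging is disabled:
    LOG.info(_LI("Deleting Volume: %(volume)s") % {'volume': volume_name})
    # Interpolated lazily by the logging machinery:
    LOG.info(_LI("Deleting Volume: %(volume)s"), {'volume': volume_name})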

index ccaa50363ab7637f08896878f0134c2e87fa01a0..ae7d9ce3dbdc14cd7ad59c078b469ac12f54a455 100644 (file)
@@ -19,7 +19,7 @@ from oslo.config import cfg
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.emc import emc_vmax_fast
 from cinder.volume.drivers.emc import emc_vmax_masking
@@ -79,7 +79,7 @@ class EMCVMAXCommon(object):
     def __init__(self, prtcl, configuration=None):
 
         if not pywbemAvailable:
-            LOG.info(_(
+            LOG.info(_LI(
                 'Module PyWBEM not installed.  '
                 'Install PyWBEM using the python-pywbem package.'))
 
@@ -176,10 +176,11 @@ class EMCVMAXCommon(object):
         # add the volume to the default storage group created for
         # volumes in pools associated with this fast policy
         if extraSpecs[FASTPOLICY]:
-            LOG.info(_("Adding volume: %(volumeName)s to default storage group"
-                       " for FAST policy: %(fastPolicyName)s ")
-                     % {'volumeName': volumeName,
-                        'fastPolicyName': extraSpecs[FASTPOLICY]})
+            LOG.info(_LI("Adding volume: %(volumeName)s to "
+                         "default storage group "
+                         "for FAST policy: %(fastPolicyName)s "),
+                     {'volumeName': volumeName,
+                      'fastPolicyName': extraSpecs[FASTPOLICY]})
             defaultStorageGroupInstanceName = (
                 self._get_or_create_default_storage_group(
                     self.conn, storageSystemName, volumeDict,
@@ -197,9 +198,9 @@ class EMCVMAXCommon(object):
                 volumeDict, volumeName, storageConfigService,
                 storageSystemName, extraSpecs[FASTPOLICY])
 
-        LOG.info(_("Leaving create_volume: %(volumeName)s  "
-                   "Return code: %(rc)lu "
-                   "volume dict: %(name)s")
+        LOG.info(_LI("Leaving create_volume: %(volumeName)s  "
+                     "Return code: %(rc)lu "
+                     "volume dict: %(name)s")
                  % {'volumeName': volumeName,
                     'rc': rc,
                     'name': volumeDict})
@@ -231,12 +232,12 @@ class EMCVMAXCommon(object):
 
         :param volume: volume Object
         """
-        LOG.info(_("Deleting Volume: %(volume)s")
+        LOG.info(_LI("Deleting Volume: %(volume)s")
                  % {'volume': volume['name']})
 
         rc, volumeName = self._delete_volume(volume)
-        LOG.info(_("Leaving delete_volume: %(volumename)s  Return code: "
-                   "%(rc)lu")
+        LOG.info(_LI("Leaving delete_volume: %(volumename)s  Return code: "
+                     "%(rc)lu")
                  % {'volumename': volumeName,
                     'rc': rc})
 
@@ -257,7 +258,7 @@ class EMCVMAXCommon(object):
         :param snapshot: snapshot object
         :param volume: volume Object to create snapshot from
         """
-        LOG.info(_("Delete Snapshot: %(snapshotName)s ")
+        LOG.info(_LI("Delete Snapshot: %(snapshotName)s ")
                  % {'snapshotName': snapshot['name']})
         rc, snapshotName = self._delete_volume(snapshot)
         LOG.debug("Leaving delete_snapshot: %(snapshotname)s  Return code: "
@@ -296,13 +297,13 @@ class EMCVMAXCommon(object):
         """
         extraSpecs = self._initial_setup(volume)
         volumename = volume['name']
-        LOG.info(_("Unmap volume: %(volume)s")
+        LOG.info(_LI("Unmap volume: %(volume)s")
                  % {'volume': volumename})
 
         device_info = self.find_device_number(volume, connector)
         device_number = device_info['hostlunid']
         if device_number is None:
-            LOG.info(_("Volume %s is not mapped. No volume to unmap.")
+            LOG.info(_LI("Volume %s is not mapped. No volume to unmap.")
                      % (volumename))
             return
 
@@ -349,7 +350,7 @@ class EMCVMAXCommon(object):
         extraSpecs = self._initial_setup(volume)
 
         volumeName = volume['name']
-        LOG.info(_("Initialize connection: %(volume)s")
+        LOG.info(_LI("Initialize connection: %(volume)s")
                  % {'volume': volumeName})
         self.conn = self._get_ecom_connection()
         deviceInfoDict = self._wrap_find_device_number(volume, connector)
@@ -357,8 +358,8 @@ class EMCVMAXCommon(object):
                 deviceInfoDict['hostlunid'] is not None):
             # Device is already mapped so we will leave the state as is
             deviceNumber = deviceInfoDict['hostlunid']
-            LOG.info(_("Volume %(volume)s is already mapped. "
-                       "The device number is  %(deviceNumber)s ")
+            LOG.info(_LI("Volume %(volume)s is already mapped. "
+                         "The device number is  %(deviceNumber)s ")
                      % {'volume': volumeName,
                         'deviceNumber': deviceNumber})
         else:
@@ -372,7 +373,7 @@ class EMCVMAXCommon(object):
             if 'hostlunid' not in deviceInfoDict:
                 # Did not successfully attach to host,
                 # so a rollback for FAST is required
-                LOG.error(_("Error Attaching volume %(vol)s ")
+                LOG.error(_LE("Error Attaching volume %(vol)s ")
                           % {'vol': volumeName})
                 if rollbackDict['fastPolicyName'] is not None:
                     (
@@ -409,7 +410,7 @@ class EMCVMAXCommon(object):
         self._initial_setup(volume)
 
         volumename = volume['name']
-        LOG.info(_("Terminate connection: %(volume)s")
+        LOG.info(_LI("Terminate connection: %(volume)s")
                  % {'volume': volumename})
 
         self.conn = self._get_ecom_connection()
@@ -563,14 +564,14 @@ class EMCVMAXCommon(object):
 
         poolName = self.utils.parse_pool_name_from_file(emcConfigFileName)
         if poolName is None:
-            LOG.error(_(
+            LOG.error(_LE(
                 "PoolName %(poolName)s must be in the file "
                 "%(emcConfigFileName)s ")
                 % {'poolName': poolName,
                    'emcConfigFileName': emcConfigFileName})
         arrayName = self.utils.parse_array_name_from_file(emcConfigFileName)
         if arrayName is None:
-            LOG.error(_(
+            LOG.error(_LE(
                 "Array Serial Number %(arrayName)s must be in the file "
                 "%(emcConfigFileName)s ")
                 % {'arrayName': arrayName,
@@ -603,10 +604,10 @@ class EMCVMAXCommon(object):
             total_capacity_gb, free_capacity_gb = (
                 self.fast.get_capacities_associated_to_policy(
                     self.conn, arrayName, fastPolicyName))
-            LOG.info(
+            LOG.info(_LI(
                 "FAST: capacity stats for policy %(fastPolicyName)s on "
                 "array %(arrayName)s (total_capacity_gb=%(total_capacity_gb)lu"
-                ", free_capacity_gb=%(free_capacity_gb)lu"
+                ", free_capacity_gb=%(free_capacity_gb)lu")
                 % {'fastPolicyName': fastPolicyName,
                    'arrayName': arrayName,
                    'total_capacity_gb': total_capacity_gb,
@@ -614,10 +615,10 @@ class EMCVMAXCommon(object):
         else:  # NON-FAST
             total_capacity_gb, free_capacity_gb = (
                 self.utils.get_pool_capacities(self.conn, poolName, arrayName))
-            LOG.info(
+            LOG.info(_LI(
                 "NON-FAST: capacity stats for pool %(poolName)s on array "
                 "%(arrayName)s (total_capacity_gb=%(total_capacity_gb)lu, "
-                "free_capacity_gb=%(free_capacity_gb)lu"
+                "free_capacity_gb=%(free_capacity_gb)lu")
                 % {'poolName': poolName,
                    'arrayName': arrayName,
                    'total_capacity_gb': total_capacity_gb,
@@ -665,7 +666,7 @@ class EMCVMAXCommon(object):
 
         volumeName = volume['name']
         volumeStatus = volume['status']
-        LOG.info(_("Migrating using retype Volume: %(volume)s")
+        LOG.info(_LI("Migrating using retype Volume: %(volume)s")
                  % {'volume': volumeName})
 
         extraSpecs = self._initial_setup(volume)
@@ -673,8 +674,8 @@ class EMCVMAXCommon(object):
 
         volumeInstance = self._find_lun(volume)
         if volumeInstance is None:
-            LOG.error(_("Volume %(name)s not found on the array. "
-                        "No volume to migrate using retype.")
+            LOG.error(_LE("Volume %(name)s not found on the array. "
+                          "No volume to migrate using retype.")
                       % {'name': volumeName})
             return False
 
@@ -685,8 +686,8 @@ class EMCVMAXCommon(object):
                 volumeName, volumeStatus))
 
         if not isValid:
-            LOG.error(_("Volume %(name)s is not suitable for storage "
-                        "assisted migration using retype")
+            LOG.error(_LE("Volume %(name)s is not suitable for storage "
+                          "assisted migration using retype")
                       % {'name': volumeName})
             return False
         if volume['host'] != host['host']:
@@ -713,12 +714,12 @@ class EMCVMAXCommon(object):
         :returns: boolean True/False
         :returns: list
         """
-        LOG.warn(_("The VMAX plugin only supports Retype.  "
-                   "If a pool based migration is necessary "
-                   "this will happen on a Retype "
-                   "From the command line: "
-                   "cinder --os-volume-api-version 2 retype "
-                   "<volumeId> <volumeType> --migration-policy on-demand"))
+        LOG.warn(_LW("The VMAX plugin only supports Retype.  "
+                     "If a pool based migration is necessary "
+                     "this will happen on a Retype "
+                     "From the command line: "
+                     "cinder --os-volume-api-version 2 retype "
+                     "<volumeId> <volumeType> --migration-policy on-demand"))
         return True, {}
 
     def _migrate_volume(
@@ -747,10 +748,10 @@ class EMCVMAXCommon(object):
         if moved is False and sourceFastPolicyName is not None:
             # Return the volume to the default source fast policy storage
             # group because the migrate was unsuccessful
-            LOG.warn(_("Failed to migrate: %(volumeName)s from "
-                       "default source storage group "
-                       "for FAST policy: %(sourceFastPolicyName)s "
-                       "Attempting cleanup... ")
+            LOG.warn(_LW("Failed to migrate: %(volumeName)s from "
+                         "default source storage group "
+                         "for FAST policy: %(sourceFastPolicyName)s "
+                         "Attempting cleanup... ")
                      % {'volumeName': volumeName,
                         'sourceFastPolicyName': sourceFastPolicyName})
             if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume(
@@ -773,8 +774,8 @@ class EMCVMAXCommon(object):
             if not self._migrate_volume_fast_target(
                     volumeInstance, storageSystemName,
                     targetFastPolicyName, volumeName):
-                LOG.warn(_("Attempting a rollback of: %(volumeName)s to "
-                           "original pool %(sourcePoolInstanceName)s ")
+                LOG.warn(_LW("Attempting a rollback of: %(volumeName)s to "
+                             "original pool %(sourcePoolInstanceName)s ")
                          % {'volumeName': volumeName,
                             'sourcePoolInstanceName': sourcePoolInstanceName})
                 self._migrate_rollback(
@@ -804,7 +805,7 @@ class EMCVMAXCommon(object):
         :returns: int, the return code from migrate operation
         """
 
-        LOG.warn(_("_migrate_rollback on : %(volumeName)s from ")
+        LOG.warn(_LW("_migrate_rollback on : %(volumeName)s from ")
                  % {'volumeName': volumeName})
 
         storageRelocationService = self.utils.find_storage_relocation_service(
@@ -842,7 +843,7 @@ class EMCVMAXCommon(object):
         :returns: int, the return code from migrate operation
         """
 
-        LOG.warn(_("_migrate_cleanup on : %(volumeName)s from ")
+        LOG.warn(_LW("_migrate_cleanup on : %(volumeName)s from ")
                  % {'volumeName': volumeName})
 
         controllerConfigurationService = (
@@ -891,8 +892,8 @@ class EMCVMAXCommon(object):
         :returns: boolean True/False
         """
         falseRet = False
-        LOG.info(_("Adding volume: %(volumeName)s to default storage group "
-                   "for FAST policy: %(fastPolicyName)s ")
+        LOG.info(_LI("Adding volume: %(volumeName)s to default storage group "
+                     "for FAST policy: %(fastPolicyName)s ")
                  % {'volumeName': volumeName,
                     'fastPolicyName': targetFastPolicyName})
 
@@ -972,7 +973,7 @@ class EMCVMAXCommon(object):
         except Exception as e:
             # rollback by deleting the volume if adding the volume to the
             # default storage group were to fail
-            LOG.error(_("Exception: %s") % six.text_type(e))
+            LOG.error(_LE("Exception: %s") % six.text_type(e))
             exceptionMessage = (_("Error migrating volume: %(volumename)s. "
                                   "to target pool  %(targetPoolName)s. ")
                                 % {'volumename': volumeName,
@@ -1030,7 +1031,7 @@ class EMCVMAXCommon(object):
                     conn, controllerConfigurationService,
                     volumeInstance.path, volumeName, sourceFastPolicyName))
         except Exception as ex:
-            LOG.error(_("Exception: %s") % six.text_type(ex))
+            LOG.error(_LE("Exception: %s") % six.text_type(ex))
             exceptionMessage = (_("Failed to remove: %(volumename)s. "
                                   "from the default storage group for "
                                   "FAST policy %(fastPolicyName)s. ")
@@ -1096,7 +1097,7 @@ class EMCVMAXCommon(object):
         """
         falseRet = (False, None, None)
         if 'location_info' not in host['capabilities']:
-            LOG.error(_('Error getting target pool name and array'))
+            LOG.error(_LE('Error getting target pool name and array'))
             return falseRet
         info = host['capabilities']['location_info']
 
@@ -1108,8 +1109,8 @@ class EMCVMAXCommon(object):
             targetPoolName = infoDetail[1]
             targetFastPolicy = infoDetail[2]
         except Exception:
-            LOG.error(_("Error parsing target pool name, array, "
-                        "and fast policy"))
+            LOG.error(_LE("Error parsing target pool name, array, "
+                          "and fast policy"))
 
         if targetArraySerialNumber not in sourceArraySerialNumber:
             errorMessage = (_(
@@ -1419,7 +1420,7 @@ class EMCVMAXCommon(object):
                 rc, targetEndpoints = self.provision.get_target_endpoints(
                     self.conn, storageHardwareService, hardwareIdInstance)
             except Exception as ex:
-                LOG.error(_("Exception: %s") % six.text_type(ex))
+                LOG.error(_LE("Exception: %s") % six.text_type(ex))
                 errorMessage = (_(
                     "Unable to get target endpoints for hardwareId "
                     "%(hardwareIdInstance)s")
@@ -1438,7 +1439,7 @@ class EMCVMAXCommon(object):
                     if not any(d == wwn for d in targetWwns):
                         targetWwns.append(wwn)
             else:
-                LOG.error(_(
+                LOG.error(_LE(
                     "Target end points do not exist for hardware Id : "
                     "%(hardwareIdInstance)s ")
                     % {'hardwareIdInstance': hardwareIdInstance})
@@ -1726,7 +1727,7 @@ class EMCVMAXCommon(object):
         except Exception as e:
             # rollback by deleting the volume if adding the volume to the
             # default storage group were to fail
-            LOG.error(_("Exception: %s") % six.text_type(e))
+            LOG.error(_LE("Exception: %s") % six.text_type(e))
             errorMessage = (_(
                 "Rolling back %(volumeName)s by deleting it. ")
                 % {'volumeName': volumeName})
@@ -1882,8 +1883,9 @@ class EMCVMAXCommon(object):
         sourceName = sourceVolume['name']
         cloneName = cloneVolume['name']
 
-        LOG.info(_("Create a Clone from Volume: Clone Volume: %(cloneName)s  "
-                   "Source Volume: %(sourceName)s")
+        LOG.info(_LI("Create a Clone from Volume: Clone "
+                     "Volume: %(cloneName)s  "
+                     "Source Volume: %(sourceName)s")
                  % {'cloneName': cloneName,
                     'sourceName': sourceName})
 
@@ -1987,8 +1989,8 @@ class EMCVMAXCommon(object):
 
         volumeInstance = self._find_lun(volume)
         if volumeInstance is None:
-            LOG.error(_("Volume %(name)s not found on the array. "
-                        "No volume to delete.")
+            LOG.error(_LE("Volume %(name)s not found on the array. "
+                          "No volume to delete.")
                       % {'name': volumeName})
             return errorRet
 
@@ -2058,7 +2060,7 @@ class EMCVMAXCommon(object):
                            'fastPolicyName': fastPolicyName})
                     LOG.error(errorMsg)
 
-            LOG.error(_("Exception: %s") % six.text_type(e))
+            LOG.error(_LE("Exception: %s") % six.text_type(e))
             errorMessage = (_("Failed to delete volume %(volumeName)s")
                             % {'volumeName': volumeName})
             LOG.error(errorMessage)
@@ -2081,9 +2083,10 @@ class EMCVMAXCommon(object):
             self.masking.get_associated_masking_group_from_device(
                 self.conn, volumeInstanceName))
         if storageGroupInstanceName is not None:
-            LOG.warn(_("Pre check for deletion "
-                       "Volume: %(volumeName)s is part of a storage group "
-                       "Attempting removal from %(storageGroupInstanceName)s ")
+            LOG.warn(_LW("Pre check for deletion "
+                         "Volume: %(volumeName)s is part of a storage group "
+                         "Attempting removal "
+                         "from %(storageGroupInstanceName)s ")
                      % {'volumeName': volumeName,
                         'storageGroupInstanceName': storageGroupInstanceName})
             self.provision.remove_device_from_storage_group(
index 4386a1874535d27a3f838ddc3a7255c38133b91f..dd3324ad45fe2a336b82d30a37258fc70931b609 100644 (file)
@@ -15,7 +15,7 @@
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.emc import emc_vmax_fast
 from cinder.volume.drivers.emc import emc_vmax_provision
@@ -182,7 +182,7 @@ class EMCVMAXMasking(object):
                 if self._is_volume_in_storage_group(
                         conn, storageGroupInstanceName,
                         volumeInstance):
-                    LOG.warn(_(
+                    LOG.warn(_LW(
                         "Volume: %(volumeName)s is already part "
                         "of storage group %(sgGroupName)s ")
                         % {'volumeName': volumeName,
@@ -205,7 +205,7 @@ class EMCVMAXMasking(object):
                     conn, controllerConfigService, volumeInstance, volumeName,
                     fastPolicyName, defaultStorageGroupInstanceName)
 
-            LOG.error(_("Exception: %s") % six.text_type(e))
+            LOG.error(_LE("Exception: %s") % six.text_type(e))
             errorMessage = (_(
                 "Failed to get or create masking view %(maskingViewName)s ")
                 % {'maskingViewName': maskingViewName})
@@ -253,7 +253,7 @@ class EMCVMAXMasking(object):
                 % {'foundElement': foundStorageGroupInstance['ElementName']})
             if (foundStorageGroupInstance['ElementName'] == (
                     storageGroupInstance['ElementName'])):
-                LOG.warn(_(
+                LOG.warn(_LW(
                     "The volume is already part of storage group: "
                     "%(storageGroupInstanceName)s. ")
                     % {'storageGroupInstanceName': storageGroupInstanceName})
@@ -318,12 +318,12 @@ class EMCVMAXMasking(object):
                 conn, controllerConfigService, storageGroupName,
                 volumeInstance.path))
         if foundStorageGroupInstanceName is None:
-            LOG.error(_(
+            LOG.error(_LE(
                 "Cannot get storage Group from job : %(storageGroupName)s. ")
                 % {'storageGroupName': storageGroupName})
             return failedRet
         else:
-            LOG.info(_(
+            LOG.info(_LI(
                 "Created new storage group: %(storageGroupName)s ")
                 % {'storageGroupName': storageGroupName})
 
@@ -335,7 +335,7 @@ class EMCVMAXMasking(object):
                     foundStorageGroupInstanceName,
                     storageGroupName, fastPolicyName))
             if assocTierPolicyInstanceName is None:
-                LOG.error(_(
+                LOG.error(_LE(
                     "Cannot add and verify tier policy association for storage"
                     " group : %(storageGroupName)s to FAST policy : "
                     "%(fastPolicyName)s. ")
@@ -365,7 +365,7 @@ class EMCVMAXMasking(object):
                 break
 
         if foundPortGroupInstanceName is None:
-            LOG.error(_(
+            LOG.error(_LE(
                 "Could not find port group : %(portGroupName)s. Check that the"
                 " EMC configuration file has the correct port group name. ")
                 % {'portGroupName': portGroupName})
@@ -409,7 +409,7 @@ class EMCVMAXMasking(object):
                 self._get_storage_hardware_id_instance_names(
                     conn, initiatorNames, storageSystemName))
             if not storageHardwareIDInstanceNames:
-                LOG.error(_(
+                LOG.error(_LE(
                     "Initiator Name(s) %(initiatorNames)s are not on array "
                     "%(storageSystemName)s ")
                     % {'initiatorNames': initiatorNames,
@@ -420,10 +420,11 @@ class EMCVMAXMasking(object):
                 conn, controllerConfigService, igGroupName,
                 storageHardwareIDInstanceNames)
 
-            LOG.info("Created new initiator group name: %(igGroupName)s "
+            LOG.info(_LI("Created new initiator group name: %(igGroupName)s ")
                      % {'igGroupName': igGroupName})
         else:
-            LOG.info("Using existing initiator group name: %(igGroupName)s "
+            LOG.info(_LI("Using existing initiator "
+                         "group name: %(igGroupName)s ")
                      % {'igGroupName': igGroupName})
 
         return foundInitiatorGroupInstanceName
@@ -592,7 +593,7 @@ class EMCVMAXMasking(object):
                 raise exception.VolumeBackendAPIException(
                     data=exceptionMessage)
 
-        LOG.info(_("Created new masking view : %(maskingViewName)s ")
+        LOG.info(_LI("Created new masking view : %(maskingViewName)s ")
                  % {'maskingViewName': maskingViewName})
         return rc, job
 
@@ -689,8 +690,8 @@ class EMCVMAXMasking(object):
         else:
             if self._is_volume_in_storage_group(
                     conn, storageGroupInstanceName, volumeInstance):
-                LOG.warn(_("Volume: %(volumeName)s is already "
-                           "part of storage group %(sgGroupName)s ")
+                LOG.warn(_LW("Volume: %(volumeName)s is already "
+                             "part of storage group %(sgGroupName)s ")
                          % {'volumeName': volumeName,
                             'sgGroupName': sgGroupName})
             else:
@@ -723,7 +724,7 @@ class EMCVMAXMasking(object):
             LOG.error(errorMessage)
             return foundPortGroupInstanceName
 
-        LOG.info(_(
+        LOG.info(_LI(
             "Port group instance name is %(foundPortGroupInstanceName)s")
             % {'foundPortGroupInstanceName': foundPortGroupInstanceName})
 
@@ -844,7 +845,7 @@ class EMCVMAXMasking(object):
                         conn, controllerConfigService, volumeInstance,
                         fastPolicyName, volumeName)
         except Exception as e:
-            LOG.error(_("Exception: %s") % six.text_type(e))
+            LOG.error(_LE("Exception: %s") % six.text_type(e))
             errorMessage = (_(
                 "Rollback for Volume: %(volumeName)s has failed. "
                 "Please contact your system administrator to manually return "
@@ -940,7 +941,7 @@ class EMCVMAXMasking(object):
                         self._get_storage_hardware_id_instance_names(
                             conn, initiatorNames, storageSystemName))
                     if not storageHardwareIDInstanceNames:
-                        LOG.error(_(
+                        LOG.error(_LE(
                             "Initiator Name(s) %(initiatorNames)s are not on "
                             "array %(storageSystemName)s ")
                             % {'initiatorNames': initiatorNames,
@@ -973,7 +974,7 @@ class EMCVMAXMasking(object):
                             "%(maskingViewName)s.  "
                             % {'maskingViewName': maskingViewName})
                 else:
-                    LOG.error(_(
+                    LOG.error(_LE(
                         "One of the components of the original masking view "
                         "%(maskingViewName)s cannot be retrieved so "
                         "please contact your system administrator to check "
@@ -1314,7 +1315,7 @@ class EMCVMAXMasking(object):
                 tierPolicyInstanceName = self.fast.get_tier_policy_by_name(
                     conn, storageSystemInstanceName['Name'], fastPolicyName)
 
-                LOG.info(_(
+                LOG.info(_LI(
                     "policy:%(policy)s, policy service:%(service)s, "
                     "masking group=%(maskingGroup)s")
                     % {'policy': tierPolicyInstanceName,
index 9d9ffa12b15329be36cb6ca659b3ac44ab60ec7a..4a58a19677f77d7141fa5ff0a9948c0dd77c370c 100644 (file)
@@ -16,7 +16,7 @@
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.emc import emc_vmax_utils
 
@@ -490,7 +490,7 @@ class EMCVMAXProvision(object):
                     rc = self._terminate_migrate_session(
                         conn, volumeInstanceName)
                 except Exception as ex:
-                    LOG.error(_("Exception: %s") % six.text_type(ex))
+                    LOG.error(_LE("Exception: %s") % six.text_type(ex))
                     exceptionMessage = (_(
                         "Failed to terminate migrate session"))
                     LOG.error(exceptionMessage)
@@ -501,7 +501,7 @@ class EMCVMAXProvision(object):
                         conn, storageRelocationServiceInstanceName,
                         volumeInstanceName, targetPoolInstanceName)
                 except Exception as ex:
-                    LOG.error(_("Exception: %s") % six.text_type(ex))
+                    LOG.error(_LE("Exception: %s") % six.text_type(ex))
                     exceptionMessage = (_(
                         "Failed to migrate volume for the second time"))
                     LOG.error(exceptionMessage)
@@ -509,7 +509,7 @@ class EMCVMAXProvision(object):
                         data=exceptionMessage)
 
             else:
-                LOG.error(_("Exception: %s") % six.text_type(ex))
+                LOG.error(_LE("Exception: %s") % six.text_type(ex))
                 exceptionMessage = (_(
                     "Failed to migrate volume for the first time"))
                 LOG.error(exceptionMessage)
index f08498f54dd57e3d7ec2237beb6edc67a7a810d5..0f40d9b479e6144200d7b927c17becd86ab54000 100644 (file)
@@ -24,7 +24,7 @@ from xml.etree import ElementTree as ETree
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
 from cinder import utils
@@ -92,7 +92,7 @@ def _xml_read(root, element, check=None):
     """Read an xml element."""
     try:
         val = root.findtext(element)
-        LOG.info(_("%(element)s: %(val)s")
+        LOG.info(_LI("%(element)s: %(val)s")
                  % {'element': element,
                     'val': val})
         if val:
@@ -103,9 +103,9 @@ def _xml_read(root, element, check=None):
     except ETree.ParseError:
         if check:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("XML exception reading parameter: %s") % element)
+                LOG.error(_LE("XML exception reading parameter: %s") % element)
         else:
-            LOG.info(_("XML exception reading parameter: %s") % element)
+            LOG.info(_LI("XML exception reading parameter: %s") % element)
             return None
 
 
@@ -197,7 +197,7 @@ class HUSDriver(driver.ISCSIDriver):
             service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
                        svc['port'], svc['hdp'])  # ip, ipp, ctl, port, hdp
         else:
-            LOG.error(_("No configuration found for service: %s") % label)
+            LOG.error(_LE("No configuration found for service: %s") % label)
             raise exception.ParameterNotFound(param=label)
         return service
 
@@ -250,7 +250,7 @@ class HUSDriver(driver.ISCSIDriver):
         lst.extend([self.config['snapshot_hdp'], ])
         for hdp in lst:
             if hdp not in hdpl:
-                LOG.error(_("HDP not found: %s") % hdp)
+                LOG.error(_LE("HDP not found: %s") % hdp)
                 err = "HDP not found: " + hdp
                 raise exception.ParameterNotFound(param=err)
 
@@ -289,7 +289,8 @@ class HUSDriver(driver.ISCSIDriver):
                 self.config['services'][svc]['iscsi_port'] = (
                     iscsi_info[svc_ip]['iscsi_port'])
             else:          # config iscsi address not found on device!
-                LOG.error(_("iSCSI portal not found for service: %s") % svc_ip)
+                LOG.error(_LE("iSCSI portal not found "
+                              "for service: %s") % svc_ip)
                 raise exception.ParameterNotFound(param=svc_ip)
         return
 
index 11da3541c687324e8600e2f99870a6e279573a2a..b89ad6201745758985ff72dea9b1a0ebc63d6662 100644 (file)
@@ -23,7 +23,7 @@ from xml.etree import ElementTree as ETree
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import units
@@ -70,7 +70,7 @@ def _xml_read(root, element, check=None):
 
     try:
         val = root.findtext(element)
-        LOG.info(_("%(element)s: %(val)s")
+        LOG.info(_LI("%(element)s: %(val)s")
                  % {'element': element,
                     'val': val})
         if val:
@@ -81,9 +81,10 @@ def _xml_read(root, element, check=None):
     except ETree.ParseError:
         if check:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("XML exception reading parameter: %s") % element)
+                LOG.error(_LE("XML exception reading "
+                              "parameter: %s") % element)
         else:
-            LOG.info(_("XML exception reading parameter: %s") % element)
+            LOG.info(_LI("XML exception reading parameter: %s") % element)
             return None
 
 
@@ -144,7 +145,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
         self.type = 'HNAS'
 
         self.platform = self.type.lower()
-        LOG.info(_("Backend type: %s") % self.type)
+        LOG.info(_LI("Backend type: %s") % self.type)
         self.bend = factory_bend(self.type)
 
     def _array_info_get(self):
@@ -202,8 +203,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
         if label not in self.config['services'].keys():
             # default works if no match is found
             label = 'default'
-            LOG.info(_("Using default: instead of %s") % label)
-            LOG.info(_("Available services: %s")
+            LOG.info(_LI("Using default: instead of %s") % label)
+            LOG.info(_LI("Available services: %s")
                      % self.config['services'].keys())
 
         if label in self.config['services'].keys():
@@ -215,7 +216,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
             if self.config['chap_enabled'] == 'True':
                 # it may not exist, create and set secret
                 if 'iscsi_secret' not in svc:
-                    LOG.info(_("Retrieving secret for service: %s")
+                    LOG.info(_LI("Retrieving secret for service: %s")
                              % label)
 
                     out = self.bend.get_targetsecret(self.config['hnas_cmd'],
@@ -249,7 +250,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
                 svc['iscsi_secret'] = ""
 
             if 'iscsi_target' not in svc:
-                LOG.info(_("Retrieving target for service: %s") % label)
+                LOG.info(_LI("Retrieving target for service: %s") % label)
 
                 out = self.bend.get_targetiqn(self.config['hnas_cmd'],
                                               self.config['mgmt_ip0'],
@@ -266,9 +267,9 @@ class HDSISCSIDriver(driver.ISCSIDriver):
                        svc['port'], svc['hdp'], svc['iscsi_target'],
                        svc['iscsi_secret'])
         else:
-            LOG.info(_("Available services: %s")
+            LOG.info(_LI("Available services: %s")
                      % self.config['services'].keys())
-            LOG.error(_("No configuration found for service: %s")
+            LOG.error(_LE("No configuration found for service: %s")
                       % label)
             raise exception.ParameterNotFound(param=label)
 
@@ -308,7 +309,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
         hnas_stat['QoS_support'] = False
         hnas_stat['reserved_percentage'] = 0
 
-        LOG.info(_("stats: stats: %s") % hnas_stat)
+        LOG.info(_LI("stats: stats: %s") % hnas_stat)
         return hnas_stat
 
     def _get_hdp_list(self):
@@ -331,7 +332,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
                     hdp_list.extend(inf[1:2])
 
         # returns a list of HDP IDs
-        LOG.info(_("HDP list: %s") % hdp_list)
+        LOG.info(_LI("HDP list: %s") % hdp_list)
         return hdp_list
 
     def _check_hdp_list(self):
@@ -346,7 +347,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
 
         for hdp in lst:
             if hdp not in hdpl:
-                LOG.error(_("HDP not found: %s") % hdp)
+                LOG.error(_LE("HDP not found: %s") % hdp)
                 err = "HDP not found: " + hdp
                 raise exception.ParameterNotFound(param=err)
             # status, verify corresponding status is Normal
@@ -382,18 +383,19 @@ class HDSISCSIDriver(driver.ISCSIDriver):
         self._check_hdp_list()
 
         iscsi_info = self._get_iscsi_info()
-        LOG.info(_("do_setup: %s") % iscsi_info)
+        LOG.info(_LI("do_setup: %s") % iscsi_info)
         for svc in self.config['services'].keys():
             svc_ip = self.config['services'][svc]['iscsi_ip']
             if svc_ip in iscsi_info.keys():
-                LOG.info(_("iSCSI portal found for service: %s") % svc_ip)
+                LOG.info(_LI("iSCSI portal found for service: %s") % svc_ip)
                 self.config['services'][svc]['port'] = \
                     iscsi_info[svc_ip]['port']
                 self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl']
                 self.config['services'][svc]['iscsi_port'] = \
                     iscsi_info[svc_ip]['iscsi_port']
             else:          # config iscsi address not found on device!
-                LOG.error(_("iSCSI portal not found for service: %s") % svc_ip)
+                LOG.error(_LE("iSCSI portal not found "
+                              "for service: %s") % svc_ip)
                 raise exception.ParameterNotFound(param=svc_ip)
 
     def ensure_export(self, context, volume):
@@ -439,13 +441,13 @@ class HDSISCSIDriver(driver.ISCSIDriver):
                                   '%s' % (int(volume['size']) * units.Ki),
                                   volume['name'])
 
-        LOG.info(_("create_volume: create_lu returns %s") % out)
+        LOG.info(_LI("create_volume: create_lu returns %s") % out)
 
         lun = self.arid + '.' + out.split()[1]
         sz = int(out.split()[5])
 
         # Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd
-        LOG.info(_("LUN %(lun)s of size %(sz)s MB is created.")
+        LOG.info(_LI("LUN %(lun)s of size %(sz)s MB is created.")
                  % {'lun': lun, 'sz': sz})
         return {'provider_location': lun}
 
@@ -496,7 +498,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
                              '%s' % (new_size * units.Ki),
                              volume['name'])
 
-        LOG.info(_("LUN %(lun)s extended to %(size)s GB.")
+        LOG.info(_LI("LUN %(lun)s extended to %(size)s GB.")
                  % {'lun': lun, 'size': new_size})
 
     def delete_volume(self, volume):
@@ -678,7 +680,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
         myid = self.arid
 
         if arid != myid:
-            LOG.error(_('Array mismatch %(myid)s vs %(arid)s')
+            LOG.error(_LE('Array mismatch %(myid)s vs %(arid)s')
                       % {'myid': myid,
                          'arid': arid})
             msg = 'Array id mismatch in delete snapshot'
index c68a6f976575d079533b0b85b615e3bace4926d4..66b408837f21a22d524682e7f18dbc10e353c9e0 100644 (file)
@@ -24,7 +24,7 @@ from xml.etree import ElementTree as ETree
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.image import image_utils
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
@@ -59,7 +59,7 @@ def _xml_read(root, element, check=None):
 
     try:
         val = root.findtext(element)
-        LOG.info(_("%(element)s: %(val)s")
+        LOG.info(_LI("%(element)s: %(val)s")
                  % {'element': element,
                     'val': val})
         if val:
@@ -70,9 +70,9 @@ def _xml_read(root, element, check=None):
     except ETree.ParseError:
         if check:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("XML exception reading parameter: %s") % element)
+                LOG.error(_LE("XML exception reading parameter: %s") % element)
         else:
-            LOG.info(_("XML exception reading parameter: %s") % element)
+            LOG.info(_LI("XML exception reading parameter: %s") % element)
             return None
 
 
@@ -187,9 +187,9 @@ class HDSNFSDriver(nfs.NfsDriver):
             LOG.info("Get service: %s->%s" % (label, svc['fslabel']))
             service = (svc['hdp'], svc['path'], svc['fslabel'])
         else:
-            LOG.info(_("Available services: %s")
+            LOG.info(_LI("Available services: %s")
                      % self.config['services'].keys())
-            LOG.error(_("No configuration found for service: %s") % label)
+            LOG.error(_LE("No configuration found for service: %s") % label)
             raise exception.ParameterNotFound(param=label)
 
         return service
@@ -213,10 +213,10 @@ class HDSNFSDriver(nfs.NfsDriver):
         if self._is_file_size_equal(path, new_size):
             return
         else:
-            LOG.info(_('Resizing file to %sG'), new_size)
+            LOG.info(_LI('Resizing file to %sG'), new_size)
             image_utils.resize_image(path, new_size)
             if self._is_file_size_equal(path, new_size):
-                LOG.info(_("LUN %(id)s extended to %(size)s GB.")
+                LOG.info(_LI("LUN %(id)s extended to %(size)s GB.")
                          % {'id': volume['id'], 'size': new_size})
                 return
             else:
@@ -349,8 +349,8 @@ class HDSNFSDriver(nfs.NfsDriver):
                 tries += 1
                 if tries >= self.configuration.num_shell_tries:
                     raise
-                LOG.exception(_("Recovering from a failed execute.  "
-                                "Try number %s"), tries)
+                LOG.exception(_LE("Recovering from a failed execute.  "
+                                  "Try number %s"), tries)
                 time.sleep(tries ** 2)
 
     def _get_volume_path(self, nfs_share, volume_name):
index 889a7f2e4861047b3fd00e5ee81e8fce79fda2ac..1ef84cc3586f975111042e9b2af2064e2c83fdda 100644 (file)
@@ -24,7 +24,7 @@ from oslo.config import cfg
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import units
@@ -143,7 +143,7 @@ class Driver(driver.ISCSIDriver):
                 ip = utils.resolve_hostname(host)
                 return ip
             except socket.gaierror as e:
-                LOG.error(_('Error resolving host %(host)s. Error - %(e)s.')
+                LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
                           % {'host': host, 'e': e})
                 return None
 
@@ -156,10 +156,10 @@ class Driver(driver.ISCSIDriver):
             msg = _('Controller ips not valid after resolution.')
             raise exception.NoValidHost(reason=msg)
         if host in ips:
-            LOG.info(_('Embedded mode detected.'))
+            LOG.info(_LI('Embedded mode detected.'))
             system = self._client.list_storage_systems()[0]
         else:
-            LOG.info(_('Proxy mode detected.'))
+            LOG.info(_LI('Proxy mode detected.'))
             system = self._client.register_storage_system(
                 ips, password=self.configuration.netapp_sa_password)
         self._client.set_system_id(system.get('id'))
@@ -188,7 +188,7 @@ class Driver(driver.ISCSIDriver):
             # password was not in sync previously.
             if ((status == 'nevercontacted') or
                     (password_not_in_sync and status == 'passwordoutofsync')):
-                LOG.info(_('Waiting for web service array communication.'))
+                LOG.info(_LI('Waiting for web service array communication.'))
                 time.sleep(self.SLEEP_SECS)
                 comm_time = comm_time + self.SLEEP_SECS
                 if comm_time >= sa_comm_timeout:
@@ -204,7 +204,7 @@ class Driver(driver.ISCSIDriver):
                 status == 'offline'):
             msg = _("System %(id)s found with bad status - %(status)s.")
             raise exception.NetAppDriverException(msg % msg_dict)
-        LOG.info(_("System %(id)s has %(status)s status.") % msg_dict)
+        LOG.info(_LI("System %(id)s has %(status)s status.") % msg_dict)
         return True
 
     def _populate_system_objects(self):
@@ -220,7 +220,7 @@ class Driver(driver.ISCSIDriver):
     def _cache_allowed_disk_pool_refs(self):
         """Caches disk pools refs as per pools configured by user."""
         d_pools = self.configuration.netapp_storage_pools
-        LOG.info(_('Configured storage pools %s.'), d_pools)
+        LOG.info(_LI('Configured storage pools %s.'), d_pools)
         pools = [x.strip().lower() if x else None for x in d_pools.split(',')]
         for pool in self._client.list_storage_pools():
             if (pool.get('raidLevel') == 'raidDiskPool'
@@ -385,10 +385,11 @@ class Driver(driver.ISCSIDriver):
         try:
             vol = self._client.create_volume(target_pool['volumeGroupRef'],
                                              eseries_volume_label, size_gb)
-            LOG.info(_("Created volume with label %s."), eseries_volume_label)
+            LOG.info(_LI("Created volume with "
+                         "label %s."), eseries_volume_label)
         except exception.NetAppDriverException as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error creating volume. Msg - %s."),
+                LOG.error(_LE("Error creating volume. Msg - %s."),
                           six.text_type(e))
 
         return vol
@@ -400,10 +401,10 @@ class Driver(driver.ISCSIDriver):
             try:
                 vol = self._client.create_volume(pool['volumeGroupRef'],
                                                  label, size_gb)
-                LOG.info(_("Created volume with label %s."), label)
+                LOG.info(_LI("Created volume with label %s."), label)
                 return vol
             except exception.NetAppDriverException as e:
-                LOG.error(_("Error creating volume. Msg - %s."), e)
+                LOG.error(_LE("Error creating volume. Msg - %s."), e)
         msg = _("Failure creating volume %s.")
         raise exception.NetAppDriverException(msg % label)
 
@@ -417,7 +418,7 @@ class Driver(driver.ISCSIDriver):
             src_vol = self._create_snapshot_volume(snapshot['id'])
             self._copy_volume_high_prior_readonly(src_vol, dst_vol)
             self._cache_volume(dst_vol)
-            LOG.info(_("Created volume with label %s."), label)
+            LOG.info(_LI("Created volume with label %s."), label)
         except exception.NetAppDriverException:
             with excutils.save_and_reraise_exception():
                 self._client.delete_volume(dst_vol['volumeRef'])
@@ -426,9 +427,9 @@ class Driver(driver.ISCSIDriver):
                 try:
                     self._client.delete_snapshot_volume(src_vol['id'])
                 except exception.NetAppDriverException as e:
-                    LOG.error(_("Failure deleting snap vol. Error: %s."), e)
+                    LOG.error(_LE("Failure deleting snap vol. Error: %s."), e)
             else:
-                LOG.warn(_("Snapshot volume not found."))
+                LOG.warn(_LW("Snapshot volume not found."))
 
     def _create_snapshot_volume(self, snapshot_id):
         """Creates snapshot volume for given group with snapshot_id."""
@@ -444,7 +445,7 @@ class Driver(driver.ISCSIDriver):
 
     def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
         """Copies src volume to dest volume."""
-        LOG.info(_("Copying src vol %(src)s to dest vol %(dst)s.")
+        LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
                  % {'src': src_vol['label'], 'dst': dst_vol['label']})
         try:
             job = None
@@ -457,11 +458,11 @@ class Driver(driver.ISCSIDriver):
                     time.sleep(self.SLEEP_SECS)
                     continue
                 if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
-                    LOG.error(_("Vol copy job status %s."), j_st['status'])
+                    LOG.error(_LE("Vol copy job status %s."), j_st['status'])
                     msg = _("Vol copy job for dest %s failed.")\
                         % dst_vol['label']
                     raise exception.NetAppDriverException(msg)
-                LOG.info(_("Vol copy job completed for dest %s.")
+                LOG.info(_LI("Vol copy job completed for dest %s.")
                          % dst_vol['label'])
                 break
         finally:
@@ -469,11 +470,12 @@ class Driver(driver.ISCSIDriver):
                 try:
                     self._client.delete_vol_copy_job(job['volcopyRef'])
                 except exception.NetAppDriverException:
-                    LOG.warn(_("Failure deleting job %s."), job['volcopyRef'])
+                    LOG.warn(_LW("Failure deleting "
+                                 "job %s."), job['volcopyRef'])
             else:
-                LOG.warn(_('Volume copy job for src vol %s not found.'),
+                LOG.warn(_LW('Volume copy job for src vol %s not found.'),
                          src_vol['id'])
-        LOG.info(_('Copy job to dest vol %s completed.'), dst_vol['label'])
+        LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
@@ -485,7 +487,7 @@ class Driver(driver.ISCSIDriver):
             try:
                 self.delete_snapshot(snapshot)
             except exception.NetAppDriverException:
-                LOG.warn(_("Failure deleting temp snapshot %s."),
+                LOG.warn(_LW("Failure deleting temp snapshot %s."),
                          snapshot['id'])
 
     def delete_volume(self, volume):
@@ -494,7 +496,7 @@ class Driver(driver.ISCSIDriver):
             vol = self._get_volume(volume['id'])
             self._delete_volume(vol['label'])
         except KeyError:
-            LOG.info(_("Volume %s already deleted."), volume['id'])
+            LOG.info(_LI("Volume %s already deleted."), volume['id'])
             return
 
     def _delete_volume(self, label):
@@ -518,7 +520,7 @@ class Driver(driver.ISCSIDriver):
             snap_image = self._client.create_snapshot_image(
                 snap_grp['pitGroupRef'])
             self._cache_snap_img(snap_image)
-            LOG.info(_("Created snap grp with label %s."), snapshot_name)
+            LOG.info(_LI("Created snap grp with label %s."), snapshot_name)
         except exception.NetAppDriverException:
             with excutils.save_and_reraise_exception():
                 if snap_image is None and snap_grp:
@@ -529,7 +531,7 @@ class Driver(driver.ISCSIDriver):
         try:
             snap_grp = self._get_cached_snapshot_grp(snapshot['id'])
         except KeyError:
-            LOG.warn(_("Snapshot %s already deleted.") % snapshot['id'])
+            LOG.warn(_LW("Snapshot %s already deleted.") % snapshot['id'])
             return
         self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
         snapshot_name = snap_grp['label']
@@ -651,7 +653,7 @@ class Driver(driver.ISCSIDriver):
                     LOG.warn(msg % {'l': host['label'], 'e': e.msg})
                     return host
         except exception.NotFound as e:
-            LOG.warn(_("Message - %s."), e.msg)
+            LOG.warn(_LW("Message - %s."), e.msg)
             return self._create_host(port_id, host_type)
 
     def _get_host_with_port(self, port_id):
@@ -669,7 +671,7 @@ class Driver(driver.ISCSIDriver):
 
     def _create_host(self, port_id, host_type):
         """Creates host on system with given initiator as port_id."""
-        LOG.info(_("Creating host with port %s."), port_id)
+        LOG.info(_LI("Creating host with port %s."), port_id)
         label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
         port_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
         host_type = self._get_host_type_definition(host_type)
@@ -791,7 +793,7 @@ class Driver(driver.ISCSIDriver):
             new_vol = stage_2
             self._cache_volume(new_vol)
             self._cache_volume(stage_1)
-            LOG.info(_('Extended volume with label %s.'), src_label)
+            LOG.info(_LI('Extended volume with label %s.'), src_label)
         except exception.NetAppDriverException:
             if stage_1 == 0:
                 with excutils.save_and_reraise_exception():
@@ -805,7 +807,8 @@ class Driver(driver.ISCSIDriver):
         """Removes tmp vols with no snapshots."""
         try:
             if not utils.set_safe_attr(self, 'clean_job_running', True):
-                LOG.warn(_('Returning as clean tmp vol job already running.'))
+                LOG.warn(_LW('Returning as clean tmp '
+                             'vol job already running.'))
                 return
             for label in self._objects['volumes']['label_ref'].keys():
                 if (label.startswith('tmp-') and
index 8b966e641a9e6dfe08b2b6f65d001288fc70eefd..225db8b7f1068abad87f7fa2346d7dc40eada9e0 100644 (file)
@@ -29,7 +29,7 @@ import uuid
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
@@ -346,8 +346,8 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
                 self.extend_volume(volume, volume['size'])
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(
-                        _("Resizing %s failed. Cleaning volume."), new_name)
+                    LOG.error(_LE("Resizing %s failed. "
+                                  "Cleaning volume."), new_name)
                     self.delete_volume(volume)
 
     def terminate_connection(self, volume, connector, **kwargs):
@@ -579,9 +579,9 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
             attr = getattr(self._get_lun_from_table(name), attr)
             return attr
         except exception.VolumeNotFound as e:
-            LOG.error(_("Message: %s"), e.msg)
+            LOG.error(_LE("Message: %s"), e.msg)
         except Exception as e:
-            LOG.error(_("Error getting lun attribute. Exception: %s"),
+            LOG.error(_LE("Error getting lun attribute. Exception: %s"),
                       e.__str__())
         return None
 
@@ -600,8 +600,8 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
                 self.extend_volume(volume, volume['size'])
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(
-                        _("Resizing %s failed. Cleaning volume."), new_name)
+                    LOG.error(_LE("Resizing %s failed. "
+                                  "Cleaning volume."), new_name)
                     self.delete_volume(volume)
 
     def get_volume_stats(self, refresh=False):
@@ -638,13 +638,13 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
                 self._do_sub_clone_resize(path, new_size_bytes)
             self.lun_table[name].size = new_size_bytes
         else:
-            LOG.info(_("No need to extend volume %s"
-                       " as it is already the requested new size."), name)
+            LOG.info(_LI("No need to extend volume %s"
+                         " as it is already the requested new size."), name)
 
     def _do_direct_resize(self, path, new_size_bytes, force=True):
         """Uses the resize api to resize the lun."""
         seg = path.split("/")
-        LOG.info(_("Resizing lun %s directly to new size."), seg[-1])
+        LOG.info(_LI("Resizing lun %s directly to new size."), seg[-1])
         lun_resize = NaElement("lun-resize")
         lun_resize.add_new_child('path', path)
         lun_resize.add_new_child('size', new_size_bytes)
@@ -671,7 +671,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
             geometry['max_resize'] =\
                 result.get_child_content("max-resize-size")
         except Exception as e:
-            LOG.error(_("Lun %(path)s geometry failed. Message - %(msg)s")
+            LOG.error(_LE("Lun %(path)s geometry failed. Message - %(msg)s")
                       % {'path': path, 'msg': e.message})
         return geometry
 
@@ -715,7 +715,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
             after a successful clone.
         """
         seg = path.split("/")
-        LOG.info(_("Resizing lun %s using sub clone to new size."), seg[-1])
+        LOG.info(_LI("Resizing lun %s using sub clone to new size."), seg[-1])
         name = seg[-1]
         vol_name = seg[2]
         lun = self._get_lun_from_table(name)
@@ -745,7 +745,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
         """Try post sub clone resize in a transactional manner."""
         st_tm_mv, st_nw_mv, st_del_old = None, None, None
         seg = path.split("/")
-        LOG.info(_("Post clone resize lun %s"), seg[-1])
+        LOG.info(_LI("Post clone resize lun %s"), seg[-1])
         new_lun = 'new-%s' % (seg[-1])
         tmp_lun = 'tmp-%s' % (seg[-1])
         tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
@@ -765,12 +765,12 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
                     raise exception.VolumeBackendAPIException(
                         data=msg % (seg[-1]))
                 elif st_del_old is None:
-                    LOG.error(_("Failure deleting staged tmp lun %s."),
+                    LOG.error(_LE("Failure deleting staged tmp lun %s."),
                               tmp_lun)
                 else:
-                    LOG.error(_("Unknown exception in"
-                                " post clone resize lun %s."), seg[-1])
-                    LOG.error(_("Exception details: %s") % (e.__str__()))
+                    LOG.error(_LE("Unknown exception in"
+                                  " post clone resize lun %s."), seg[-1])
+                    LOG.error(_LE("Exception details: %s") % (e.__str__()))
 
     def _get_lun_block_count(self, path):
         """Gets block counts for the lun."""
@@ -1231,8 +1231,8 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
             volume_name = vol.get_child_content('name')
             if self._get_vol_option(volume_name, 'root') == 'true':
                 return volume_name
-        LOG.warn(_('Could not determine root volume name '
-                   'on %s.') % self._get_owner())
+        LOG.warn(_LW('Could not determine root volume name '
+                     'on %s.') % self._get_owner())
         return None
 
     def _get_igroup_by_initiator(self, initiator):
@@ -1310,8 +1310,8 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
                     if luns:
                         lun_list.extend(luns)
                 except NaApiError:
-                    LOG.warn(_("Error finding luns for volume %s."
-                               " Verify volume exists.") % (vol))
+                    LOG.warn(_LW("Error finding luns for volume %s."
+                                 " Verify volume exists.") % (vol))
         else:
             luns = self._get_vol_luns(None)
             lun_list.extend(luns)
@@ -1570,14 +1570,14 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
             try:
                 job_set = set_safe_attr(self, 'vol_refresh_running', True)
                 if not job_set:
-                    LOG.warn(
-                        _("Volume refresh job already running. Returning..."))
+                    LOG.warn(_LW("Volume refresh job already "
+                                 "running. Returning..."))
                     return
                 self.vol_refresh_voluntary = False
                 self.vols = self._get_filer_volumes()
                 self.vol_refresh_time = timeutils.utcnow()
             except Exception as e:
-                LOG.warn(_("Error refreshing volume info. Message: %s"),
+                LOG.warn(_LW("Error refreshing volume info. Message: %s"),
                          six.text_type(e))
             finally:
                 set_safe_attr(self, 'vol_refresh_running', False)
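
Two argument styles survive in the hunks above, and the patch deliberately preserves whichever one a call already used: passing the format arguments to the logger defers interpolation until the record is actually emitted, while pre-formatting with % renders the string even when the level would have filtered it out. A minimal sketch of the difference, with a hypothetical lun_name:

    from cinder.i18n import _LE
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)
    lun_name = 'lun-0001'  # hypothetical value for illustration

    # Deferred: the logger interpolates only if the record is emitted.
    LOG.error(_LE("Resizing %s failed. Cleaning volume."), lun_name)

    # Eager: the message is rendered before the logger can filter it.
    LOG.error(_LE("Resizing %s failed. Cleaning volume.") % lun_name)
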
index a161bc78de95181dd71878644947c19407eca19e..3694b57d2b2237b65923779e119f92f63c0e0dfd 100644 (file)
@@ -27,7 +27,7 @@ import six
 import six.moves.urllib.parse as urlparse
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
@@ -110,9 +110,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
                     self.extend_volume(volume, vol_size)
                 except Exception:
                     with excutils.save_and_reraise_exception():
-                        LOG.error(
-                            _("Resizing %s failed. Cleaning volume."),
-                            volume.name)
+                        LOG.error(_LE("Resizing %s failed. Cleaning volume."),
+                                  volume.name)
                         self._execute('rm', path, run_as_root=run_as_root)
         else:
             raise exception.CinderException(
@@ -186,8 +185,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
                 tries = tries + 1
                 if tries >= self.configuration.num_shell_tries:
                     raise
-                LOG.exception(_("Recovering from a failed execute.  "
-                                "Try number %s"), tries)
+                LOG.exception(_LE("Recovering from a failed execute.  "
+                                  "Try number %s"), tries)
                 time.sleep(tries ** 2)
 
     def _get_volume_path(self, nfs_share, volume_name):
@@ -217,8 +216,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
                 try:
                     self.extend_volume(volume, vol_size)
                 except Exception as e:
-                    LOG.error(
-                        _("Resizing %s failed. Cleaning volume."), volume.name)
+                    LOG.error(_LE("Resizing %s failed. "
+                                  "Cleaning volume."), volume.name)
                     self._execute('rm', path,
                                   run_as_root=self._execute_as_root)
                     raise e
@@ -236,23 +235,22 @@ class NetAppNFSDriver(nfs.NfsDriver):
         """Fetch the image from image_service and write it to the volume."""
         super(NetAppNFSDriver, self).copy_image_to_volume(
             context, volume, image_service, image_id)
-        LOG.info(_('Copied image to volume %s using regular download.'),
+        LOG.info(_LI('Copied image to volume %s using regular download.'),
                  volume['name'])
         self._register_image_in_cache(volume, image_id)
 
     def _register_image_in_cache(self, volume, image_id):
         """Stores image in the cache."""
         file_name = 'img-cache-%s' % image_id
-        LOG.info(_("Registering image in cache %s"), file_name)
+        LOG.info(_LI("Registering image in cache %s"), file_name)
         try:
             self._do_clone_rel_img_cache(
                 volume['name'], file_name,
                 volume['provider_location'], file_name)
         except Exception as e:
-            LOG.warn(
-                _('Exception while registering image %(image_id)s'
-                  ' in cache. Exception: %(exc)s')
-                % {'image_id': image_id, 'exc': e.__str__()})
+            LOG.warn(_LW('Exception while registering image %(image_id)s'
+                         ' in cache. Exception: %(exc)s')
+                     % {'image_id': image_id, 'exc': e.__str__()})
 
     def _find_image_in_cache(self, image_id):
         """Finds image in cache and returns list of shares with file name."""
@@ -276,7 +274,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
             dir = self._get_mount_point_for_share(share)
             file_path = '%s/%s' % (dir, dst)
             if not os.path.exists(file_path):
-                LOG.info(_('Cloning from cache to destination %s'), dst)
+                LOG.info(_LI('Cloning from cache to destination %s'), dst)
                 self._clone_volume(src, dst, volume_id=None, share=share)
         _do_clone()
 
@@ -306,7 +304,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
                         self._get_capacity_info(share)
                     avl_percent = int((total_avl / total_size) * 100)
                     if avl_percent <= thres_size_perc_start:
-                        LOG.info(_('Cleaning cache for share %s.'), share)
+                        LOG.info(_LI('Cleaning cache for share %s.'), share)
                         eligible_files = self._find_old_cache_files(share)
                         threshold_size = int(
                             (thres_size_perc_stop * total_size) / 100)
@@ -318,10 +316,9 @@ class NetAppNFSDriver(nfs.NfsDriver):
                     else:
                         continue
                 except Exception as e:
-                    LOG.warn(_(
-                        'Exception during cache cleaning'
-                        ' %(share)s. Message - %(ex)s')
-                        % {'share': share, 'ex': e.__str__()})
+                    LOG.warn(_LW('Exception during cache cleaning'
+                                 ' %(share)s. Message - %(ex)s')
+                             % {'share': share, 'ex': e.__str__()})
                     continue
         finally:
             LOG.debug('Image cache cleaning done.')
@@ -377,7 +374,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
             self._execute(*cmd, run_as_root=self._execute_as_root)
             return True
         except Exception as ex:
-            LOG.warning(_('Exception during deleting %s'), ex.__str__())
+            LOG.warning(_LW('Exception during deleting %s'), ex.__str__())
             return False
 
     def clone_image(self, volume, image_location, image_id, image_meta):
@@ -409,8 +406,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
                 post_clone = self._post_clone_image(volume)
         except Exception as e:
             msg = e.msg if getattr(e, 'msg', None) else e.__str__()
-            LOG.info(_('Image cloning unsuccessful for image'
-                       ' %(image_id)s. Message: %(msg)s')
+            LOG.info(_LI('Image cloning unsuccessful for image'
+                         ' %(image_id)s. Message: %(msg)s')
                      % {'image_id': image_id, 'msg': msg})
             vol_path = self.local_path(volume)
             volume['provider_location'] = None
@@ -425,7 +422,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
     def _clone_from_cache(self, volume, image_id, cache_result):
         """Clones a copy from image cache."""
         cloned = False
-        LOG.info(_('Cloning image %s from cache'), image_id)
+        LOG.info(_LI('Cloning image %s from cache'), image_id)
         for res in cache_result:
             # Repeat tries in other shares if failed in some
             (share, file_name) = res
@@ -439,13 +436,13 @@ class NetAppNFSDriver(nfs.NfsDriver):
                     volume['provider_location'] = share
                     break
                 except Exception:
-                    LOG.warn(_('Unexpected exception during'
-                               ' image cloning in share %s'), share)
+                    LOG.warn(_LW('Unexpected exception during'
+                                 ' image cloning in share %s'), share)
         return cloned
 
     def _direct_nfs_clone(self, volume, image_location, image_id):
         """Clone directly in nfs share."""
-        LOG.info(_('Checking image clone %s from glance share.'), image_id)
+        LOG.info(_LI('Checking image clone %s from glance share.'), image_id)
         cloned = False
         image_location = self._construct_image_nfs_url(image_location)
         share = self._is_cloneable_share(image_location)
@@ -466,9 +463,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
                     volume_id=None, share=share)
                 cloned = True
             else:
-                LOG.info(
-                    _('Image will locally be converted to raw %s'),
-                    image_id)
+                LOG.info(_LI('Image will locally be converted to raw %s'),
+                         image_id)
                 dst = '%s/%s' % (dir_path, volume['name'])
                 image_utils.convert_image(img_path, dst, 'raw',
                                           run_as_root=run_as_root)
@@ -485,7 +481,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
 
     def _post_clone_image(self, volume):
         """Do operations post image cloning."""
-        LOG.info(_('Performing post clone for %s'), volume['name'])
+        LOG.info(_LI('Performing post clone for %s'), volume['name'])
         vol_path = self.local_path(volume)
         if self._discover_file_till_timeout(vol_path):
             self._set_rw_permissions(vol_path)
@@ -500,7 +496,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
         if self._is_file_size_equal(path, new_size):
             return
         else:
-            LOG.info(_('Resizing file to %sG'), new_size)
+            LOG.info(_LI('Resizing file to %sG'), new_size)
             image_utils.resize_image(path, new_size,
                                      run_as_root=self._execute_as_root)
             if self._is_file_size_equal(path, new_size):
@@ -530,7 +526,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
                 return True
             else:
                 if retry_seconds <= 0:
-                    LOG.warn(_('Discover file retries exhausted.'))
+                    LOG.warn(_LW('Discover file retries exhausted.'))
                     return False
                 else:
                     time.sleep(sleep_interval)
@@ -588,7 +584,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
                               share_candidates)
                     return self._share_match_for_ip(ip, share_candidates)
         except Exception:
-            LOG.warn(_("Unexpected exception while short listing used share."))
+            LOG.warn(_LW("Unexpected exception while short "
+                         "listing used share."))
         return None
 
     def _construct_image_nfs_url(self, image_location):
@@ -628,7 +625,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
 
     def extend_volume(self, volume, new_size):
         """Extend an existing volume to the new size."""
-        LOG.info(_('Extending volume %s.'), volume['name'])
+        LOG.info(_LI('Extending volume %s.'), volume['name'])
         path = self.local_path(volume)
         self._resize_image_file(path, new_size)
 
@@ -648,7 +645,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
         @utils.synchronized(dest_path, external=True)
         def _move_file(src, dst):
             if os.path.exists(dst):
-                LOG.warn(_("Destination %s already exists."), dst)
+                LOG.warn(_LW("Destination %s already exists."), dst)
                 return False
             self._execute('mv', src, dst,
                           run_as_root=self._execute_as_root)
@@ -657,7 +654,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
         try:
             return _move_file(source_path, dest_path)
         except Exception as e:
-            LOG.warn(_('Exception moving file %(src)s. Message - %(e)s')
+            LOG.warn(_LW('Exception moving file %(src)s. Message - %(e)s')
                      % {'src': source_path, 'e': e})
         return False
 
@@ -776,11 +773,11 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
         self.stale_vols = set()
         if self.vserver:
             self.ssc_enabled = True
-            LOG.info(_("Shares on vserver %s will only"
-                       " be used for provisioning.") % (self.vserver))
+            LOG.info(_LI("Shares on vserver %s will only"
+                         " be used for provisioning.") % (self.vserver))
         else:
             self.ssc_enabled = False
-            LOG.warn(_("No vserver set in config. SSC will be disabled."))
+            LOG.warn(_LW("No vserver set in config. SSC will be disabled."))
 
     def check_for_setup_error(self):
         """Check that the driver is working and can communicate."""
@@ -829,15 +826,15 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
 
         try:
             volume['provider_location'] = share
-            LOG.info(_('casted to %s') % volume['provider_location'])
+            LOG.info(_LI('casted to %s') % volume['provider_location'])
             self._do_create_volume(volume)
             if qos_policy_group:
                 self._set_qos_policy_group_on_volume(volume, share,
                                                      qos_policy_group)
             return {'provider_location': volume['provider_location']}
         except Exception as ex:
-            LOG.error(_("Exception creating vol %(name)s on "
-                        "share %(share)s. Details: %(ex)s")
+            LOG.error(_LE("Exception creating vol %(name)s on "
+                          "share %(share)s. Details: %(ex)s")
                       % {'name': volume['name'],
                          'share': volume['provider_location'],
                          'ex': ex})
@@ -1049,7 +1046,7 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
     def refresh_ssc_vols(self, vols):
         """Refreshes ssc_vols with latest entries."""
         if not self._mounted_shares:
-            LOG.warn(_("No shares found hence skipping ssc refresh."))
+            LOG.warn(_LW("No shares found hence skipping ssc refresh."))
             return
         mnt_share_vols = set()
         vs_ifs = self._get_vserver_ips(self.vserver)
@@ -1179,14 +1176,15 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
             if (major == 1 and minor >= 20 and col_path):
                 self._try_copyoffload(context, volume, image_service, image_id)
                 copy_success = True
-                LOG.info(_('Copied image %(img)s to volume %(vol)s using copy'
-                           ' offload workflow.')
+                LOG.info(_LI('Copied image %(img)s to '
+                             'volume %(vol)s using copy'
+                             ' offload workflow.')
                          % {'img': image_id, 'vol': volume['id']})
             else:
                 LOG.debug("Copy offload either not configured or"
                           " unsupported.")
         except Exception as e:
-            LOG.exception(_('Copy offload workflow unsuccessful. %s'), e)
+            LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e)
         finally:
             if not copy_success:
                 super(NetAppDirectCmodeNfsDriver, self).copy_image_to_volume(
@@ -1247,7 +1245,8 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
                 copied = True
                 break
             except Exception as e:
-                LOG.exception(_('Error in workflow copy from cache. %s.'), e)
+                LOG.exception(_LE('Error in workflow copy '
+                                  'from cache. %s.'), e)
         return copied
 
     def _clone_file_dst_exists(self, share, src_name, dst_name,
@@ -1398,14 +1397,14 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
             raise exception.InvalidHost(reason=msg)
 
         volume['provider_location'] = share
-        LOG.info(_('Creating volume at location %s')
+        LOG.info(_LI('Creating volume at location %s')
                  % volume['provider_location'])
 
         try:
             self._do_create_volume(volume)
         except Exception as ex:
-            LOG.error(_("Exception creating vol %(name)s on "
-                        "share %(share)s. Details: %(ex)s")
+            LOG.error(_LE("Exception creating vol %(name)s on "
+                          "share %(share)s. Details: %(ex)s")
                       % {'name': volume['name'],
                          'share': volume['provider_location'],
                          'ex': six.text_type(ex)})
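
Many hunks in this file change nothing but the wrapping: _LI( and _LE( are two characters wider than _(, which pushes some lines past the 79-character limit, so the literals are re-split using Python's implicit adjacent-string concatenation. The split point has to keep the interior space on one side; a sketch with a hypothetical volume:

    from cinder.i18n import _LI
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)
    volume = {'name': 'volume-0001'}  # hypothetical value

    # Adjacent string literals concatenate at compile time; both calls
    # log the identical message.
    LOG.info(_LI('Copied image to volume %s using regular download.'),
             volume['name'])
    LOG.info(_LI('Copied image to volume %s '
                 'using regular download.'), volume['name'])
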
index 7bd9f5c90f500b986889a0b4dd7fa00143063de6..59bcf63a18ae4bece9c84ee7bfbfc99b6a67bfe0 100644 (file)
@@ -23,7 +23,7 @@
 """
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 from cinder.volume.drivers import nexenta
@@ -149,7 +149,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         :param volume: volume reference
         :param new_size: volume new size in GB
         """
-        LOG.info(_('Extending volume: %(id)s New size: %(size)s GB'),
+        LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'),
                  {'id': volume['id'], 'size': new_size})
         self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']),
                                      'volsize', '%sG' % new_size)
@@ -165,8 +165,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             self.nms.zvol.destroy(volume_name, '')
         except nexenta.NexentaException as exc:
             if 'does not exist' in exc.args[0]:
-                LOG.info(_('Volume %s does not exist, it seems it was already '
-                           'deleted.'), volume_name)
+                LOG.info(_LI('Volume %s does not exist, it '
+                             'seems it was already deleted.'), volume_name)
                 return
             if 'zvol has children' in exc.args[0]:
                 raise exception.VolumeIsBusy(volume_name=volume_name)
@@ -178,7 +178,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             try:
                 self.delete_snapshot({'volume_name': volume, 'name': snapshot})
             except nexenta.NexentaException as exc:
-                LOG.warning(_('Cannot delete snapshot %(origin)s: %(exc)s'),
+                LOG.warning(_LW('Cannot delete snapshot %(origin)s: %(exc)s'),
                             {'origin': origin, 'exc': exc})
 
     def create_cloned_volume(self, volume, src_vref):
@@ -199,13 +199,13 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         try:
             self.create_volume_from_snapshot(volume, snapshot)
         except nexenta.NexentaException:
-            LOG.error(_('Volume creation failed, deleting created snapshot '
-                        '%(volume_name)s@%(name)s'), snapshot)
+            LOG.error(_LE('Volume creation failed, deleting created snapshot '
+                          '%(volume_name)s@%(name)s'), snapshot)
             try:
                 self.delete_snapshot(snapshot)
             except (nexenta.NexentaException, exception.SnapshotIsBusy):
-                LOG.warning(_('Failed to delete zfs snapshot '
-                              '%(volume_name)s@%(name)s'), snapshot)
+                LOG.warning(_LW('Failed to delete zfs snapshot '
+                                '%(volume_name)s@%(name)s'), snapshot)
             raise
 
     def _get_zfs_send_recv_cmd(self, src, dst):
@@ -266,8 +266,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
                 ssh_bound = True
                 break
         if not ssh_bound:
-            LOG.warning(_("Remote NexentaStor appliance at %s should be "
-                          "SSH-bound."), dst_host)
+            LOG.warning(_LW("Remote NexentaStor appliance at %s should be "
+                            "SSH-bound."), dst_host)
 
         # Create temporary snapshot of volume on NexentaStor Appliance.
         snapshot = {
@@ -286,22 +286,22 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         try:
             self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
         except nexenta.NexentaException as exc:
-            LOG.warning(_("Cannot send source snapshot %(src)s to "
-                          "destination %(dst)s. Reason: %(exc)s"),
+            LOG.warning(_LW("Cannot send source snapshot %(src)s to "
+                            "destination %(dst)s. Reason: %(exc)s"),
                         {'src': src, 'dst': dst, 'exc': exc})
             return false_ret
         finally:
             try:
                 self.delete_snapshot(snapshot)
             except nexenta.NexentaException as exc:
-                LOG.warning(_("Cannot delete temporary source snapshot "
-                              "%(src)s on NexentaStor Appliance: %(exc)s"),
+                LOG.warning(_LW("Cannot delete temporary source snapshot "
+                                "%(src)s on NexentaStor Appliance: %(exc)s"),
                             {'src': src, 'exc': exc})
         try:
             self.delete_volume(volume)
         except nexenta.NexentaException as exc:
-            LOG.warning(_("Cannot delete source volume %(volume)s on "
-                          "NexentaStor Appliance: %(exc)s"),
+            LOG.warning(_LW("Cannot delete source volume %(volume)s on "
+                            "NexentaStor Appliance: %(exc)s"),
                         {'volume': volume['name'], 'exc': exc})
 
         dst_nms = self.get_nms_for_url(nms_url)
@@ -310,8 +310,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         try:
             dst_nms.snapshot.destroy(dst_snapshot, '')
         except nexenta.NexentaException as exc:
-            LOG.warning(_("Cannot delete temporary destination snapshot "
-                          "%(dst)s on NexentaStor Appliance: %(exc)s"),
+            LOG.warning(_LW("Cannot delete temporary destination snapshot "
+                            "%(dst)s on NexentaStor Appliance: %(exc)s"),
                         {'dst': dst_snapshot, 'exc': exc})
 
         provider_location = '%(host)s:%(port)s,1 %(name)s 0' % {
@@ -353,8 +353,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             self.nms.snapshot.destroy(snapshot_name, '')
         except nexenta.NexentaException as exc:
             if "does not exist" in exc.args[0]:
-                LOG.info(_('Snapshot %s does not exist, it seems it was '
-                           'already deleted.'), snapshot_name)
+                LOG.info(_LI('Snapshot %s does not exist, it seems it was '
+                             'already deleted.'), snapshot_name)
                 return
             if "snapshot has dependent clones" in exc.args[0]:
                 raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
@@ -474,8 +474,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
                     'target_name': target_name})
             except nexenta.NexentaException as exc:
                 if ensure and 'already configured' in exc.args[0]:
-                    LOG.info(_('Ignored target creation error "%s" while '
-                               'ensuring export'), exc)
+                    LOG.info(_LI('Ignored target creation error "%s" while '
+                                 'ensuring export'), exc)
                 else:
                     raise
         if not self._target_group_exists(target_group_name):
@@ -484,8 +484,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             except nexenta.NexentaException as exc:
                 if ((ensure and 'already exists' in exc.args[0]) or
                         'target must be offline' in exc.args[0]):
-                    LOG.info(_('Ignored target group creation error "%s" '
-                               'while ensuring export'), exc)
+                    LOG.info(_LI('Ignored target group creation error "%s" '
+                                 'while ensuring export'), exc)
                 else:
                     raise
         if not self._target_member_in_target_group(target_group_name,
@@ -496,8 +496,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             except nexenta.NexentaException as exc:
                 if ((ensure and 'already exists' in exc.args[0]) or
                         'target must be offline' in exc.args[0]):
-                    LOG.info(_('Ignored target group member addition error '
-                               '"%s" while ensuring export'), exc)
+                    LOG.info(_LI('Ignored target group member addition error '
+                                 '"%s" while ensuring export'), exc)
                 else:
                     raise
         if not self._lu_exists(zvol_name):
@@ -506,8 +506,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             except nexenta.NexentaException as exc:
                 if not ensure or 'in use' not in exc.args[0]:
                     raise
-                LOG.info(_('Ignored LU creation error "%s" while ensuring '
-                           'export'), exc)
+                LOG.info(_LI('Ignored LU creation error "%s" while ensuring '
+                             'export'), exc)
         if not self._is_lu_shared(zvol_name):
             try:
                 self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
@@ -516,8 +516,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             except nexenta.NexentaException as exc:
                 if not ensure or 'view entry exists' not in exc.args[0]:
                     raise
-                LOG.info(_('Ignored LUN mapping entry addition error "%s" '
-                           'while ensuring export'), exc)
+                LOG.info(_LI('Ignored LUN mapping entry addition error "%s" '
+                             'while ensuring export'), exc)
 
     def create_export(self, _ctx, volume):
         """Create new export for zvol.
@@ -549,16 +549,16 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             self.nms.stmf.destroy_targetgroup(target_group_name)
         except nexenta.NexentaException as exc:
             # We assume that target group is already gone
-            LOG.warn(_('Got error trying to destroy target group'
-                       ' %(target_group)s, assuming it is '
-                       'already gone: %(exc)s'),
+            LOG.warn(_LW('Got error trying to destroy target group'
+                         ' %(target_group)s, assuming it is '
+                         'already gone: %(exc)s'),
                      {'target_group': target_group_name, 'exc': exc})
         try:
             self.nms.iscsitarget.delete_target(target_name)
         except nexenta.NexentaException as exc:
             # We assume that target is gone as well
-            LOG.warn(_('Got error trying to delete target %(target)s,'
-                       ' assuming it is already gone: %(exc)s'),
+            LOG.warn(_LW('Got error trying to delete target %(target)s,'
+                         ' assuming it is already gone: %(exc)s'),
                      {'target': target_name, 'exc': exc})
 
     def get_volume_stats(self, refresh=False):
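
Both LOG.warn and LOG.warning appear in this file; the patch converts the markers without touching the method name. warning is the canonical name on stdlib-based loggers and warn is an alias (later deprecated), so the two calls below are equivalent:

    from cinder.i18n import _LW
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)
    origin, exc = 'vol@snap-0001', 'busy'  # hypothetical values

    # warn is an alias for warning; the patch leaves each call spelled
    # as it was found.
    LOG.warning(_LW('Cannot delete snapshot %(origin)s: %(exc)s'),
                {'origin': origin, 'exc': exc})
    LOG.warn(_LW('Cannot delete snapshot %(origin)s: %(exc)s'),
             {'origin': origin, 'exc': exc})
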
index 0cb654434911454b606c409b3cb42e77afc9cb28..54a9820fbac8bbcb9d51c98aa9cbbfd3a0b79e3b 100644 (file)
@@ -23,7 +23,7 @@
 
 import urllib2
 
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.openstack.common import jsonutils
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers import nexenta
@@ -86,9 +86,9 @@ class NexentaJSONProxy(object):
         response_obj = urllib2.urlopen(request)
         if response_obj.info().status == 'EOF in headers':
             if not self.auto or self.scheme != 'http':
-                LOG.error(_('No headers in server response'))
+                LOG.error(_LE('No headers in server response'))
                 raise NexentaJSONException(_('Bad response from server'))
-            LOG.info(_('Auto switching to HTTPS connection to %s'), self.url)
+            LOG.info(_LI('Auto switching to HTTPS connection to %s'), self.url)
             self.scheme = 'https'
             request = urllib2.Request(self.url, data, headers)
             response_obj = urllib2.urlopen(request)
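
The jsonrpc.py hunk shows the division of labour the markers encode: the LOG.error call moves to _LE, while the text handed to NexentaJSONException stays on plain _, since exception messages can propagate to API users and are translated from the primary catalog. The same shape, as a sketch with a hypothetical helper:

    from cinder.i18n import _, _LE
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def check_headers(response_info):  # hypothetical helper
        if response_info.status == 'EOF in headers':
            # Operator-facing: translated from the log-error catalog.
            LOG.error(_LE('No headers in server response'))
            # User-facing: translated from the primary catalog.
            raise ValueError(_('Bad response from server'))
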
index a5562ad26d8d8d39f2dc06daf263a3d49d21e999..d3eee00e644adc056ebe9f7ac94f9b199ed1ca98 100644 (file)
@@ -28,7 +28,7 @@ import re
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import units
 from cinder.volume.drivers import nexenta
@@ -145,8 +145,8 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
             try:
                 nms.folder.destroy('%s/%s' % (vol, folder))
             except nexenta.NexentaException:
-                LOG.warning(_("Cannot destroy created folder: "
-                              "%(vol)s/%(folder)s"),
+                LOG.warning(_LW("Cannot destroy created folder: "
+                                "%(vol)s/%(folder)s"),
                             {'vol': vol, 'folder': folder})
             raise exc
 
@@ -175,8 +175,8 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
             try:
                 nms.folder.destroy('%s/%s' % (vol, folder), '')
             except nexenta.NexentaException:
-                LOG.warning(_("Cannot destroy cloned folder: "
-                              "%(vol)s/%(folder)s"),
+                LOG.warning(_LW("Cannot destroy cloned folder: "
+                                "%(vol)s/%(folder)s"),
                             {'vol': vol, 'folder': folder})
             raise
 
@@ -188,7 +188,7 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
         :param volume: new volume reference
         :param src_vref: source volume reference
         """
-        LOG.info(_('Creating clone of volume: %s'), src_vref['id'])
+        LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
         snapshot = {'volume_name': src_vref['name'],
                     'volume_id': src_vref['id'],
                     'name': self._get_clone_snapshot_name(volume)}
@@ -199,13 +199,13 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
         try:
             return self.create_volume_from_snapshot(volume, snapshot)
         except nexenta.NexentaException:
-            LOG.error(_('Volume creation failed, deleting created snapshot '
-                        '%(volume_name)s@%(name)s'), snapshot)
+            LOG.error(_LE('Volume creation failed, deleting created snapshot '
+                          '%(volume_name)s@%(name)s'), snapshot)
             try:
                 self.delete_snapshot(snapshot)
             except (nexenta.NexentaException, exception.SnapshotIsBusy):
-                LOG.warning(_('Failed to delete zfs snapshot '
-                              '%(volume_name)s@%(name)s'), snapshot)
+                LOG.warning(_LW('Failed to delete zfs snapshot '
+                                '%(volume_name)s@%(name)s'), snapshot)
             raise
 
     def delete_volume(self, volume):
@@ -226,8 +226,8 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
                 nms.folder.destroy(folder, '-r')
             except nexenta.NexentaException as exc:
                 if 'does not exist' in exc.args[0]:
-                    LOG.info(_('Folder %s does not exist, it was '
-                               'already deleted.'), folder)
+                    LOG.info(_LI('Folder %s does not exist, it was '
+                                 'already deleted.'), folder)
                     return
                 raise
             origin = props.get('origin')
@@ -236,8 +236,8 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
                     nms.snapshot.destroy(origin, '')
                 except nexenta.NexentaException as exc:
                     if 'does not exist' in exc.args[0]:
-                        LOG.info(_('Snapshot %s does not exist, it was '
-                                   'already deleted.'), origin)
+                        LOG.info(_LI('Snapshot %s does not exist, it was '
+                                     'already deleted.'), origin)
                         return
                     raise
 
@@ -267,8 +267,8 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
             nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '')
         except nexenta.NexentaException as exc:
             if 'does not exist' in exc.args[0]:
-                LOG.info(_('Snapshot %s does not exist, it was '
-                           'already deleted.'), '%s@%s' % (folder, snapshot))
+                LOG.info(_LI('Snapshot %s does not exist, it was '
+                             'already deleted.'), '%s@%s' % (folder, snapshot))
                 return
             raise
 
@@ -297,8 +297,8 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
         block_size_mb = 1
         block_count = size * units.Gi / (block_size_mb * units.Mi)
 
-        LOG.info(_('Creating regular file: %s.'
-                   'This may take some time.') % path)
+        LOG.info(_LI('Creating regular file: %s. '
+                     'This may take some time.') % path)
 
         nms.appliance.execute(
             'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
@@ -308,7 +308,7 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
             }
         )
 
-        LOG.info(_('Regular file: %s created.') % path)
+        LOG.info(_LI('Regular file: %s created.') % path)
 
     def _set_rw_permissions_for_all(self, nms, path):
         """Sets 666 permissions for the path.
index 200503784a50af358e32405b83cca8dc1b366567..4c1850d8ac192eb3469a406072bc42edb39d6f08 100644 (file)
@@ -23,7 +23,7 @@ import tempfile
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import processutils as putils
@@ -158,7 +158,7 @@ class RemoteFSDriver(driver.VolumeDriver):
 
         volume['provider_location'] = self._find_share(volume['size'])
 
-        LOG.info(_('casted to %s') % volume['provider_location'])
+        LOG.info(_LI('casted to %s') % volume['provider_location'])
 
         self._do_create_volume(volume)
 
@@ -195,7 +195,7 @@ class RemoteFSDriver(driver.VolumeDriver):
                 self._ensure_share_mounted(share)
                 mounted_shares.append(share)
             except Exception as exc:
-                LOG.error(_('Exception during mounting %s') % (exc,))
+                LOG.error(_LE('Exception during mounting %s') % (exc,))
 
         self._mounted_shares = mounted_shares
 
@@ -210,8 +210,9 @@ class RemoteFSDriver(driver.VolumeDriver):
         :param volume: volume reference
         """
         if not volume['provider_location']:
-            LOG.warn(_('Volume %s does not have provider_location specified, '
-                     'skipping'), volume['name'])
+            LOG.warn(_LW('Volume %s does not have '
+                         'provider_location specified, '
+                         'skipping'), volume['name'])
             return
 
         self._ensure_share_mounted(volume['provider_location'])
@@ -287,8 +288,8 @@ class RemoteFSDriver(driver.VolumeDriver):
         else:
             permissions = 'ugo+rw'
             parms = {'path': path, 'perm': permissions}
-            LOG.warn(_('%(path)s is being set with open permissions: '
-                       '%(perm)s') % parms)
+            LOG.warn(_LW('%(path)s is being set with open permissions: '
+                         '%(perm)s') % parms)
 
         self._execute('chmod', permissions, path,
                       run_as_root=self._execute_as_root)
@@ -376,8 +377,8 @@ class RemoteFSDriver(driver.VolumeDriver):
             share_opts = share_info[1].strip() if len(share_info) > 1 else None
 
             if not re.match(self.SHARE_FORMAT_REGEX, share_address):
-                LOG.warn(_("Share %s ignored due to invalid format.  Must be "
-                           "of form address:/export.") % share_address)
+                LOG.warn(_LW("Share %s ignored due to invalid format. Must be "
+                             "of form address:/export.") % share_address)
                 continue
 
             self.shares[share_address] = share_opts
@@ -437,7 +438,7 @@ class RemoteFSDriver(driver.VolumeDriver):
             self._execute(*cmd, run_as_root=True)
         except putils.ProcessExecutionError as exc:
             if ensure and 'already mounted' in exc.stderr:
-                LOG.warn(_("%s is already mounted"), share)
+                LOG.warn(_LW("%s is already mounted"), share)
             else:
                 raise
 
@@ -470,16 +471,17 @@ class RemoteFSDriver(driver.VolumeDriver):
         doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
                    "/nfs_backend.html"
         self.configuration.nas_secure_file_operations = 'false'
-        LOG.warn(_("The NAS file operations will be run as root: allowing "
-                   "root level access at the storage backend. This is "
-                   "considered an insecure NAS environment. Please see %s for "
-                   "information on a secure NAS configuration.") %
+        LOG.warn(_LW("The NAS file operations will be run as root: allowing "
+                     "root level access at the storage backend. This is "
+                     "considered an insecure NAS environment. "
+                     "Please see %s for information on a secure NAS "
+                     "configuration.") %
                  doc_html)
         self.configuration.nas_secure_file_permissions = 'false'
-        LOG.warn(_("The NAS file permissions mode will be 666 (allowing "
-                   "other/world read & write access). This is considered an "
-                   "insecure NAS environment. Please see %s for information "
-                   "on a secure NFS configuration.") %
+        LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
+                     "other/world read & write access). This is considered an "
+                     "insecure NAS environment. Please see %s for information "
+                     "on a secure NFS configuration.") %
                  doc_html)
 
     def _determine_nas_security_option_setting(self, nas_option, mount_point,
@@ -503,7 +505,8 @@ class RemoteFSDriver(driver.VolumeDriver):
             file_path = os.path.join(mount_point, file_name)
             if os.path.isfile(file_path):
                 nas_option = 'true'
-                LOG.info(_('Cinder secure environment indicator file exists.'))
+                LOG.info(_LI('Cinder secure environment '
+                             'indicator file exists.'))
             else:
                 # The indicator file does not exist. If it is a new
                 # installation, set to 'true' and create the indicator file.
@@ -519,11 +522,11 @@ class RemoteFSDriver(driver.VolumeDriver):
                         # protect from accidental removal (owner write only).
                         self._execute('chmod', '640', file_path,
                                       run_as_root=False)
-                        LOG.info(_('New Cinder secure environment indicator '
-                                   'file created at path %s.') % file_path)
+                        LOG.info(_LI('New Cinder secure environment indicator'
+                                     ' file created at path %s.') % file_path)
                     except IOError as err:
-                        LOG.error(_('Failed to created Cinder secure '
-                                    'environment indicator file: %s') %
+                        LOG.error(_LE('Failed to create Cinder secure '
+                                      'environment indicator file: %s') %
                                   format(err))
                 else:
                     # For existing installs, we default to 'false'. The
@@ -772,7 +775,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
         return snap_info['active']
 
     def _create_cloned_volume(self, volume, src_vref):
-        LOG.info(_('Cloning volume %(src)s to volume %(dst)s') %
+        LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s') %
                  {'src': src_vref['id'],
                   'dst': volume['id']})
 
@@ -816,7 +819,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
         if (snapshot_file == active_file):
             return
 
-        LOG.info(_('Deleting stale snapshot: %s') % snapshot['id'])
+        LOG.info(_LI('Deleting stale snapshot: %s') % snapshot['id'])
         self._delete(snapshot_path)
         del(snap_info[snapshot['id']])
         self._write_info_file(info_path, snap_info)
@@ -856,8 +859,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
             # exist, do not attempt to delete.
             # (This happens, for example, if snapshot_create failed due to lack
             # of permission to write to the share.)
-            LOG.info(_('Snapshot record for %s is not present, allowing '
-                       'snapshot_delete to proceed.') % snapshot['id'])
+            LOG.info(_LI('Snapshot record for %s is not present, allowing '
+                         'snapshot_delete to proceed.') % snapshot['id'])
             return
 
         snapshot_file = snap_info[snapshot['id']]
index ce29acd16017833284529a2f8021f09f731f1621..9aab5af1726fd489f2f85c20793db7b4c8ea6cd0 100644 (file)
@@ -17,7 +17,7 @@
 Classes and utility methods for datastore selection.
 """
 
-from cinder.i18n import _
+from cinder.i18n import _LE, _LW
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.vmware import error_util
@@ -58,7 +58,7 @@ class DatastoreSelector(object):
         """
         profile_id = self._vops.retrieve_profile_id(profile_name)
         if profile_id is None:
-            LOG.error(_("Storage profile: %s cannot be found in vCenter."),
+            LOG.error(_LE("Storage profile: %s cannot be found in vCenter."),
                       profile_name)
             raise error_util.ProfileNotFoundException(
                 storage_profile=profile_name)
@@ -209,14 +209,13 @@ class DatastoreSelector(object):
             except error_util.VimConnectionException:
                 # No need to try other hosts when there is a connection problem
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_("Error occurred while selecting datastore."
-                                    ))
+                    LOG.exception(_LE("Error occurred while "
+                                      "selecting datastore."))
             except error_util.VimException:
                 # TODO(vbala) volumeops.get_dss_rp shouldn't throw VimException
                 # for empty datastore list.
-                LOG.warn(_("Unable to fetch datastores connected to host %s."),
-                         host_ref,
-                         exc_info=True)
+                LOG.warn(_LW("Unable to fetch datastores connected "
+                             "to host %s."), host_ref, exc_info=True)
                 continue
 
             if not datastores:
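
Note that datastore.py drops plain _ from its import entirely, since only log messages are translated in this module. Its hunks also illustrate the two ways a traceback reaches the log: LOG.exception(...) implicitly attaches the active exception at ERROR level, and exc_info=True does the same at any level. A sketch:

    from cinder.i18n import _LE, _LW
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    try:
        raise RuntimeError('connection lost')  # stand-in failure
    except RuntimeError:
        # Both calls append the active traceback to the record.
        LOG.exception(_LE("Error occurred while selecting datastore."))
        LOG.warn(_LW("Unable to fetch datastores connected to host %s."),
                 'host-1', exc_info=True)
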
index 1eef770704cbb8c04e0f642b4130c95156151365..bae6fb6e312bf574d7378c1b9264b8e450da4d18 100644 (file)
@@ -19,7 +19,7 @@ Implements operations on volumes residing on VMware datastores.
 
 import urllib
 
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import units
 from cinder.volume.drivers.vmware import error_util
@@ -303,7 +303,7 @@ class VMwareVolumeOps(object):
                                         backing)
         LOG.debug("Initiated deletion of VM backing: %s." % backing)
         self._session.wait_for_task(task)
-        LOG.info(_("Deleted the VM backing: %s.") % backing)
+        LOG.info(_LI("Deleted the VM backing: %s.") % backing)
 
     # TODO(kartikaditya) Keep the methods not specific to volume in
     # a different file
@@ -567,8 +567,8 @@ class VMwareVolumeOps(object):
                                         newCapacityKb=size_in_kb,
                                         eagerZero=eager_zero)
         self._session.wait_for_task(task)
-        LOG.info(_("Successfully extended the volume %(name)s to "
-                   "%(size)s GB."),
+        LOG.info(_LI("Successfully extended the volume %(name)s to "
+                     "%(size)s GB."),
                  {'name': name, 'size': requested_size_in_gb})
 
     def _create_controller_config_spec(self, adapter_type):
@@ -712,7 +712,7 @@ class VMwareVolumeOps(object):
                                         pool=resource_pool, host=host)
         task_info = self._session.wait_for_task(task)
         backing = task_info.result
-        LOG.info(_("Successfully created volume backing: %s."), backing)
+        LOG.info(_LI("Successfully created volume backing: %s."), backing)
         return backing
 
     def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
@@ -870,8 +870,8 @@ class VMwareVolumeOps(object):
                                         backing, spec=relocate_spec)
         LOG.debug("Initiated relocation of volume backing: %s." % backing)
         self._session.wait_for_task(task)
-        LOG.info(_("Successfully relocated volume backing: %(backing)s "
-                   "to datastore: %(ds)s and resource pool: %(rp)s.") %
+        LOG.info(_LI("Successfully relocated volume backing: %(backing)s "
+                     "to datastore: %(ds)s and resource pool: %(rp)s.") %
                  {'backing': backing, 'ds': datastore, 'rp': resource_pool})
 
     def move_backing_to_folder(self, backing, folder):
@@ -888,8 +888,9 @@ class VMwareVolumeOps(object):
         LOG.debug("Initiated move of volume backing: %(backing)s into the "
                   "folder: %(fol)s." % {'backing': backing, 'fol': folder})
         self._session.wait_for_task(task)
-        LOG.info(_("Successfully moved volume backing: %(backing)s into the "
-                   "folder: %(fol)s.") % {'backing': backing, 'fol': folder})
+        LOG.info(_LI("Successfully moved volume "
+                     "backing: %(backing)s into the "
+                     "folder: %(fol)s.") % {'backing': backing, 'fol': folder})
 
     def create_snapshot(self, backing, name, description, quiesce=False):
         """Create snapshot of the backing with given name and description.
@@ -911,8 +912,8 @@ class VMwareVolumeOps(object):
                   "named: %(name)s." % {'backing': backing, 'name': name})
         task_info = self._session.wait_for_task(task)
         snapshot = task_info.result
-        LOG.info(_("Successfully created snapshot: %(snap)s for volume "
-                   "backing: %(backing)s.") %
+        LOG.info(_LI("Successfully created snapshot: %(snap)s for volume "
+                     "backing: %(backing)s.") %
                  {'snap': snapshot, 'backing': backing})
         return snapshot
 
@@ -973,8 +974,8 @@ class VMwareVolumeOps(object):
                   {'name': name, 'backing': backing})
         snapshot = self.get_snapshot(backing, name)
         if not snapshot:
-            LOG.info(_("Did not find the snapshot: %(name)s for backing: "
-                       "%(backing)s. Need not delete anything.") %
+            LOG.info(_LI("Did not find the snapshot: %(name)s for backing: "
+                         "%(backing)s. Need not delete anything.") %
                      {'name': name, 'backing': backing})
             return
         task = self._session.invoke_api(self._session.vim,
@@ -984,8 +985,8 @@ class VMwareVolumeOps(object):
                   "%(backing)s." %
                   {'name': name, 'backing': backing})
         self._session.wait_for_task(task)
-        LOG.info(_("Successfully deleted snapshot: %(name)s of backing: "
-                   "%(backing)s.") % {'backing': backing, 'name': name})
+        LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: "
+                     "%(backing)s.") % {'backing': backing, 'name': name})
 
     def _get_folder(self, backing):
         """Get parent folder of the backing.
@@ -1057,7 +1058,7 @@ class VMwareVolumeOps(object):
         LOG.debug("Initiated clone of backing: %s." % name)
         task_info = self._session.wait_for_task(task)
         new_backing = task_info.result
-        LOG.info(_("Successfully created clone: %s.") % new_backing)
+        LOG.info(_LI("Successfully created clone: %s.") % new_backing)
         return new_backing
 
     def _reconfigure_backing(self, backing, reconfig_spec):
@@ -1107,7 +1108,7 @@ class VMwareVolumeOps(object):
         :param backing: VM to be renamed
         :param new_name: new VM name
         """
-        LOG.info(_("Renaming backing VM: %(backing)s to %(new_name)s."),
+        LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."),
                  {'backing': backing,
                   'new_name': new_name})
         rename_task = self._session.invoke_api(self._session.vim,
@@ -1116,7 +1117,7 @@ class VMwareVolumeOps(object):
                                                newName=new_name)
         LOG.debug("Task: %s created for renaming VM.", rename_task)
         self._session.wait_for_task(rename_task)
-        LOG.info(_("Backing VM: %(backing)s renamed to %(new_name)s."),
+        LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."),
                  {'backing': backing,
                   'new_name': new_name})
 
@@ -1161,7 +1162,7 @@ class VMwareVolumeOps(object):
                                         datacenter=datacenter)
         LOG.debug("Initiated deletion via task: %s." % task)
         self._session.wait_for_task(task)
-        LOG.info(_("Successfully deleted file: %s.") % file_path)
+        LOG.info(_LI("Successfully deleted file: %s.") % file_path)
 
     def get_path_name(self, backing):
         """Get path name of the backing.
@@ -1195,7 +1196,8 @@ class VMwareVolumeOps(object):
             if device.__class__.__name__ == "VirtualDisk":
                 return device
 
-        LOG.error(_("Virtual disk device of backing: %s not found."), backing)
+        LOG.error(_LE("Virtual disk device of "
+                      "backing: %s not found."), backing)
         raise error_util.VirtualDiskNotFoundException()
 
     def get_vmdk_path(self, backing):
@@ -1309,7 +1311,7 @@ class VMwareVolumeOps(object):
                                         force=True)
         LOG.debug("Initiated copying disk data via task: %s." % task)
         self._session.wait_for_task(task)
-        LOG.info(_("Successfully copied disk at: %(src)s to: %(dest)s.") %
+        LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s.") %
                  {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
 
     def delete_vmdk_file(self, vmdk_file_path, dc_ref):
@@ -1327,7 +1329,7 @@ class VMwareVolumeOps(object):
                                         datacenter=dc_ref)
         LOG.debug("Initiated deleting vmdk file via task: %s." % task)
         self._session.wait_for_task(task)
-        LOG.info(_("Deleted vmdk file: %s.") % vmdk_file_path)
+        LOG.info(_LI("Deleted vmdk file: %s.") % vmdk_file_path)
 
     def get_all_profiles(self):
         """Get all profiles defined in current VC.