review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Implementing the use of _L'x'/i18n markers
author Mike Mason <mikemason010@gmail.com>
Thu, 4 Dec 2014 09:17:57 +0000 (09:17 +0000)
committer Mike Mason <mikemason010@gmail.com>
Tue, 9 Dec 2014 10:03:29 +0000 (10:03 +0000)
Placing the _Lx markers back into the code. No cleaner solution has
been implemented. Patches will be submitted as a series, one
subdirectory at a time, to keep them manageable.
Eighth commit of this kind.
This is the last run-through, picking up the occurrences that were
missed in earlier passes.

Change-Id: Ifd9d647175a840939bf01fa3bcecfa6384965e3b
Closes-Bug: #1384312
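
For reference, the _Lx markers come from cinder.i18n. A minimal sketch of
that module, assuming the oslo.i18n TranslatorFactory API of this era
(verify against the tree this patch targets):

    # cinder/i18n.py (sketch, not part of this patch)
    from oslo import i18n

    _translators = i18n.TranslatorFactory(domain='cinder')

    # Primary marker: user-facing strings (exception messages, API output).
    _ = _translators.primary

    # Log-level markers: each uses a separate message catalog so operators
    # can translate log output independently of user-facing text.
    _LI = _translators.log_info      # LOG.info
    _LW = _translators.log_warning   # LOG.warn / LOG.warning
    _LE = _translators.log_error     # LOG.error / LOG.exception
    _LC = _translators.log_critical  # LOG.critical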

63 files changed:
cinder/api/__init__.py
cinder/api/extensions.py
cinder/api/openstack/wsgi.py
cinder/backup/manager.py
cinder/brick/initiator/connector.py
cinder/brick/initiator/linuxfc.py
cinder/brick/initiator/linuxscsi.py
cinder/brick/local_dev/lvm.py
cinder/common/sqlalchemyutils.py
cinder/consistencygroup/api.py
cinder/db/sqlalchemy/api.py
cinder/keymgr/barbican.py
cinder/keymgr/conf_key_mgr.py
cinder/openstack/common/request_utils.py
cinder/scheduler/host_manager.py
cinder/tests/brick/test_brick_connector.py
cinder/tests/fake_driver.py
cinder/tests/integrated/api/client.py
cinder/tests/test_fujitsu.py
cinder/tests/zonemanager/test_brcd_fc_zone_driver.py
cinder/transfer/api.py
cinder/utils.py
cinder/volume/api.py
cinder/volume/driver.py
cinder/volume/drivers/block_device.py
cinder/volume/drivers/emc/emc_vmax_iscsi.py
cinder/volume/drivers/emc/emc_vnx_cli.py
cinder/volume/drivers/emc/xtremio.py
cinder/volume/drivers/fujitsu_eternus_dx_common.py
cinder/volume/drivers/fujitsu_eternus_dx_iscsi.py
cinder/volume/drivers/fusionio/ioControl.py
cinder/volume/drivers/hitachi/hbsd_common.py
cinder/volume/drivers/hitachi/hbsd_fc.py
cinder/volume/drivers/hitachi/hbsd_horcm.py
cinder/volume/drivers/hitachi/hbsd_snm2.py
cinder/volume/drivers/huawei/huawei_t.py
cinder/volume/drivers/huawei/rest_common.py
cinder/volume/drivers/huawei/ssh_common.py
cinder/volume/drivers/ibm/gpfs.py
cinder/volume/drivers/ibm/storwize_svc/__init__.py
cinder/volume/drivers/ibm/storwize_svc/helpers.py
cinder/volume/drivers/nfs.py
cinder/volume/drivers/nimble.py
cinder/volume/drivers/prophetstor/dplcommon.py
cinder/volume/drivers/san/hp/hp_3par_common.py
cinder/volume/drivers/san/hp/hp_3par_iscsi.py
cinder/volume/drivers/san/hp/hp_lefthand_iscsi.py
cinder/volume/drivers/san/hp/hp_lefthand_rest_proxy.py
cinder/volume/drivers/san/san.py
cinder/volume/drivers/smbfs.py
cinder/volume/drivers/vmware/api.py
cinder/volume/drivers/vmware/vmdk.py
cinder/volume/drivers/vmware/vmware_images.py
cinder/volume/drivers/windows/windows_utils.py
cinder/volume/drivers/zadara.py
cinder/volume/drivers/zfssa/restclient.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/flows/manager/manage_existing.py
cinder/volume/qos_specs.py
cinder/volume/utils.py
cinder/volume/volume_types.py
cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py
cinder/zonemanager/utils.py

index bfa5ae02b2b94647046495a20e427fa6cc6512b8..3f5d60a8667b12f127dc6a06fe28fd940bf1afb2 100644 (file)
@@ -18,7 +18,7 @@
 from oslo.config import cfg
 import paste.urlmap
 
-from cinder.i18n import _
+from cinder.i18n import _LW
 from cinder.openstack.common import log as logging
 
 
@@ -28,9 +28,9 @@ LOG = logging.getLogger(__name__)
 
 def root_app_factory(loader, global_conf, **local_conf):
     if CONF.enable_v1_api:
-        LOG.warn(_('The v1 api is deprecated and will be removed after the '
-                   'Juno release. You should set enable_v1_api=false and '
-                   'enable_v2_api=true in your cinder.conf file.'))
+        LOG.warn(_LW('The v1 api is deprecated and will be removed after the '
+                     'Juno release. You should set enable_v1_api=false and '
+                     'enable_v2_api=true in your cinder.conf file.'))
     else:
         del local_conf['/v1']
     if not CONF.enable_v2_api:
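
The hunks that follow repeat this same substitution across the remaining
files. The intended pattern, sketched as a fragment in the style of these
hunks (identifiers below are hypothetical, not taken from the patch):

    # Log strings take the marker matching the log level, with
    # substitutions passed to the logger so translation happens lazily.
    LOG.warn(_LW('ISCSI target %(tgt)s unreachable, retrying'),
             {'tgt': target})

    # User-facing strings (exceptions, API responses) keep plain _():
    raise exception.VolumeBackendAPIException(
        data=_('target unreachable'))
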
index 488f9409d1e15b46d8b72740e883ec1037b18152..cc01f8d3fc626c0c44485a2f1f20a8c311c59bc6 100644 (file)
@@ -25,7 +25,7 @@ import cinder.api.openstack
 from cinder.api.openstack import wsgi
 from cinder.api import xmlutil
 from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 import cinder.policy
 
@@ -273,8 +273,8 @@ class ExtensionManager(object):
             try:
                 self.load_extension(ext_factory)
             except Exception as exc:
-                LOG.warn(_('Failed to load extension %(ext_factory)s: '
-                           '%(exc)s'),
+                LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
+                             '%(exc)s'),
                          {'ext_factory': ext_factory, 'exc': exc})
 
 
index 46762d273bb9d5d91436860d9960766336cf7741..84ca382ed41b033860e74c6d7e3888767efb57d8 100644 (file)
@@ -27,7 +27,7 @@ import webob
 
 from cinder import exception
 from cinder import i18n
-from cinder.i18n import _, _LI
+from cinder.i18n import _, _LE, _LI
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder import wsgi
@@ -750,7 +750,7 @@ class ResourceExceptionHandler(object):
                 code=ex_value.code, explanation=ex_value.msg))
         elif isinstance(ex_value, TypeError):
             exc_info = (ex_type, ex_value, ex_traceback)
-            LOG.error(_(
+            LOG.error(_LE(
                 'Exception handling resource: %s') %
                 ex_value, exc_info=exc_info)
             raise Fault(webob.exc.HTTPBadRequest())
index 5ca7b7b39d6242c77ceb5ee80bbedb71a9b2aad2..3b0b1635b5eefa76ccf44cd0d9f21c542652dc47 100644 (file)
@@ -173,8 +173,8 @@ class BackupManager(manager.SchedulerDependentManager):
             driver.do_setup(ctxt)
             driver.check_for_setup_error()
         except Exception as ex:
-            LOG.error(_("Error encountered during initialization of driver: "
-                        "%(name)s.") %
+            LOG.error(_LE("Error encountered during initialization of driver: "
+                          "%(name)s.") %
                       {'name': driver.__class__.__name__})
             LOG.exception(ex)
             # we don't want to continue since we failed
index 6f082e0a0d2a2ef22ad0b1ba2199c8b25a1613a9..defc652327611568a2de4f4f1211babf64ac0b7f 100644 (file)
@@ -26,7 +26,7 @@ from cinder.brick.initiator import host_driver
 from cinder.brick.initiator import linuxfc
 from cinder.brick.initiator import linuxscsi
 from cinder.brick.remotefs import remotefs
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 
@@ -227,8 +227,8 @@ class ISCSIConnector(InitiatorConnector):
             if tries >= self.device_scan_attempts:
                 raise exception.VolumeDeviceNotFound(device=host_device)
 
-            LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. "
-                       "Will rescan & retry.  Try number: %(tries)s"),
+            LOG.warn(_LW("ISCSI volume not yet found at: %(host_device)s. "
+                         "Will rescan & retry.  Try number: %(tries)s"),
                      {'host_device': host_device,
                       'tries': tries})
 
@@ -634,8 +634,8 @@ class FibreChannelConnector(InitiatorConnector):
                 LOG.error(msg)
                 raise exception.NoFibreChannelVolumeDeviceFound()
 
-            LOG.warn(_("Fibre volume not yet found. "
-                       "Will rescan & retry.  Try number: %(tries)s"),
+            LOG.warn(_LW("Fibre volume not yet found. "
+                         "Will rescan & retry.  Try number: %(tries)s"),
                      {'tries': tries})
 
             self._linuxfc.rescan_hosts(hbas)
@@ -778,8 +778,8 @@ class AoEConnector(InitiatorConnector):
             if waiting_status['tries'] >= self.device_scan_attempts:
                 raise exception.VolumeDeviceNotFound(device=aoe_path)
 
-            LOG.warn(_("AoE volume not yet found at: %(path)s. "
-                       "Try number: %(tries)s"),
+            LOG.warn(_LW("AoE volume not yet found at: %(path)s. "
+                         "Try number: %(tries)s"),
                      {'path': aoe_device,
                       'tries': waiting_status['tries']})
 
@@ -860,8 +860,8 @@ class RemoteFsConnector(InitiatorConnector):
                     kwargs.get('glusterfs_mount_point_base') or\
                     mount_point_base
         else:
-            LOG.warn(_("Connection details not present."
-                       " RemoteFsClient may not initialize properly."))
+            LOG.warn(_LW("Connection details not present."
+                         " RemoteFsClient may not initialize properly."))
         self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper,
                                                        execute=execute,
                                                        *args, **kwargs)
index 710db54a02d08f2e5d80e9112de365a2f333995f..419228574ca697f08303fba3d816a53ae2c370ec 100644 (file)
@@ -19,7 +19,7 @@ import errno
 from oslo.concurrency import processutils as putils
 
 from cinder.brick.initiator import linuxscsi
-from cinder.i18n import _
+from cinder.i18n import _LW
 from cinder.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
@@ -48,13 +48,13 @@ class LinuxFibreChannel(linuxscsi.LinuxSCSI):
             # and systool is not installed
             # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND:
             if exc.exit_code == 96:
-                LOG.warn(_("systool is not installed"))
+                LOG.warn(_LW("systool is not installed"))
             return []
         except OSError as exc:
             # This handles the case where rootwrap is NOT used
             # and systool is not installed
             if exc.errno == errno.ENOENT:
-                LOG.warn(_("systool is not installed"))
+                LOG.warn(_LW("systool is not installed"))
             return []
 
         # No FC HBAs were found
index 7a1951c2bad7d550f421f21c76d79eee186b71e2..8be1d37f95cb6ebc67c2de0fff3e9107e90ce0cd 100644 (file)
@@ -22,7 +22,7 @@ import re
 from oslo.concurrency import processutils as putils
 
 from cinder.brick import executor
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
@@ -115,7 +115,7 @@ class LinuxSCSI(executor.Executor):
             self._execute('multipath', '-f', device, run_as_root=True,
                           root_helper=self._root_helper)
         except putils.ProcessExecutionError as exc:
-            LOG.warn(_("multipath call failed exit (%(code)s)")
+            LOG.warn(_LW("multipath call failed exit (%(code)s)")
                      % {'code': exc.exit_code})
 
     def flush_multipath_devices(self):
@@ -123,7 +123,7 @@ class LinuxSCSI(executor.Executor):
             self._execute('multipath', '-F', run_as_root=True,
                           root_helper=self._root_helper)
         except putils.ProcessExecutionError as exc:
-            LOG.warn(_("multipath call failed exit (%(code)s)")
+            LOG.warn(_LW("multipath call failed exit (%(code)s)")
                      % {'code': exc.exit_code})
 
     def find_multipath_device(self, device):
@@ -140,7 +140,7 @@ class LinuxSCSI(executor.Executor):
                                         run_as_root=True,
                                         root_helper=self._root_helper)
         except putils.ProcessExecutionError as exc:
-            LOG.warn(_("multipath call failed exit (%(code)s)")
+            LOG.warn(_LW("multipath call failed exit (%(code)s)")
                      % {'code': exc.exit_code})
             return None
 
@@ -163,7 +163,7 @@ class LinuxSCSI(executor.Executor):
                     mdev_id = mdev_id.replace(')', '')
 
                 if mdev is None:
-                    LOG.warn(_("Couldn't find multipath device %(line)s")
+                    LOG.warn(_LW("Couldn't find multipath device %(line)s")
                              % {'line': line})
                     return None
 
index 8b9cc861d5a21e22fd794e5e3e2942a54d7377d5..be92b2655dc8b7d106e88128156ac462878b2c48 100644 (file)
@@ -82,7 +82,7 @@ class LVM(executor.Executor):
                 raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
 
         if self._vg_exists() is False:
-            LOG.error(_('Unable to locate Volume Group %s') % vg_name)
+            LOG.error(_LE('Unable to locate Volume Group %s') % vg_name)
             raise exception.VolumeGroupNotFound(vg_name=vg_name)
 
         # NOTE: we assume that the VG has been activated outside of Cinder
@@ -396,7 +396,7 @@ class LVM(executor.Executor):
         vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
 
         if len(vg_list) != 1:
-            LOG.error(_('Unable to find VG: %s') % self.vg_name)
+            LOG.error(_LE('Unable to find VG: %s') % self.vg_name)
             raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
 
         self.vg_size = float(vg_list[0]['size'])
@@ -448,9 +448,9 @@ class LVM(executor.Executor):
         """
 
         if not self.supports_thin_provisioning(self._root_helper):
-            LOG.error(_('Requested to setup thin provisioning, '
-                        'however current LVM version does not '
-                        'support it.'))
+            LOG.error(_LE('Requested to setup thin provisioning, '
+                          'however current LVM version does not '
+                          'support it.'))
             return None
 
         if name is None:
@@ -521,7 +521,7 @@ class LVM(executor.Executor):
         """
         source_lvref = self.get_volume(source_lv_name)
         if source_lvref is None:
-            LOG.error(_("Trying to create snapshot by non-existent LV: %s")
+            LOG.error(_LE("Trying to create snapshot by non-existent LV: %s")
                       % source_lv_name)
             raise exception.VolumeDeviceNotFound(device=source_lv_name)
         cmd = ['lvcreate', '--name', name,
index a496893377259ac7ed54a55848e5c1ddcf3eb995..b35130ada1d061716e128a27d4013194cf943c99 100644 (file)
@@ -21,7 +21,7 @@
 import sqlalchemy
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 
 
@@ -64,7 +64,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
     if 'id' not in sort_keys:
         # TODO(justinsb): If this ever gives a false-positive, check
         # the actual primary key, rather than assuming its id
-        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
+        LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?'))
 
     assert(not (sort_dir and sort_dirs))
 
index fb55e74eca0d45df3af231bbffe3924e05ab854c..9a4786563e8871ba99f275041ed5f29b0c74a1b0 100644 (file)
@@ -26,7 +26,7 @@ from oslo.utils import timeutils
 
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.openstack.common import log as logging
 import cinder.policy
 from cinder import quota
@@ -136,8 +136,8 @@ class API(base.Base):
             group = self.db.consistencygroup_create(context, options)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error occurred when creating consistency group"
-                            " %s."), name)
+                LOG.error(_LE("Error occurred when creating consistency group"
+                              " %s."), name)
 
         request_spec_list = []
         filter_properties_list = []
@@ -199,9 +199,9 @@ class API(base.Base):
                 try:
                     self.db.consistencygroup_destroy(context, group_id)
                 finally:
-                    LOG.error(_("Error occurred when building "
-                                "request spec list for consistency group "
-                                "%s."), group_id)
+                    LOG.error(_LE("Error occurred when building "
+                                  "request spec list for consistency group "
+                                  "%s."), group_id)
 
         # Cast to the scheduler and let it handle whatever is needed
         # to select the target host for this group.
@@ -226,8 +226,8 @@ class API(base.Base):
                     self.db.consistencygroup_destroy(context.elevated(),
                                                      group_id)
                 finally:
-                    LOG.error(_("Failed to update quota for "
-                                "consistency group %s."), group_id)
+                    LOG.error(_LE("Failed to update quota for "
+                                  "consistency group %s."), group_id)
 
     @wrap_check_policy
     def delete(self, context, group, force=False):
@@ -368,8 +368,8 @@ class API(base.Base):
                 try:
                     self.db.cgsnapshot_destroy(context, cgsnapshot_id)
                 finally:
-                    LOG.error(_("Error occurred when creating cgsnapshot"
-                                " %s."), cgsnapshot_id)
+                    LOG.error(_LE("Error occurred when creating cgsnapshot"
+                                  " %s."), cgsnapshot_id)
 
         self.volume_rpcapi.create_cgsnapshot(context, group, cgsnapshot)
 
index b1dea38c03b08fee49ae07a3d85e0d4e595fc693..c53bdbb6c131b42f10f80fa45ed9a9d72f53c963 100644 (file)
@@ -210,8 +210,8 @@ def _retry_on_deadlock(f):
             try:
                 return f(*args, **kwargs)
             except db_exc.DBDeadlock:
-                LOG.warn(_("Deadlock detected when running "
-                           "'%(func_name)s': Retrying..."),
+                LOG.warn(_LW("Deadlock detected when running "
+                             "'%(func_name)s': Retrying..."),
                          dict(func_name=f.__name__))
                 # Retry!
                 time.sleep(0.5)
index d32e4d8327c1a874b5c13a1dc6332ada368c0d76..76d1128b6c91fcdf0c131577075347a929ee2e2e 100644 (file)
@@ -28,7 +28,7 @@ from oslo.config import cfg
 from oslo.utils import excutils
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.keymgr import key as keymgr_key
 from cinder.keymgr import key_mgr
 from cinder.openstack.common import log as logging
@@ -73,7 +73,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
                     endpoint=self._barbican_endpoint)
             except Exception as e:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_("Error creating Barbican client: %s"), (e))
+                    LOG.error(_LE("Error creating Barbican client: %s"), (e))
 
         return self._barbican_client
 
@@ -110,7 +110,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             return secret_uuid
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error creating key: %s"), (e))
+                LOG.error(_LE("Error creating key: %s"), (e))
 
     def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key',
                   payload_content_type='application/octet-stream',
@@ -165,7 +165,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             return secret_uuid
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error storing key: %s"), (e))
+                LOG.error(_LE("Error storing key: %s"), (e))
 
     def copy_key(self, ctxt, key_id):
         """Copies (i.e., clones) a key stored by barbican.
@@ -193,7 +193,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             return copy_uuid
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error copying key: %s"), (e))
+                LOG.error(_LE("Error copying key: %s"), (e))
 
     def _create_secret_ref(self, key_id, barbican_client):
         """Creates the URL required for accessing a secret.
@@ -230,7 +230,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             return secret_data
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error getting secret data: %s"), (e))
+                LOG.error(_LE("Error getting secret data: %s"), (e))
 
     def _get_secret(self, ctxt, secret_ref):
         """Creates the URL required for accessing a secret's metadata.
@@ -249,7 +249,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             return barbican_client.secrets.get(secret_ref)
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error getting secret metadata: %s"), (e))
+                LOG.error(_LE("Error getting secret metadata: %s"), (e))
 
     def get_key(self, ctxt, key_id,
                 payload_content_type='application/octet-stream'):
@@ -278,7 +278,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             return key
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error getting key: %s"), (e))
+                LOG.error(_LE("Error getting key: %s"), (e))
 
     def delete_key(self, ctxt, key_id):
         """Deletes the specified key.
@@ -295,4 +295,4 @@ class BarbicanKeyManager(key_mgr.KeyManager):
             barbican_client.secrets.delete(secret_ref)
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error deleting key: %s"), (e))
+                LOG.error(_LE("Error deleting key: %s"), (e))
index e6ee56d1cda65278231d36679f5aeea81f95bf49..4b7d5660fa5325e58ba3e7e745cf5bb46d17680a 100644 (file)
@@ -36,7 +36,7 @@ import array
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.keymgr import key
 from cinder.keymgr import key_mgr
 from cinder.openstack.common import log as logging
@@ -75,8 +75,8 @@ class ConfKeyManager(key_mgr.KeyManager):
 
     def _generate_hex_key(self, **kwargs):
         if CONF.keymgr.fixed_key is None:
-            LOG.warn(_('config option keymgr.fixed_key has not been defined: '
-                       'some operations may fail unexpectedly'))
+            LOG.warn(_LW('config option keymgr.fixed_key has not been defined:'
+                         ' some operations may fail unexpectedly'))
             raise ValueError(_('keymgr.fixed_key not defined'))
         return CONF.keymgr.fixed_key
 
@@ -131,4 +131,4 @@ class ConfKeyManager(key_mgr.KeyManager):
             raise exception.KeyManagerError(
                 reason="cannot delete non-existent key")
 
-        LOG.warn(_("Not deleting key %s"), key_id)
+        LOG.warn(_LW("Not deleting key %s"), key_id)
index 105a628ff68818f266b188eec9aab8c333cb3d1c..1ba9f8402789c470eb4305fe15dac5788df3a202 100644 (file)
@@ -20,7 +20,7 @@ Utilities for linking request ID's across service calls.
 import logging
 
 from openstack.common.gettextutils import _  # noqa
-
+from cinder.i18n import _LI
 
 LOG = logging.getLogger(__name__)
 
@@ -73,12 +73,12 @@ def link_request_ids(context, source_id, target_id=None, stage=None,
     if target_name or target_id:
         arrow = " -> "
 
-    LOG.info(_("Request ID Link: %(event_name)s '%(source_id)s'%(arrow)s"
-               "%(target_name)s%(target_id)s") % {"event_name": event_name,
-                                                  "source_id": source_id,
-                                                  "target_name": rtarget_name,
-                                                  "arrow": arrow,
-                                                  "target_id": rtarget_id})
+    LOG.info(_LI("Request ID Link: %(event_name)s '%(source_id)s'%(arrow)s"
+                 "%(target_name)s%(target_id)s") % {"event_name": event_name,
+                                                    "source_id": source_id,
+                                                    "target_name": rtarget_name,
+                                                    "arrow": arrow,
+                                                    "target_id": rtarget_id})
 
     if notifier:
         payload = {"source_request_id": source_id,
index ace38eb043df6e2f3569a25423dc5e5c74d2f9f6..89ba56b481c0590eef48f69e6ea41fccde955b2c 100644 (file)
@@ -24,7 +24,7 @@ from oslo.utils import timeutils
 
 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common.scheduler import filters
 from cinder.openstack.common.scheduler import weights
@@ -452,7 +452,7 @@ class HostManager(object):
         for service in volume_services:
             host = service['host']
             if not utils.service_is_up(service):
-                LOG.warn(_("volume service is down. (host: %s)") % host)
+                LOG.warn(_LW("volume service is down. (host: %s)") % host)
                 continue
             capabilities = self.service_states.get(host, None)
             host_state = self.host_state_map.get(host)
index 5662eb028ebf1fe5b7697fc8b189a56abbf25778..2fb11db0d5bc21e25343f33b547072b1504fa032 100644 (file)
@@ -21,7 +21,7 @@ from oslo.concurrency import processutils as putils
 from cinder.brick import exception
 from cinder.brick.initiator import connector
 from cinder.brick.initiator import host_driver
-from cinder.i18n import _
+from cinder.i18n import _LE
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder import test
@@ -490,7 +490,7 @@ class FakeFixedIntervalLoopingCall(object):
             except loopingcall.LoopingCallDone:
                 return self
             except Exception:
-                LOG.exception(_('in fixed duration looping call'))
+                LOG.exception(_LE('in fixed duration looping call'))
                 raise
 
 
index aaa961798d0bed84f7634fafad475f1093d0555f..68c8afa5d248a2f7cd296ee9ec73ddf06bfed48d 100644 (file)
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from cinder.i18n import _
+from cinder.i18n import _LE
 from cinder.openstack.common import log as logging
 from cinder.tests.brick.fake_lvm import FakeBrickLVM
 from cinder.volume import driver
@@ -132,7 +132,7 @@ class LoggingVolumeDriver(driver.VolumeDriver):
         self.log_action('clear_volume', volume)
 
     def local_path(self, volume):
-        LOG.error(_("local_path not implemented"))
+        LOG.error(_LE("local_path not implemented"))
         raise NotImplementedError()
 
     def ensure_export(self, context, volume):
index 795b933d28b908e96c03b5fa6e8199fe75cf2309..d22f78aa142d9bb905080cbca21f6e1c27e62acf 100644 (file)
@@ -17,7 +17,7 @@ from oslo.serialization import jsonutils
 import requests
 import six.moves.urllib.parse as urlparse
 
-from cinder.i18n import _
+from cinder.i18n import _, _LI
 from cinder.openstack.common import log as logging
 
 
@@ -94,10 +94,10 @@ class TestOpenStackClient(object):
         relative_url = parsed_url.path
         if parsed_url.query:
             relative_url = relative_url + "?" + parsed_url.query
-        LOG.info(_("Doing %(method)s on %(relative_url)s"),
+        LOG.info(_LI("Doing %(method)s on %(relative_url)s"),
                  {'method': method, 'relative_url': relative_url})
         if body:
-            LOG.info(_("Body: %s") % body)
+            LOG.info(_LI("Body: %s") % body)
 
         if port:
             _url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url)
index ed8eabac90c87fa506e67f95dc5a4883adfc2e52..9b22fb96686e30b1b86e750c53ecb322257e1cd6 100644 (file)
@@ -20,7 +20,7 @@ import tempfile
 import mock
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LW
 from cinder.openstack.common import log as logging
 from cinder import test
 from cinder.volume.drivers.fujitsu_eternus_dx_common import FJDXCommon
@@ -173,7 +173,7 @@ class FakeEcomConnection():
             rc = 0L
             job = {}
         else:
-            LOG.warn(_('method is not exist '))
+            LOG.warn(_LW('method is not exist '))
             raise exception.VolumeBackendAPIException(data="invoke method")
         LOG.debug('exit InvokeMethod:MAP_STAT: %s  VOL_STAT: %s'
                   '  Method: %s  rc: %d  job: %s' %
index ab5133ef52cb0f99c1d61988e7a885cbb08ae4a4..5845c853882424a411a44fd3a396cbfa65a65d35 100644 (file)
@@ -25,7 +25,7 @@ from oslo.utils import importutils
 import paramiko
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LI
 from cinder.openstack.common import log as logging
 from cinder import test
 from cinder.volume import configuration as conf
@@ -124,10 +124,10 @@ class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
         """Normal flow for i-t mode."""
         GlobalVars._is_normal_test = True
         GlobalVars._zone_state = []
-        LOG.info(_("In Add GlobalVars._is_normal_test: "
-                   "%s"), GlobalVars._is_normal_test)
-        LOG.info(_("In Add GlobalVars._zone_state:"
-                   " %s"), GlobalVars._zone_state)
+        LOG.info(_LI("In Add GlobalVars._is_normal_test: "
+                     "%s"), GlobalVars._is_normal_test)
+        LOG.info(_LI("In Add GlobalVars._zone_state:"
+                     " %s"), GlobalVars._zone_state)
         get_active_zs_mock.return_value = _active_cfg_before_add
         self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
         self.assertTrue(_zone_name in GlobalVars._zone_state)
@@ -181,8 +181,8 @@ class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
 
 class FakeBrcdFCZoneClientCLI(object):
     def __init__(self, ipaddress, username, password, port):
-        LOG.info(_("User: %s"), username)
-        LOG.info(_("_zone_state: %s"), GlobalVars._zone_state)
+        LOG.info(_LI("User: %s"), username)
+        LOG.info(_LI("_zone_state: %s"), GlobalVars._zone_state)
         self.firmware_supported = True
         if not GlobalVars._is_normal_test:
             raise paramiko.SSHException("Unable to connect to fabric")
index f6ff3de97f61f4ff0c5262458545c27c195fcdf2..46a706b4b5e9958131b582096b80427f33eefc3e 100644 (file)
@@ -121,7 +121,8 @@ class API(base.Base):
         try:
             transfer = self.db.transfer_create(context, transfer_rec)
         except Exception:
-            LOG.error(_("Failed to create transfer record for %s") % volume_id)
+            LOG.error(_LE("Failed to create transfer record "
+                          "for %s") % volume_id)
             raise
         return {'id': transfer['id'],
                 'volume_id': transfer['volume_id'],
index 5cf97ff8c04c969bccec43347e912b19afe2d502..4ab6b8a1f91975572f069a93cdc7be1d7a154e68 100644 (file)
@@ -44,7 +44,7 @@ import six
 
 from cinder.brick.initiator import connector
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.openstack.common import log as logging
 
 
@@ -614,7 +614,7 @@ def require_driver_initialized(driver):
     # we can't do anything if the driver didn't init
     if not driver.initialized:
         driver_name = driver.__class__.__name__
-        LOG.error(_("Volume driver %s not initialized") % driver_name)
+        LOG.error(_LE("Volume driver %s not initialized") % driver_name)
         raise exception.DriverNotInitialized()
 
 
index 237e07c3433b3aa900d3ac37a52900300bcf2639..f1929957f39ff27f5c146388201abad5a6c54776 100644 (file)
@@ -1236,7 +1236,7 @@ class API(base.Base):
                     elevated, svc_host, CONF.volume_topic)
             except exception.ServiceNotFound:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_('Unable to find service for given host.'))
+                    LOG.error(_LE('Unable to find service for given host.'))
             availability_zone = service.get('availability_zone')
 
         volume_type_id = volume_type['id'] if volume_type else None
index db7d39d185c91c3e8678e799d52d0be667ecd2b5..48611512f42c07ae2672b93b85b02bc992b92705 100755 (executable)
@@ -24,7 +24,7 @@ from oslo.config import cfg
 from oslo.utils import excutils
 
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import log as logging
@@ -889,7 +889,8 @@ class ISCSIDriver(VolumeDriver):
     def _do_iscsi_discovery(self, volume):
         # TODO(justinsb): Deprecate discovery and use stored info
         # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
-        LOG.warn(_("ISCSI provider_location not stored, using discovery"))
+        LOG.warn(_LW("ISCSI provider_location not "
+                     "stored, using discovery"))
 
         volume_name = volume['name']
 
@@ -902,7 +903,7 @@ class ISCSIDriver(VolumeDriver):
                                         volume['host'].split('@')[0],
                                         run_as_root=True)
         except processutils.ProcessExecutionError as ex:
-            LOG.error(_("ISCSI discovery attempt failed for:%s") %
+            LOG.error(_LE("ISCSI discovery attempt failed for:%s") %
                       volume['host'].split('@')[0])
             LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr)
             return None
index 95f5b4fc9e7636babc5d15a7d301728d8d447f38..e2e741f341e9021df6befe82031944552cf80e3c 100644 (file)
@@ -20,7 +20,7 @@ from oslo.config import cfg
 from cinder import context
 from cinder.db.sqlalchemy import api
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
@@ -139,7 +139,7 @@ class BlockDeviceDriver(driver.ISCSIDriver):
                                   self.local_path(volume))
 
     def create_cloned_volume(self, volume, src_vref):
-        LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
+        LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
         device = self.find_appropriate_size_device(src_vref['size'])
         volutils.copy_volume(
             self.local_path(src_vref), device,
index 8a8ab6762d8d706fa536b1f741ccc42ecdfc5b66..a2159dea9746dc517715405ae315cfc4bcff1988 100644 (file)
@@ -20,7 +20,7 @@ import six
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 from cinder.volume.drivers.emc import emc_vmax_common
@@ -145,7 +145,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
         iscsi_properties = self.smis_get_iscsi_properties(
             volume, connector)
 
-        LOG.info(_("Leaving initialize_connection: %s") % (iscsi_properties))
+        LOG.info(_LI("Leaving initialize_connection: %s") % (iscsi_properties))
         return {
             'driver_volume_type': 'iscsi',
             'data': iscsi_properties
@@ -153,14 +153,14 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
 
     def smis_do_iscsi_discovery(self, volume):
 
-        LOG.info(_("ISCSI provider_location not stored, using discovery."))
+        LOG.info(_LI("ISCSI provider_location not stored, using discovery."))
 
         (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                     '-t', 'sendtargets', '-p',
                                     self.configuration.iscsi_ip_address,
                                     run_as_root=True)
 
-        LOG.info(_(
+        LOG.info(_LI(
             "smis_do_iscsi_discovery is: %(out)s")
             % {'out': out})
         targets = []
@@ -206,7 +206,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
 
         device_number = device_info['hostlunid']
 
-        LOG.info(_(
+        LOG.info(_LI(
             "location is: %(location)s") % {'location': location})
 
         for loc in location:
@@ -218,14 +218,14 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
 
         properties['volume_id'] = volume['id']
 
-        LOG.info(_("ISCSI properties: %(properties)s")
+        LOG.info(_LI("ISCSI properties: %(properties)s")
                  % {'properties': properties})
-        LOG.info(_("ISCSI volume is: %(volume)s")
+        LOG.info(_LI("ISCSI volume is: %(volume)s")
                  % {'volume': volume})
 
         if 'provider_auth' in volume:
             auth = volume['provider_auth']
-            LOG.info(_("AUTH properties: %(authProps)s")
+            LOG.info(_LI("AUTH properties: %(authProps)s")
                      % {'authProps': auth})
 
             if auth is not None:
@@ -235,7 +235,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
                 properties['auth_username'] = auth_username
                 properties['auth_password'] = auth_secret
 
-                LOG.info(_("AUTH properties: %s") % (properties))
+                LOG.info(_LI("AUTH properties: %s") % (properties))
 
         return properties
 
index 9cd7bd92760baa9ee29136404ce1d1b23456ce8d..18a70c6d843b968f59556b4b22dd3698eb9eb0ba 100644 (file)
@@ -2368,7 +2368,8 @@ class EMCVnxCliBase(object):
                 properties['auth_username'] = auth_username
                 properties['auth_password'] = auth_secret
         else:
-            LOG.error(_('Failed to find an available iSCSI targets for %s.'),
+            LOG.error(_LE('Failed to find an available '
+                          'iSCSI targets for %s.'),
                       storage_group)
 
         return properties
index 8888b6393dcb1f6a52b45d7c700da45f5ebe46a0..d6bdf5d76f4051c1d5ed5d00d1c07f3028c69b5d 100644 (file)
@@ -125,8 +125,8 @@ class XtremIOVolumeDriver(san.SanDriver):
             try:
                 return json.loads(str_result)
             except Exception:
-                LOG.exception(_('querying %(typ)s, %(req)s failed to '
-                                'parse result, return value = %(res)s'),
+                LOG.exception(_LE('querying %(typ)s, %(req)s failed to '
+                                  'parse result, return value = %(res)s'),
                               {'typ': object_type,
                                'req': request_typ,
                                'res': str_result})
index 795b944ef87abb44518c1a5febf032898b96286e..caa329f6b233d0161f3581b13a094cc3d72cdb54 100644 (file)
@@ -30,7 +30,7 @@ from oslo.utils import units
 import six
 
 from cinder import exception
-from cinder.i18n import _, _LE, _LW
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.volume import volume_types
@@ -157,7 +157,7 @@ class FJDXCommon(object):
         volumesize = int(volume['size']) * units.Gi
         volumename = self._create_volume_name(volume['id'])
 
-        LOG.info(_('Create Volume: %(volume)s  Size: %(size)lu')
+        LOG.info(_LI('Create Volume: %(volume)s  Size: %(size)lu')
                  % {'volume': volumename,
                     'size': volumesize})
 
@@ -287,8 +287,8 @@ class FJDXCommon(object):
         volumename = self._create_volume_name(volume['id'])
         vol_instance = None
 
-        LOG.info(_('Create Volume from Snapshot: Volume: %(volumename)s  '
-                   'Snapshot: %(snapshotname)s')
+        LOG.info(_LI('Create Volume from Snapshot: Volume: %(volumename)s  '
+                     'Snapshot: %(snapshotname)s')
                  % {'volumename': volumename,
                     'snapshotname': snapshotname})
 
@@ -396,8 +396,8 @@ class FJDXCommon(object):
         srcname = self._create_volume_name(src_vref['id'])
         volumename = self._create_volume_name(volume['id'])
 
-        LOG.info(_('Create a Clone from Volume: Volume: %(volumename)s  '
-                   'Source Volume: %(srcname)s')
+        LOG.info(_LI('Create a Clone from Volume: Volume: %(volumename)s  '
+                     'Source Volume: %(srcname)s')
                  % {'volumename': volumename,
                     'srcname': srcname})
 
@@ -500,7 +500,7 @@ class FJDXCommon(object):
         """Deletes an volume."""
         LOG.debug('Entering delete_volume.')
         volumename = self._create_volume_name(volume['id'])
-        LOG.info(_('Delete Volume: %(volume)s')
+        LOG.info(_LI('Delete Volume: %(volume)s')
                  % {'volume': volumename})
 
         self.conn = self._get_ecom_connection()
@@ -574,7 +574,7 @@ class FJDXCommon(object):
 
         snapshotname = self._create_volume_name(snapshot['id'])
         volumename = snapshot['volume_name']
-        LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s')
+        LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s')
                  % {'snapshot': snapshotname,
                     'volume': volumename})
 
@@ -702,7 +702,7 @@ class FJDXCommon(object):
 
         snapshotname = snapshot['name']
         volumename = snapshot['volume_name']
-        LOG.info(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s')
+        LOG.info(_LI('Delete Snapshot: %(snapshot)s: volume: %(volume)s')
                  % {'snapshot': snapshotname,
                     'volume': volumename})
 
@@ -783,8 +783,8 @@ class FJDXCommon(object):
                 sync_name, storage_system =\
                     self._find_storage_sync_sv_sv(snapshot, volume, False)
                 if sync_name is None:
-                    LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
-                               'Snapshot is deleted.')
+                    LOG.info(_LI('Snapshot: %(snapshot)s: volume: %(volume)s. '
+                                 'Snapshot is deleted.')
                              % {'snapshot': snapshotname,
                                 'volume': volumename})
                     raise loopingcall.LoopingCallDone()
@@ -797,8 +797,8 @@ class FJDXCommon(object):
             except Exception as ex:
                 if ex.args[0] == 6:
                     # 6 means object not found, so snapshot is deleted cleanly
-                    LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
-                               'Snapshot is deleted.')
+                    LOG.info(_LI('Snapshot: %(snapshot)s: volume: %(volume)s. '
+                                 'Snapshot is deleted.')
                              % {'snapshot': snapshotname,
                                 'volume': volumename})
                 else:
@@ -931,7 +931,7 @@ class FJDXCommon(object):
     def _map_lun(self, volume, connector):
         """Maps a volume to the host."""
         volumename = self._create_volume_name(volume['id'])
-        LOG.info(_('Map volume: %(volume)s')
+        LOG.info(_LI('Map volume: %(volume)s')
                  % {'volume': volumename})
 
         vol_instance = self._find_lun(volume)
@@ -950,13 +950,13 @@ class FJDXCommon(object):
     def _unmap_lun(self, volume, connector):
         """Unmaps a volume from the host."""
         volumename = self._create_volume_name(volume['id'])
-        LOG.info(_('Unmap volume: %(volume)s')
+        LOG.info(_LI('Unmap volume: %(volume)s')
                  % {'volume': volumename})
 
         device_info = self.find_device_number(volume, connector)
         device_number = device_info['hostlunid']
         if device_number is None:
-            LOG.info(_("Volume %s is not mapped. No volume to unmap.")
+            LOG.info(_LI("Volume %s is not mapped. No volume to unmap.")
                      % (volumename))
             return
 
@@ -975,13 +975,13 @@ class FJDXCommon(object):
     def initialize_connection(self, volume, connector):
         """Initializes the connection and returns connection info."""
         volumename = self._create_volume_name(volume['id'])
-        LOG.info(_('Initialize connection: %(volume)s')
+        LOG.info(_LI('Initialize connection: %(volume)s')
                  % {'volume': volumename})
         self.conn = self._get_ecom_connection()
         device_info = self.find_device_number(volume, connector)
         device_number = device_info['hostlunid']
         if device_number is not None:
-            LOG.info(_("Volume %s is already mapped.")
+            LOG.info(_LI("Volume %s is already mapped.")
                      % (volumename))
         else:
             self._map_lun(volume, connector)
@@ -993,7 +993,7 @@ class FJDXCommon(object):
     def terminate_connection(self, volume, connector):
         """Disallow connection from connector."""
         volumename = self._create_volume_name(volume['id'])
-        LOG.info(_('Terminate connection: %(volume)s')
+        LOG.info(_LI('Terminate connection: %(volume)s')
                  % {'volume': volumename})
         self.conn = self._get_ecom_connection()
         self._unmap_lun(volume, connector)
@@ -1010,7 +1010,7 @@ class FJDXCommon(object):
         volumesize = int(new_size) * units.Gi
         volumename = self._create_volume_name(volume['id'])
 
-        LOG.info(_('Extend Volume: %(volume)s  New size: %(size)lu')
+        LOG.info(_LI('Extend Volume: %(volume)s  New size: %(size)lu')
                  % {'volume': volumename,
                     'size': volumesize})
 
@@ -1353,8 +1353,9 @@ class FJDXCommon(object):
         snapshot_instance = self._find_lun(snapshot)
         volume_instance = self._find_lun(volume)
         if snapshot_instance is None or volume_instance is None:
-            LOG.info(_('Snapshot Volume %(snapshotname)s, '
-                       'Source Volume %(volumename)s not found on the array.')
+            LOG.info(_LI('Snapshot Volume %(snapshotname)s, '
+                         'Source Volume %(volumename)s not '
+                         'found on the array.')
                      % {'snapshotname': snapshotname,
                         'volumename': volumename})
             return None, None
@@ -1415,8 +1416,8 @@ class FJDXCommon(object):
             if self._is_job_finished(conn, job):
                 raise loopingcall.LoopingCallDone()
             if self.retries > JOB_RETRIES:
-                LOG.error(_("_wait_for_job_complete failed after %(retries)d "
-                          "tries") % {'retries': self.retries})
+                LOG.error(_LE("_wait_for_job_complete failed after %(retries)d"
+                          " tries") % {'retries': self.retries})
                 raise loopingcall.LoopingCallDone()
             try:
                 self.retries += 1
@@ -1424,7 +1425,7 @@ class FJDXCommon(object):
                     if self._is_job_finished(conn, job):
                         self.wait_for_job_called = True
             except Exception as e:
-                LOG.error(_("Exception: %s") % six.text_type(e))
+                LOG.error(_LE("Exception: %s") % six.text_type(e))
                 exceptionMessage = (_("Issue encountered waiting for job."))
                 LOG.error(exceptionMessage)
                 raise exception.VolumeBackendAPIException(exceptionMessage)
@@ -1479,7 +1480,7 @@ class FJDXCommon(object):
             if self._is_sync_complete(conn, syncName):
                 raise loopingcall.LoopingCallDone()
             if self.retries > JOB_RETRIES:
-                LOG.error(_("_wait_for_sync failed after %(retries)d tries")
+                LOG.error(_LE("_wait_for_sync failed after %(retries)d tries")
                           % {'retries': self.retries})
                 raise loopingcall.LoopingCallDone()
             try:
@@ -1488,7 +1489,7 @@ class FJDXCommon(object):
                     if self._is_sync_complete(conn, syncName):
                         self.wait_for_sync_called = True
             except Exception as e:
-                LOG.error(_("Exception: %s") % six.text_type(e))
+                LOG.error(_LE("Exception: %s") % six.text_type(e))
                 exceptionMessage = (_("Issue encountered waiting for "
                                       "synchronization."))
                 LOG.error(exceptionMessage)
@@ -1668,8 +1669,8 @@ class FJDXCommon(object):
                     break
 
         if out_num_device_number is None:
-            LOG.info(_("Device number not found for volume "
-                       "%(volumename)s %(vol_instance)s.")
+            LOG.info(_LI("Device number not found for volume "
+                         "%(volumename)s %(vol_instance)s.")
                      % {'volumename': volumename,
                         'vol_instance': vol_instance.path})
         else:
index ab41f6eda75e932ea2fab73bd502deb5206b8ccf..afd05182d2b5225e5a7c69db602f1810c0b57367 100644 (file)
@@ -22,7 +22,7 @@ import six
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 from cinder.volume.drivers import fujitsu_eternus_dx_common
@@ -147,7 +147,7 @@ class FJDXISCSIDriver(driver.ISCSIDriver):
 
     def _do_iscsi_discovery(self, volume):
 
-        LOG.warn(_("ISCSI provider_location not stored, using discovery"))
+        LOG.warn(_LW("ISCSI provider_location not stored, using discovery"))
 
         (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                     '-t', 'sendtargets', '-p',
index e3b09856bacc25dc5ae1b8583668a4ca99dc443f..ce7985c262d8578766e401bf1243961ea05ee859 100644 (file)
@@ -28,7 +28,7 @@ from oslo.utils import units
 import requests
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.volume.drivers.san.san import SanISCSIDriver
@@ -251,8 +251,8 @@ class FIOioControlDriver(SanISCSIDriver):
                    if i.key == 'fio-qos' and i.value in valid_presets]
         if len(presets) > 0:
             if len(presets) > 1:
-                LOG.warning(_('More than one valid preset was '
-                              'detected, using %s') % presets[0])
+                LOG.warning(_LW('More than one valid preset was '
+                                'detected, using %s') % presets[0])
             return self.fio_qos_dict[presets[0]]
 
     def _set_qos_by_volume_type(self, type_id):
index 4b8af246f64c2a0b9b74a43f5754997828669a81..bc6b1e42cab15ece81583166289947ade3d61100 100644 (file)
@@ -27,7 +27,7 @@ import six
 from cinder.db.sqlalchemy import api
 from cinder.db.sqlalchemy import models
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
@@ -389,14 +389,14 @@ class HBSDCommon(object):
                     try:
                         self.command.restart_pair_horcm()
                     except Exception as e:
-                        LOG.warning(_('Failed to restart horcm: %s') %
+                        LOG.warning(_LW('Failed to restart horcm: %s') %
                                     six.text_type(e))
         else:
             if (all_split or is_vvol) and restart:
                 try:
                     self.command.restart_pair_horcm()
                 except Exception as e:
-                    LOG.warning(_('Failed to restart horcm: %s') %
+                    LOG.warning(_LW('Failed to restart horcm: %s') %
                                 six.text_type(e))
 
     def copy_async_data(self, pvol, svol, is_vvol):
index 8fb2269b96a12f05e293fdafbf29f39b6d50b2a1..aab7165ca78e46a908f8c9e3892a637c5db00be0 100644 (file)
@@ -25,7 +25,7 @@ from oslo.utils import excutils
 import six
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LW
 from cinder.openstack.common import log as logging
 from cinder import utils
 import cinder.volume.driver
@@ -181,7 +181,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
                     try:
                         self._fill_group(hgs, port, host_grp_name, wwns_copy)
                     except Exception as ex:
-                        LOG.warning(_('Failed to add host group: %s') %
+                        LOG.warning(_LW('Failed to add host group: %s') %
                                     six.text_type(ex))
                         msg = basic_lib.set_msg(
                             308, port=port, name=host_grp_name)
index 108735a81bb43fac809f170cfdcea54003df0900..a7b2272d71f099d5c985393fe9400f3312d86bd9 100644 (file)
@@ -26,7 +26,7 @@ from oslo.utils import excutils
 import six
 
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder import utils
@@ -894,7 +894,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
         try:
             self.comm_modify_ldev(ldev)
         except Exception as e:
-            LOG.warning(_('Failed to discard zero page: %s') %
+            LOG.warning(_LW('Failed to discard zero page: %s') %
                         six.text_type(e))
 
     @storage_synchronized
@@ -1393,7 +1393,7 @@ HORCM_CMD
                                            [basic_lib.PSUS], timeout,
                                            interval, check_svol=True)
                         except Exception as ex:
-                            LOG.warning(_('Failed to create pair: %s') %
+                            LOG.warning(_LW('Failed to create pair: %s') %
                                         six.text_type(ex))
 
                         try:
@@ -1403,7 +1403,7 @@ HORCM_CMD
                                 [basic_lib.SMPL], timeout,
                                 self.conf.hitachi_async_copy_check_interval)
                         except Exception as ex:
-                            LOG.warning(_('Failed to create pair: %s') %
+                            LOG.warning(_LW('Failed to create pair: %s') %
                                         six.text_type(ex))
 
                     if self.is_smpl(copy_group, ldev_name):
@@ -1411,14 +1411,14 @@ HORCM_CMD
                             self.delete_pair_config(pvol, svol, copy_group,
                                                     ldev_name)
                         except Exception as ex:
-                            LOG.warning(_('Failed to create pair: %s') %
+                            LOG.warning(_LW('Failed to create pair: %s') %
                                         six.text_type(ex))
 
                     if restart:
                         try:
                             self.restart_pair_horcm()
                         except Exception as ex:
-                            LOG.warning(_('Failed to restart horcm: %s') %
+                            LOG.warning(_LW('Failed to restart horcm: %s') %
                                         six.text_type(ex))
 
         else:
@@ -1437,7 +1437,7 @@ HORCM_CMD
                             pvol, svol, [basic_lib.SMPL], timeout,
                             self.conf.hitachi_async_copy_check_interval)
                     except Exception as ex:
-                        LOG.warning(_('Failed to create pair: %s') %
+                        LOG.warning(_LW('Failed to create pair: %s') %
                                     six.text_type(ex))
 
     def delete_pair(self, pvol, svol, is_vvol):
index 8359704dd7e7559828d95d10db2312eaea2d4607..c72506ed87ed47bfe278e5f2ed0a5d56fa00dd5f 100644 (file)
@@ -21,7 +21,7 @@ import time
 import six
 
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder import utils
@@ -126,8 +126,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
                     used_list.append(int(line[2]))
                 if int(line[3]) == ldev:
                     hlu = int(line[2])
-                    LOG.warning(_('ldev(%(ldev)d) is already mapped '
-                                  '(hlun: %(hlu)d)')
+                    LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
+                                    '(hlun: %(hlu)d)')
                                 % {'ldev': ldev, 'hlu': hlu})
                     return hlu
         return None
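
Note that this series changes only the marker; most call sites, like the hunks above, still interpolate eagerly with %. A hedged sketch of the lazy alternative, which hands the argument to the logging framework so formatting (and the translation lookup) is skipped when the level is filtered out:

    # Sketch only: same message, two formatting styles. Assumes the
    # cinder tree is importable; stdlib logging stands in for
    # cinder.openstack.common.log here.
    import logging

    import six

    from cinder.i18n import _LW

    LOG = logging.getLogger(__name__)

    def _report(e):
        # Eager: the string is built before the logger sees it.
        LOG.warning(_LW('Failed to discard zero page: %s') %
                    six.text_type(e))
        # Lazy: the logger formats only if the record is emitted.
        LOG.warning(_LW('Failed to discard zero page: %s'),
                    six.text_type(e))
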
index fdf8918c0ed905e86d7dafd2a7b3daa131efc7ec..2964d30b83c278b8e892c170c2d5d02a79b4eca6 100644 (file)
@@ -21,7 +21,7 @@ import re
 import time
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 from cinder.volume.drivers.huawei import huawei_utils
@@ -350,8 +350,8 @@ class HuaweiTISCSIDriver(driver.ISCSIDriver):
                     port_num -= 1
                     break
         else:
-            LOG.warn(_('_remove_iscsi_port: iSCSI port was not found '
-                       'on host %(hostid)s.') % {'hostid': hostid})
+            LOG.warn(_LW('_remove_iscsi_port: iSCSI port was not found '
+                         'on host %(hostid)s.') % {'hostid': hostid})
 
         # Delete host if no initiator added to it.
         if port_num == 0:
@@ -579,8 +579,8 @@ class HuaweiTFCDriver(driver.FibreChannelDriver):
                     self.common._delete_hostport(port[0])
                     port_num -= 1
         else:
-            LOG.warn(_('_remove_fc_ports: FC port was not found '
-                       'on host %(hostid)s.') % {'hostid': hostid})
+            LOG.warn(_LW('_remove_fc_ports: FC port was not found '
+                         'on host %(hostid)s.') % {'hostid': hostid})
 
         if port_num == 0:
             self.common._delete_host(hostid)
index 93a8a7a74a3b959e694a4c6d0323a52a4dca6db8..bb8cbfb7fddccc403f69d9089fd08934a4f32dcf 100644 (file)
@@ -28,7 +28,7 @@ from oslo.utils import units
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder.volume.drivers.huawei import huawei_utils
@@ -206,7 +206,7 @@ class HVSCommon():
             if policy_id:
                 self._update_qos_policy_lunlist(lun_list, policy_id)
             else:
-                LOG.warn(_("Can't find the Qos policy in array"))
+                LOG.warn(_LW("Can't find the Qos policy in array"))
 
         # Create lun group and add LUN into to lun group
         lungroup_id = self._create_lungroup(volume_name)
@@ -244,7 +244,7 @@ class HVSCommon():
             self._delete_lungroup(lungroup_id)
             self._delete_lun(lun_id)
         else:
-            LOG.warn(_("Can't find lun or lun group in array"))
+            LOG.warn(_LW("Can't find lun or lun group in array"))
 
     def _delete_lun_from_qos_policy(self, volume, lun_id):
         """Remove lun from qos policy."""
@@ -1155,10 +1155,11 @@ class HVSCommon():
                     params[key] = value.strip()
                 else:
                     conf = self.configuration.cinder_huawei_conf_file
-                    LOG.warn(_('_parse_volume_type: Unacceptable parameter '
-                               '%(key)s. Please check this key in extra_specs '
-                               'and make it consistent with the configuration '
-                               'file %(conf)s.') % {'key': key, 'conf': conf})
+                    LOG.warn(_LW('_parse_volume_type: Unacceptable parameter '
+                                 '%(key)s. Please check this key in '
+                                 'extra_specs and make it consistent with the '
+                                 'configuration file '
+                                 '%(conf)s.') % {'key': key, 'conf': conf})
 
         LOG.debug("The config parameters are: %s" % params)
         return params
@@ -1223,7 +1224,7 @@ class HVSCommon():
             try:
                 tree.write(filename, 'UTF-8')
             except Exception as err:
-                LOG.warn(_('%s') % err)
+                LOG.warn(_LW('%s') % err)
 
         return logininfo
 
@@ -1298,4 +1299,4 @@ class HVSCommon():
             result = self.call(url, data, "PUT")
             self._assert_rest_result(result, 'Extend lun error.')
         else:
-            LOG.warn(_('Can not find lun in array'))
+            LOG.warn(_LW('Cannot find lun in array'))
index 3e970d0e9e6400c02fa160da6338f9c4121c377b..215bdcea4dd5fb3c27c6b784b6867d8b958eec71 100644 (file)
@@ -30,7 +30,7 @@ from oslo.utils import excutils
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder import ssh_utils
 from cinder import utils
@@ -278,10 +278,11 @@ class TseriesCommon():
                     params[key] = value.strip()
                 else:
                     conf = self.configuration.cinder_huawei_conf_file
-                    LOG.warn(_('_parse_volume_type: Unacceptable parameter '
-                               '%(key)s. Please check this key in extra_specs '
-                               'and make it consistent with the element in '
-                               'configuration file %(conf)s.')
+                    LOG.warn(_LW('_parse_volume_type: Unacceptable parameter '
+                                 '%(key)s. Please check this key in '
+                                 'extra_specs '
+                                 'and make it consistent with the element in '
+                                 'configuration file %(conf)s.')
                              % {'key': key,
                                 'conf': conf})
 
@@ -1118,9 +1119,9 @@ class TseriesCommon():
         if map_id is not None:
             self._delete_map(map_id)
         else:
-            LOG.warn(_('remove_map: No map between host %(host)s and '
-                       'volume %(volume)s.') % {'host': host_name,
-                                                'volume': volume_id})
+            LOG.warn(_LW('remove_map: No map between host %(host)s and '
+                         'volume %(volume)s.') % {'host': host_name,
+                                                  'volume': volume_id})
         return host_id
 
     def _delete_map(self, mapid, attempts=2):
index 1a871d0a7aa2c1fbf5ae38660b2ccebeca6a6099..4d455f030e95a42782b8400da8a0326c258537aa 100644 (file)
@@ -801,8 +801,8 @@ class GPFSDriver(driver.VolumeDriver):
         try:
             image_utils.resize_image(vol_path, new_size, run_as_root=True)
         except processutils.ProcessExecutionError as exc:
-            LOG.error(_("Failed to resize volume "
-                        "%(volume_id)s, error: %(error)s.") %
+            LOG.error(_LE("Failed to resize volume "
+                          "%(volume_id)s, error: %(error)s.") %
                       {'volume_id': volume['id'],
                        'error': exc.stderr})
             raise exception.VolumeBackendAPIException(data=exc.stderr)
@@ -875,9 +875,9 @@ class GPFSDriver(driver.VolumeDriver):
             self._execute('mv', local_path, new_path, run_as_root=True)
             return (True, None)
         except processutils.ProcessExecutionError as exc:
-            LOG.error(_('Driver-based migration of volume %(vol)s failed. '
-                        'Move from %(src)s to %(dst)s failed with error: '
-                        '%(error)s.') %
+            LOG.error(_LE('Driver-based migration of volume %(vol)s failed. '
+                          'Move from %(src)s to %(dst)s failed with error: '
+                          '%(error)s.') %
                       {'vol': volume['name'],
                        'src': local_path,
                        'dst': new_path,
index 8ff9cc5b8a1aef99dcd768a3e89907ec5fa717d6..ca0cd3d8dcefc0306f7691a3b3583c72d962fad1 100644 (file)
@@ -43,7 +43,7 @@ from oslo.utils import units
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder import utils
@@ -366,8 +366,8 @@ class StorwizeSVCDriver(san.SanDriver):
             if chap_enabled and chap_secret is None:
                 chap_secret = self._helpers.add_chap_secret_to_host(host_name)
             elif not chap_enabled and chap_secret:
-                LOG.warning(_('CHAP secret exists for host but CHAP is '
-                              'disabled'))
+                LOG.warning(_LW('CHAP secret exists for host but CHAP is '
+                                'disabled'))
 
         volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
         if volume_attributes is None:
@@ -411,8 +411,8 @@ class StorwizeSVCDriver(san.SanDriver):
             if not preferred_node_entry and not vol_opts['multipath']:
                 # Get 1st node in I/O group
                 preferred_node_entry = io_group_nodes[0]
-                LOG.warn(_('initialize_connection: Did not find a preferred '
-                           'node for volume %s') % volume_name)
+                LOG.warn(_LW('initialize_connection: Did not find a preferred '
+                             'node for volume %s') % volume_name)
 
             properties = {}
             properties['target_discovered'] = False
@@ -462,10 +462,10 @@ class StorwizeSVCDriver(san.SanDriver):
                             properties['target_wwn'] = WWPN
                             break
                     else:
-                        LOG.warning(_('Unable to find a preferred node match '
-                                      'for node %(node)s in the list of '
-                                      'available WWPNs on %(host)s. '
-                                      'Using first available.') %
+                        LOG.warning(_LW('Unable to find a preferred node match'
+                                        ' for node %(node)s in the list of '
+                                        'available WWPNs on %(host)s. '
+                                        'Using first available.') %
                                     {'node': preferred_node,
                                      'host': host_name})
                         properties['target_wwn'] = conn_wwpns[0]
@@ -767,7 +767,7 @@ class StorwizeSVCDriver(san.SanDriver):
             try:
                 volume = self.db.volume_get(ctxt, vol_id)
             except Exception:
-                LOG.warn(_('Volume %s does not exist.'), vol_id)
+                LOG.warn(_LW('Volume %s does not exist.'), vol_id)
                 del self._vdiskcopyops[vol_id]
                 if not len(self._vdiskcopyops):
                     self._vdiskcopyops_loop.stop()
@@ -1028,7 +1028,7 @@ class StorwizeSVCDriver(san.SanDriver):
 
         attributes = self._helpers.get_pool_attrs(pool)
         if not attributes:
-            LOG.error(_('Could not get pool data from the storage'))
+            LOG.error(_LE('Could not get pool data from the storage'))
             exception_message = (_('_update_volume_stats: '
                                    'Could not get storage pool data'))
             raise exception.VolumeBackendAPIException(data=exception_message)
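
The hunk above also shows the split this series relies on: the LOG.error text takes _LE, while the exception message two lines below deliberately keeps the primary _() marker, since it travels back to the API user rather than into the logs. A condensed sketch of the same pattern (hypothetical helper, for illustration only):

    # Hypothetical condensation of the hunk above; not driver code.
    import logging

    from cinder import exception
    from cinder.i18n import _, _LE

    LOG = logging.getLogger(__name__)

    def _check_pool_attrs(attributes):
        if not attributes:
            # Operator-facing log line: log-level marker.
            LOG.error(_LE('Could not get pool data from the storage'))
            # User-facing exception text: primary marker.
            msg = _('_update_volume_stats: Could not get storage pool data')
            raise exception.VolumeBackendAPIException(data=msg)
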
index 6d02b0893f2e792982c9ae8703e425889ec221a9..acc4de9fae7e1fbe92b0df37fd5e2bcf88f8f362 100644 (file)
@@ -25,7 +25,7 @@ import six
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.volume.drivers.ibm.storwize_svc import ssh as storwize_ssh
@@ -152,7 +152,7 @@ class StorwizeHelpers(object):
                 if 'unconfigured' != s:
                     wwpns.add(i)
             node['WWPN'] = list(wwpns)
-            LOG.info(_('WWPN on node %(node)s: %(wwpn)s')
+            LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s')
                      % {'node': node['id'], 'wwpn': node['WWPN']})
 
     def add_chap_secret_to_host(self, host_name):
@@ -341,15 +341,15 @@ class StorwizeHelpers(object):
         # Check if the mapping exists
         resp = self.ssh.lsvdiskhostmap(volume_name)
         if not len(resp):
-            LOG.warning(_('unmap_vol_from_host: No mapping of volume '
-                          '%(vol_name)s to any host found.') %
+            LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
+                            '%(vol_name)s to any host found.') %
                         {'vol_name': volume_name})
             return
         if host_name is None:
             if len(resp) > 1:
-                LOG.warning(_('unmap_vol_from_host: Multiple mappings of '
-                              'volume %(vol_name)s found, no host '
-                              'specified.') % {'vol_name': volume_name})
+                LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
+                                'volume %(vol_name)s found, no host '
+                                'specified.') % {'vol_name': volume_name})
                 return
             else:
                 host_name = resp[0]['host_name']
@@ -359,8 +359,8 @@ class StorwizeHelpers(object):
                 if h == host_name:
                     found = True
             if not found:
-                LOG.warning(_('unmap_vol_from_host: No mapping of volume '
-                              '%(vol_name)s to host %(host)s found.') %
+                LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
+                                '%(vol_name)s to host %(host)s found.') %
                             {'vol_name': volume_name, 'host': host_name})
 
         # We now know that the mapping exists
@@ -797,7 +797,7 @@ class StorwizeHelpers(object):
         """Ensures that vdisk is not part of FC mapping and deletes it."""
         LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)
         if not self.is_vdisk_defined(vdisk):
-            LOG.info(_('Tried to delete non-existant vdisk %s.') % vdisk)
+            LOG.info(_LI('Tried to delete non-existent vdisk %s.') % vdisk)
             return
         self.ensure_vdisk_no_fc_mappings(vdisk)
         self.ssh.rmvdisk(vdisk, force=force)
index 3b70b212895b409b6412a1ae778cd689326c7862..a6ee75164a4f140f6a14e1d7a9b6abf4a9027178 100644 (file)
@@ -24,7 +24,7 @@ import six
 
 from cinder.brick.remotefs import remotefs as remotefs_brick
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder import utils
@@ -165,8 +165,8 @@ class NfsDriver(remotefs.RemoteFSDriver):
                 if attempt == (num_attempts - 1):
                     LOG.error(_LE('Mount failure for %(share)s after '
                                   '%(count)d attempts.') % {
-                                      'share': nfs_share,
-                                      'count': num_attempts})
+                              'share': nfs_share,
+                              'count': num_attempts})
                     raise exception.NfsException(e)
                 LOG.debug('Mount attempt %d failed: %s.\nRetrying mount ...' %
                           (attempt, six.text_type(e)))
@@ -278,7 +278,7 @@ class NfsDriver(remotefs.RemoteFSDriver):
 
     def extend_volume(self, volume, new_size):
         """Extend an existing volume to the new size."""
-        LOG.info(_('Extending volume %s.'), volume['id'])
+        LOG.info(_LI('Extending volume %s.'), volume['id'])
         extend_by = int(new_size) - volume['size']
         if not self._is_share_eligible(volume['provider_location'],
                                        extend_by):
@@ -286,7 +286,7 @@ class NfsDriver(remotefs.RemoteFSDriver):
                                               ' extend volume %s to %sG'
                                               % (volume['id'], new_size))
         path = self.local_path(volume)
-        LOG.info(_('Resizing file to %sG...'), new_size)
+        LOG.info(_LI('Resizing file to %sG...'), new_size)
         image_utils.resize_image(path, new_size,
                                  run_as_root=self._execute_as_root)
         if not self._is_file_size_equal(path, new_size):
@@ -328,10 +328,11 @@ class NfsDriver(remotefs.RemoteFSDriver):
                   self.configuration.nas_secure_file_permissions)
 
         if self.configuration.nas_secure_file_permissions == 'false':
-            LOG.warn(_("The NAS file permissions mode will be 666 (allowing "
-                       "other/world read & write access). This is considered "
-                       "an insecure NAS environment. Please see %s for "
-                       "information on a secure NFS configuration.") %
+            LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
+                         "other/world read & write access). "
+                         "This is considered an insecure NAS environment. "
+                         "Please see %s for information on a secure "
+                         "NFS configuration.") %
                      doc_html)
 
         self.configuration.nas_secure_file_operations = \
@@ -348,8 +349,9 @@ class NfsDriver(remotefs.RemoteFSDriver):
                   self.configuration.nas_secure_file_operations)
 
         if self.configuration.nas_secure_file_operations == 'false':
-            LOG.warn(_("The NAS file operations will be run as root: allowing "
-                       "root level access at the storage backend. This is "
-                       "considered an insecure NAS environment. Please see %s "
-                       "for information on a secure NAS configuration.") %
+            LOG.warn(_LW("The NAS file operations will be run as "
+                         "root: allowing root level access at the storage "
+                         "backend. This is considered an insecure NAS "
+                         "environment. Please see %s "
+                         "for information on a secure NAS configuration.") %
                      doc_html)
index 76a74aad6626cc3786eb83a72306d298f22e0c7f..ed1f4d0b6c0793f8f732db16cdcad916cf39fdf7 100644 (file)
@@ -449,7 +449,7 @@ class NimbleAPIExecutor:
     def login(self):
         """Execute Https Login API."""
         response = self._execute_login()
-        LOG.info(_('Successful login by user %s') % self.username)
+        LOG.info(_LI('Successful login by user %s') % self.username)
         self.sid = response['authInfo']['sid']
 
     @_connection_checker
@@ -573,7 +573,7 @@ class NimbleAPIExecutor:
     @_response_checker
     def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs):
         """Execute onlineSnap API."""
-        LOG.info(_('Setting snapshot %(snap)s to online_flag %(flag)s')
+        LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s')
                  % {'snap': snap_name, 'flag': online_flag})
         return self.client.service.onlineSnap(request={'sid': self.sid,
                                                        'vol': vol_name,
index e719de1a64eaea16dcfbfbb1d2a69ccc23bb6051..c393c2ca3450c69398b84207aef8ac33cebe95fb 100644 (file)
@@ -1422,8 +1422,8 @@ class DPLCOMMONDriver(driver.VolumeDriver):
                     ret = 0
                     output = status.get('output', {})
             else:
-                LOG.error(_('Flexvisor failed to get pool info '
-                            '(failed to get event)%s.') % (poolid))
+                LOG.error(_LE('Flexvisor failed to get pool info '
+                              '(failed to get event)%s.') % (poolid))
                 raise exception.VolumeBackendAPIException(
                     data="failed to get event")
         elif ret != 0:
index 0ea4876e577ecd4a89f476483176f5bd19ed650a..26f7beb747d26e97b8cbff5b8455d496bbbc66f6 100644 (file)
@@ -57,7 +57,7 @@ from oslo.utils import units
 from cinder import context
 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.volume import qos_specs
@@ -399,8 +399,8 @@ class HP3PARCommon(object):
                           'new_type': volume_type.get('name')})
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.warning(_("Failed to manage virtual volume %(disp)s "
-                                  "due to error during retype.") %
+                    LOG.warning(_LW("Failed to manage virtual volume %(disp)s "
+                                    "due to error during retype.") %
                                 {'disp': display_name})
                     # Try to undo the rename and clear the new comment.
                     self.client.modifyVolume(
index 347a7f91972fa94e4610471228c189d3a19e584c..612160a39ae23db6dd3eecd7bcc833f07adcfd4d 100644 (file)
@@ -36,7 +36,7 @@ except ImportError:
     hpexceptions = None
 
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
 from cinder.openstack.common import log as logging
 import cinder.volume.driver
 from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
@@ -436,9 +436,10 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
                 host = common._get_3par_host(hostname)
             elif (not host['initiatorChapEnabled'] and
                     self.configuration.hp3par_iscsi_chap_enabled):
-                LOG.warn(_("Host exists without CHAP credentials set and has "
-                           "iSCSI attachments but CHAP is enabled.  Updating "
-                           "host with new CHAP credentials."))
+                LOG.warn(_LW("Host exists without CHAP credentials set "
+                             "and has iSCSI attachments but CHAP is "
+                             "enabled.  Updating host with new CHAP "
+                             "credentials."))
                 self._set_3par_chaps(
                     common,
                     hostname,
@@ -468,11 +469,11 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
             host_info = common.client.getHost(chap_username)
 
             if not host_info['initiatorChapEnabled']:
-                LOG.warn(_("Host has no CHAP key, but CHAP is enabled."))
+                LOG.warn(_LW("Host has no CHAP key, but CHAP is enabled."))
 
         except hpexceptions.HTTPNotFound:
             chap_password = volume_utils.generate_password(16)
-            LOG.warn(_("No host or VLUNs exist. Generating new CHAP key."))
+            LOG.warn(_LW("No host or VLUNs exist. Generating new CHAP key."))
         else:
             # Get a list of all iSCSI VLUNs and see if there is already a CHAP
             # key assigned to one of them.  Use that CHAP key if present,
@@ -500,12 +501,12 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
                                   "but CHAP is enabled. Skipping." %
                                   vlun['remoteName'])
                 else:
-                    LOG.warn(_("Non-iSCSI VLUN detected."))
+                    LOG.warn(_LW("Non-iSCSI VLUN detected."))
 
             if not chap_exists:
                 chap_password = volume_utils.generate_password(16)
-                LOG.warn(_("No VLUN contained CHAP credentials. "
-                           "Generating new CHAP key."))
+                LOG.warn(_LW("No VLUN contained CHAP credentials. "
+                             "Generating new CHAP key."))
 
         # Add CHAP credentials to the volume metadata
         vol_name = common._get_3par_vol_name(volume['id'])
index a4671a828af94ce0549a8a57abe4251111b8c738..430dffefabdc3ab08948d77c7a2a5c3313d46738 100644 (file)
@@ -32,7 +32,7 @@ hplefthand_password for credentials to talk to the REST service on the
 LeftHand array.
 """
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LI
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder.volume.driver import VolumeDriver
@@ -77,7 +77,8 @@ class HPLeftHandISCSIDriver(VolumeDriver):
         self.proxy = self._create_proxy(*self.args, **self.kwargs)
         self.proxy.do_setup(context)
 
-        LOG.info(_("HPLeftHand driver %(driver_ver)s, proxy %(proxy_ver)s") % {
+        LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
+                     "proxy %(proxy_ver)s") % {
             "driver_ver": self.VERSION,
             "proxy_ver": self.proxy.get_version_string()})
 
index e6757bf283b57ecbc86924934bc4758b944dbb7f..3f0071df03b967e1e1e30e4fca973b4ba62fbc15 100644 (file)
@@ -20,7 +20,7 @@ from oslo.utils import units
 
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume.driver import ISCSIDriver
 from cinder.volume import utils
@@ -377,11 +377,11 @@ class HPLeftHandRESTProxy(ISCSIDriver):
             server_info = self.client.getServerByName(connector['host'])
             chap_secret = server_info['chapTargetSecret']
             if not chap_enabled and chap_secret:
-                LOG.warning(_('CHAP secret exists for host %s but CHAP is '
-                              'disabled') % connector['host'])
+                LOG.warning(_LW('CHAP secret exists for host %s but CHAP is '
+                                'disabled') % connector['host'])
             if chap_enabled and chap_secret is None:
-                LOG.warning(_('CHAP is enabled, but server secret not '
-                              'configured on server %s') % connector['host'])
+                LOG.warning(_LW('CHAP is enabled, but server secret not '
+                                'configured on server %s') % connector['host'])
             return server_info
         except hpexceptions.HTTPNotFound:
             # server does not exist, so create one
@@ -498,20 +498,20 @@ class HPLeftHandRESTProxy(ISCSIDriver):
             virtual_ips = cluster_info['virtualIPAddresses']
 
             if driver != self.__class__.__name__:
-                LOG.info(_("Cannot provide backend assisted migration for "
-                           "volume: %s because volume is from a different "
-                           "backend.") % volume['name'])
+                LOG.info(_LI("Cannot provide backend assisted migration for "
+                             "volume: %s because volume is from a different "
+                             "backend.") % volume['name'])
                 return false_ret
             if vip != virtual_ips[0]['ipV4Address']:
-                LOG.info(_("Cannot provide backend assisted migration for "
-                           "volume: %s because cluster exists in different "
-                           "management group.") % volume['name'])
+                LOG.info(_LI("Cannot provide backend assisted migration for "
+                             "volume: %s because cluster exists in different "
+                             "management group.") % volume['name'])
                 return false_ret
 
         except hpexceptions.HTTPNotFound:
-            LOG.info(_("Cannot provide backend assisted migration for "
-                       "volume: %s because cluster exists in different "
-                       "management group.") % volume['name'])
+            LOG.info(_LI("Cannot provide backend assisted migration for "
+                         "volume: %s because cluster exists in different "
+                         "management group.") % volume['name'])
             return false_ret
 
         try:
@@ -520,9 +520,9 @@ class HPLeftHandRESTProxy(ISCSIDriver):
 
             # can't migrate if server is attached
             if volume_info['iscsiSessions'] is not None:
-                LOG.info(_("Cannot provide backend assisted migration "
-                           "for volume: %s because the volume has been "
-                           "exported.") % volume['name'])
+                LOG.info(_LI("Cannot provide backend assisted migration "
+                             "for volume: %s because the volume has been "
+                             "exported.") % volume['name'])
                 return false_ret
 
             # can't migrate if volume has snapshots
@@ -531,17 +531,17 @@ class HPLeftHandRESTProxy(ISCSIDriver):
                 'fields=snapshots,snapshots[resource[members[name]]]')
             LOG.debug('Snapshot info: %s' % snap_info)
             if snap_info['snapshots']['resource'] is not None:
-                LOG.info(_("Cannot provide backend assisted migration "
-                           "for volume: %s because the volume has "
-                           "snapshots.") % volume['name'])
+                LOG.info(_LI("Cannot provide backend assisted migration "
+                             "for volume: %s because the volume has "
+                             "snapshots.") % volume['name'])
                 return false_ret
 
             options = {'clusterName': cluster}
             self.client.modifyVolume(volume_info['id'], options)
         except hpexceptions.HTTPNotFound:
-            LOG.info(_("Cannot provide backend assisted migration for "
-                       "volume: %s because volume does not exist in this "
-                       "management group.") % volume['name'])
+            LOG.info(_LI("Cannot provide backend assisted migration for "
+                         "volume: %s because volume does not exist in this "
+                         "management group.") % volume['name'])
             return false_ret
         except hpexceptions.HTTPServerError as ex:
             LOG.error(ex)
index f187f1fde7cbb9639d43e5ff862acaef00b4a366..0fade4e1f9cc5caa70361a586f3281b79652ffe1 100644 (file)
@@ -27,7 +27,7 @@ from oslo.config import cfg
 from oslo.utils import excutils
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
 from cinder.openstack.common import log as logging
 from cinder import ssh_utils
 from cinder import utils
@@ -148,7 +148,7 @@ class SanDriver(driver.VolumeDriver):
 
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_("Error running SSH command: %s") % command)
+                LOG.error(_LE("Error running SSH command: %s") % command)
 
     def ensure_export(self, context, volume):
         """Synchronously recreates an export for a logical volume."""
index 7d2c893fb811707809a7008c5a9931239ac67970..0fd8d9488561c9e6f1b57f729e7fcc452249ef0f 100644 (file)
@@ -22,7 +22,7 @@ from oslo.utils import units
 
 from cinder.brick.remotefs import remotefs
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder import utils
@@ -205,8 +205,8 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
     def delete_volume(self, volume):
         """Deletes a logical volume."""
         if not volume['provider_location']:
-            LOG.warn(_('Volume %s does not have provider_location specified, '
-                       'skipping.'), volume['name'])
+            LOG.warn(_LW('Volume %s does not have provider_location '
+                         'specified, skipping.'), volume['name'])
             return
 
         self._ensure_share_mounted(volume['provider_location'])
@@ -227,7 +227,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
         pattern = r"qemu-img version ([0-9\.]*)"
         version = re.match(pattern, info)
         if not version:
-            LOG.warn(_("qemu-img is not installed."))
+            LOG.warn(_LW("qemu-img is not installed."))
             return None
         return [int(x) for x in version.groups()[0].split('.')]
 
@@ -404,14 +404,14 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
 
     @utils.synchronized('smbfs', external=False)
     def extend_volume(self, volume, size_gb):
-        LOG.info(_('Extending volume %s.'), volume['id'])
+        LOG.info(_LI('Extending volume %s.'), volume['id'])
         self._extend_volume(volume, size_gb)
 
     def _extend_volume(self, volume, size_gb):
         volume_path = self.local_path(volume)
 
         self._check_extend_volume_support(volume, size_gb)
-        LOG.info(_('Resizing file to %sG...') % size_gb)
+        LOG.info(_LI('Resizing file to %sG...') % size_gb)
 
         self._do_extend_volume(volume_path, size_gb, volume['name'])
 
index 40901f6b4fbda66cba5decc3ef45fda09a2f0c1d..7eac767d527401f5bc3c635fa547e03b3861fd05 100644 (file)
@@ -18,7 +18,7 @@ Session and API call management for VMware ESX/VC server.
 Provides abstraction over cinder.volume.drivers.vmware.vim.Vim SOAP calls.
 """
 
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
 from cinder.volume.drivers.vmware import error_util
@@ -69,8 +69,8 @@ class Retry(object):
             try:
                 result = f(*args, **kwargs)
             except self._exceptions as excep:
-                LOG.exception(_("Failure while invoking function: "
-                                "%(func)s. Error: %(excep)s.") %
+                LOG.exception(_LE("Failure while invoking function: "
+                                  "%(func)s. Error: %(excep)s.") %
                               {'func': f.__name__, 'excep': excep})
                 if (self._max_retry_count != -1 and
                         self._retry_count >= self._max_retry_count):
@@ -167,7 +167,7 @@ class VMwareAPISession(object):
                 # have been cleared. We could have made a call to
                 # SessionIsActive, but that is an overhead because we
                 # anyway would have to call TerminateSession.
-                LOG.exception(_("Error while terminating session: %s.") %
+                LOG.exception(_LE("Error while terminating session: %s.") %
                               excep)
         self._session_id = session.key
 
@@ -180,21 +180,21 @@ class VMwareAPISession(object):
 
         if self.pbm:
             self.pbm.set_cookie()
-        LOG.info(_("Successfully established connection to the server."))
+        LOG.info(_LI("Successfully established connection to the server."))
 
     def __del__(self):
         """Logs-out the sessions."""
         try:
             self.vim.Logout(self.vim.service_content.sessionManager)
         except Exception as excep:
-            LOG.exception(_("Error while logging out from vim session: %s."),
+            LOG.exception(_LE("Error while logging out from vim session: %s."),
                           excep)
         if self._pbm:
             try:
                 self.pbm.Logout(self.pbm.service_content.sessionManager)
             except Exception as excep:
-                LOG.exception(_("Error while logging out from pbm session: "
-                                "%s."), excep)
+                LOG.exception(_LE("Error while logging out from pbm session: "
+                                  "%s."), excep)
 
     def invoke_api(self, module, method, *args, **kwargs):
         """Wrapper method for invoking APIs.
@@ -242,9 +242,9 @@ class VMwareAPISession(object):
                         return []
 
                     # empty response is due to an inactive session
-                    LOG.warn(_("Current session: %(session)s is inactive; "
-                               "re-creating the session while invoking "
-                               "method %(module)s.%(method)s."),
+                    LOG.warn(_LW("Current session: %(session)s is inactive; "
+                                 "re-creating the session while invoking "
+                                 "method %(module)s.%(method)s."),
                              {'session': self._session_id,
                               'module': module,
                               'method': method},
@@ -268,8 +268,8 @@ class VMwareAPISession(object):
                 sessionID=self._session_id,
                 userName=self._session_username)
         except error_util.VimException:
-            LOG.warn(_("Error occurred while checking whether the "
-                       "current session: %s is active."),
+            LOG.warn(_LW("Error occurred while checking whether the "
+                         "current session: %s is active."),
                      self._session_id,
                      exc_info=True)
 
@@ -310,11 +310,13 @@ class VMwareAPISession(object):
                 LOG.debug("Task %s status: success." % task)
             else:
                 error_msg = str(task_info.error.localizedMessage)
-                LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
+                LOG.exception(_LE("Task: %(task)s failed with "
+                                  "error: %(err)s.") %
                               {'task': task, 'err': error_msg})
                 raise error_util.VimFaultException([], error_msg)
         except Exception as excep:
-            LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
+            LOG.exception(_LE("Task: %(task)s failed with "
+                              "error: %(err)s.") %
                           {'task': task, 'err': excep})
             raise excep
         # got the result. So stop the loop.
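
The api.py hunks above pair _LE with LOG.exception throughout. That pairing is the convention because LOG.exception logs at ERROR level and appends the active traceback; a small sketch (hypothetical function name, for reviewers):

    # Sketch with a hypothetical helper; shows why _LE matches
    # LOG.exception.
    import logging

    from cinder.i18n import _LE

    LOG = logging.getLogger(__name__)

    def _terminate(session):
        try:
            session.terminate()  # may raise once the session has expired
        except Exception as excep:
            # Logged at ERROR level with the traceback attached, so the
            # error-level marker is the right one.
            LOG.exception(_LE('Error while terminating session: %s.'),
                          excep)
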
index 9cfa8f26d8d35cbfb9297d68a6360e8bbdbf927c..0fc4614088ca2e72376e561d8c06a34626e96a80 100644 (file)
@@ -32,7 +32,7 @@ from oslo.utils import excutils
 from oslo.utils import units
 
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI, _LW
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import uuidutils
@@ -194,9 +194,10 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
     VERSION = '1.4.0'
 
     def _do_deprecation_warning(self):
-        LOG.warn(_('The VMware ESX VMDK driver is now deprecated and will be '
-                   'removed in the Juno release. The VMware vCenter VMDK '
-                   'driver will remain and continue to be supported.'))
+        LOG.warn(_LW('The VMware ESX VMDK driver is now deprecated '
+                     'and will be removed in the Juno release. The VMware '
+                     'vCenter VMDK driver will remain and continue to be '
+                     'supported.'))
 
     def __init__(self, *args, **kwargs):
         super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
@@ -262,8 +263,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             max_objects = self.configuration.vmware_max_objects_retrieval
             self._volumeops = volumeops.VMwareVolumeOps(self.session,
                                                         max_objects)
-            LOG.info(_("Successfully setup driver: %(driver)s for "
-                       "server: %(ip)s.") %
+            LOG.info(_LI("Successfully setup driver: %(driver)s for "
+                         "server: %(ip)s.") %
                      {'driver': driver,
                       'ip': self.configuration.vmware_host_ip})
 
@@ -327,7 +328,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         """
         backing = self.volumeops.get_backing(volume['name'])
         if not backing:
-            LOG.info(_("Backing not available, no operation to be performed."))
+            LOG.info(_LI("Backing not available, no operation "
+                         "to be performed."))
             return
         self.volumeops.delete_backing(backing)
 
@@ -467,9 +469,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                 LOG.error(msg, storage_profile)
                 raise error_util.VimException(msg % storage_profile)
         elif storage_profile:
-            LOG.warn(_("Ignoring storage profile %s requirement for this "
-                       "volume since policy based placement is "
-                       "disabled."), storage_profile)
+            LOG.warn(_LW("Ignoring storage profile %s requirement for this "
+                         "volume since policy based placement is "
+                         "disabled."), storage_profile)
 
         size_bytes = volume['size'] * units.Gi
         datastore_summary = self._select_datastore_summary(size_bytes,
@@ -583,9 +585,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                     selected_host = host
                     break
                 except error_util.VimException as excep:
-                    LOG.warn(_("Unable to find suitable datastore for volume "
-                               "of size: %(vol)s GB under host: %(host)s. "
-                               "More details: %(excep)s") %
+                    LOG.warn(_LW("Unable to find suitable datastore for volume"
+                                 " of size: %(vol)s GB under host: %(host)s. "
+                                 "More details: %(excep)s") %
                              {'vol': volume['size'],
                               'host': host, 'excep': excep})
             if selected_host:
@@ -624,9 +626,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                     if backing:
                         break
                 except error_util.VimException as excep:
-                    LOG.warn(_("Unable to find suitable datastore for "
-                               "volume: %(vol)s under host: %(host)s. "
-                               "More details: %(excep)s") %
+                    LOG.warn(_LW("Unable to find suitable datastore for "
+                                 "volume: %(vol)s under host: %(host)s. "
+                                 "More details: %(excep)s") %
                              {'vol': volume['name'],
                               'host': host.obj, 'excep': excep})
             if backing:
@@ -660,8 +662,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             if not backing:
                 # Create a backing in case it does not exist under the
                 # host managing the instance.
-                LOG.info(_("There is no backing for the volume: %s. "
-                           "Need to create one.") % volume['name'])
+                LOG.info(_LI("There is no backing for the volume: %s. "
+                             "Need to create one.") % volume['name'])
                 backing = self._create_backing(volume, host)
             else:
                 # Relocate volume is necessary
@@ -673,7 +675,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             if not backing:
                 # Create a backing in case it does not exist. It is a bad use
                 # case to boot from an empty volume.
-                LOG.warn(_("Trying to boot from an empty volume: %s.") %
+                LOG.warn(_LW("Trying to boot from an empty volume: %s.") %
                          volume['name'])
                 # Create backing
                 backing = self._create_backing_in_inventory(volume)
@@ -682,8 +684,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         connection_info['data'] = {'volume': backing.value,
                                    'volume_id': volume['id']}
 
-        LOG.info(_("Returning connection_info: %(info)s for volume: "
-                   "%(volume)s with connector: %(connector)s.") %
+        LOG.info(_LI("Returning connection_info: %(info)s for volume: "
+                     "%(volume)s with connector: %(connector)s.") %
                  {'info': connection_info,
                   'volume': volume['name'],
                   'connector': connector})
@@ -735,12 +737,12 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             raise exception.InvalidVolume(msg % volume['status'])
         backing = self.volumeops.get_backing(snapshot['volume_name'])
         if not backing:
-            LOG.info(_("There is no backing, so will not create "
-                       "snapshot: %s.") % snapshot['name'])
+            LOG.info(_LI("There is no backing, so will not create "
+                         "snapshot: %s.") % snapshot['name'])
             return
         self.volumeops.create_snapshot(backing, snapshot['name'],
                                        snapshot['display_description'])
-        LOG.info(_("Successfully created snapshot: %s.") % snapshot['name'])
+        LOG.info(_LI("Successfully created snapshot: %s.") % snapshot['name'])
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot.
@@ -766,11 +768,11 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             raise exception.InvalidVolume(msg % volume['status'])
         backing = self.volumeops.get_backing(snapshot['volume_name'])
         if not backing:
-            LOG.info(_("There is no backing, and so there is no "
-                       "snapshot: %s.") % snapshot['name'])
+            LOG.info(_LI("There is no backing, and so there is no "
+                         "snapshot: %s.") % snapshot['name'])
         else:
             self.volumeops.delete_snapshot(backing, snapshot['name'])
-            LOG.info(_("Successfully deleted snapshot: %s.") %
+            LOG.info(_LI("Successfully deleted snapshot: %s.") %
                      snapshot['name'])
 
     def delete_snapshot(self, snapshot):
@@ -811,8 +813,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         if volume['size'] > src_size_in_gb:
             self._extend_volumeops_virtual_disk(volume['size'], dest_vmdk_path,
                                                 datacenter)
-        LOG.info(_("Successfully cloned new backing: %(back)s from "
-                   "source VMDK file: %(vmdk)s.") %
+        LOG.info(_LI("Successfully cloned new backing: %(back)s from "
+                     "source VMDK file: %(vmdk)s.") %
                  {'back': backing, 'vmdk': src_vmdk_path})
 
     def _create_cloned_volume(self, volume, src_vref):
@@ -828,9 +830,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         self._verify_volume_creation(volume)
         backing = self.volumeops.get_backing(src_vref['name'])
         if not backing:
-            LOG.info(_("There is no backing for the source volume: "
-                       "%(svol)s. Not creating any backing for the "
-                       "volume: %(vol)s.") %
+            LOG.info(_LI("There is no backing for the source volume: "
+                         "%(svol)s. Not creating any backing for the "
+                         "volume: %(vol)s.") %
                      {'svol': src_vref['name'],
                       'vol': volume['name']})
             return
@@ -859,18 +861,18 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         self._verify_volume_creation(volume)
         backing = self.volumeops.get_backing(snapshot['volume_name'])
         if not backing:
-            LOG.info(_("There is no backing for the source snapshot: "
-                       "%(snap)s. Not creating any backing for the "
-                       "volume: %(vol)s.") %
+            LOG.info(_LI("There is no backing for the source snapshot: "
+                         "%(snap)s. Not creating any backing for the "
+                         "volume: %(vol)s.") %
                      {'snap': snapshot['name'],
                       'vol': volume['name']})
             return
         snapshot_moref = self.volumeops.get_snapshot(backing,
                                                      snapshot['name'])
         if not snapshot_moref:
-            LOG.info(_("There is no snapshot point for the snapshotted "
-                       "volume: %(snap)s. Not creating any backing for "
-                       "the volume: %(vol)s.") %
+            LOG.info(_LI("There is no snapshot point for the snapshotted "
+                         "volume: %(snap)s. Not creating any backing for "
+                         "the volume: %(vol)s.") %
                      {'snap': snapshot['name'], 'vol': volume['name']})
             return
         src_vmdk_path = self.volumeops.get_vmdk_path(snapshot_moref)
@@ -942,8 +944,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             self.volumeops.delete_vmdk_file(
                 descriptor_ds_file_path, dc_ref)
         except error_util.VimException:
-            LOG.warn(_("Error occurred while deleting temporary "
-                       "disk: %s."),
+            LOG.warn(_LW("Error occurred while deleting temporary "
+                         "disk: %s."),
                      descriptor_ds_file_path,
                      exc_info=True)
 
@@ -956,8 +958,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                 dest_path.get_descriptor_ds_file_path())
         except error_util.VimException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Error occurred while copying %(src)s to "
-                                "%(dst)s."),
+                LOG.exception(_LE("Error occurred while copying %(src)s to "
+                                  "%(dst)s."),
                               {'src': src_path.get_descriptor_ds_file_path(),
                                'dst': dest_path.get_descriptor_ds_file_path()})
         finally:
@@ -1018,8 +1020,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         except Exception:
             # Delete the descriptor.
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Error occurred while copying image: "
-                                "%(image_id)s to %(path)s."),
+                LOG.exception(_LE("Error occurred while copying image: "
+                                  "%(image_id)s to %(path)s."),
                               {'path': path.get_descriptor_ds_file_path(),
                                'image_id': image_id})
                 LOG.debug("Deleting descriptor: %s.",
@@ -1028,8 +1030,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                     self.volumeops.delete_file(
                         path.get_descriptor_ds_file_path(), dc_ref)
                 except error_util.VimException:
-                    LOG.warn(_("Error occurred while deleting "
-                               "descriptor: %s."),
+                    LOG.warn(_LW("Error occurred while deleting "
+                                 "descriptor: %s."),
                              path.get_descriptor_ds_file_path(),
                              exc_info=True)
 
@@ -1057,7 +1059,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         try:
             self.volumeops.delete_backing(backing)
         except error_util.VimException:
-            LOG.warn(_("Error occurred while deleting backing: %s."),
+            LOG.warn(_LW("Error occurred while deleting backing: %s."),
                      backing,
                      exc_info=True)
 
@@ -1143,8 +1145,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         except Exception:
             # Delete backing and virtual disk created from image.
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Error occurred while creating volume: %(id)s"
-                                " from image: %(image_id)s."),
+                LOG.exception(_LE("Error occurred while creating "
+                                  "volume: %(id)s"
+                                  " from image: %(image_id)s."),
                               {'id': volume['id'],
                                'image_id': image_id})
                 self._delete_temp_backing(backing)
@@ -1210,15 +1213,15 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                                                        image_size=image_size)
         except exception.CinderException as excep:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Exception in copy_image_to_volume: %s."),
+                LOG.exception(_LE("Exception in copy_image_to_volume: %s."),
                               excep)
                 backing = self.volumeops.get_backing(volume['name'])
                 if backing:
-                    LOG.exception(_("Deleting the backing: %s") % backing)
+                    LOG.exception(_LE("Deleting the backing: %s") % backing)
                     # delete the backing
                     self.volumeops.delete_backing(backing)
 
-        LOG.info(_("Done copying image: %(id)s to volume: %(vol)s.") %
+        LOG.info(_LI("Done copying image: %(id)s to volume: %(vol)s.") %
                  {'id': image_id, 'vol': volume['name']})
 
     def _extend_vmdk_virtual_disk(self, name, new_size_in_gb):
@@ -1229,9 +1232,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         """
         backing = self.volumeops.get_backing(name)
         if not backing:
-            LOG.info(_("The backing is not found, so there is no need "
-                       "to extend the vmdk virtual disk for the volume "
-                       "%s."), name)
+            LOG.info(_LI("The backing is not found, so there is no need "
+                         "to extend the vmdk virtual disk for the volume "
+                         "%s."), name)
         else:
             root_vmdk_path = self.volumeops.get_vmdk_path(backing)
             datacenter = self.volumeops.get_dc(backing)
@@ -1251,8 +1254,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                                                root_vmdk_path, datacenter)
         except error_util.VimException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Unable to extend the size of the "
-                                "vmdk virtual disk at the path %s."),
+                LOG.exception(_LE("Unable to extend the size of the "
+                                  "vmdk virtual disk at the path %s."),
                               root_vmdk_path)
 
     def copy_image_to_volume(self, context, volume, image_service, image_id):
@@ -1301,8 +1304,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                     image_size_in_bytes, image_adapter_type, image_disk_type)
         except exception.CinderException as excep:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Exception in copying the image to the "
-                                "volume: %s."), excep)
+                LOG.exception(_LE("Exception in copying the image to the "
+                                  "volume: %s."), excep)
 
         LOG.debug("Volume: %(id)s created from image: %(image_id)s.",
                   {'id': volume['id'],
@@ -1349,7 +1352,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         # get backing vm of volume and its vmdk path
         backing = self.volumeops.get_backing(volume['name'])
         if not backing:
-            LOG.info(_("Backing not found, creating for volume: %s") %
+            LOG.info(_LI("Backing not found, creating for volume: %s") %
                      volume['name'])
             backing = self._create_backing_in_inventory(volume)
         vmdk_file_path = self.volumeops.get_vmdk_path(backing)
@@ -1368,7 +1371,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                                    vmdk_size=volume['size'] * units.Gi,
                                    image_name=image_meta['name'],
                                    image_version=1)
-        LOG.info(_("Done copying volume %(vol)s to a new image %(img)s") %
+        LOG.info(_LI("Done copying volume %(vol)s to a new image %(img)s") %
                  {'vol': volume['name'], 'img': image_meta['name']})
 
     def _in_use(self, volume):
@@ -1397,7 +1400,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         """
         # Can't attempt retype if the volume is in use.
         if self._in_use(volume):
-            LOG.warn(_("Volume: %s is in use, can't retype."),
+            LOG.warn(_LW("Volume: %s is in use, can't retype."),
                      volume['name'])
             return False
 
@@ -1466,8 +1469,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             best_candidate = self.ds_sel.select_datastore(req)
             if not best_candidate:
                 # No candidate datastores; can't retype.
-                LOG.warn(_("There are no datastores matching new requirements;"
-                           " can't retype volume: %s."),
+                LOG.warn(_LW("There are no datastores matching new "
+                             "requirements; can't retype volume: %s."),
                          volume['name'])
                 return False
 
@@ -1503,8 +1506,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                     backing = new_backing
                 except error_util.VimException:
                     with excutils.save_and_reraise_exception():
-                        LOG.exception(_("Error occurred while cloning backing:"
-                                        " %s during retype."),
+                        LOG.exception(_LE("Error occurred while cloning "
+                                          "backing:"
+                                          " %s during retype."),
                                       backing)
                         if renamed:
                             LOG.debug("Undo rename of backing: %(backing)s; "
@@ -1517,9 +1521,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                                 self.volumeops.rename_backing(backing,
                                                               volume['name'])
                             except error_util.VimException:
-                                LOG.warn(_("Changing backing: %(backing)s name"
-                                           " from %(new_name)s to %(old_name)s"
-                                           " failed."),
+                                LOG.warn(_LW("Changing backing: %(backing)s "
+                                             "name from %(new_name)s to "
+                                             "%(old_name)s failed."),
                                          {'backing': backing,
                                           'new_name': tmp_name,
                                           'old_name': volume['name']})
@@ -1553,24 +1557,25 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         # try extending vmdk in place
         try:
             self._extend_vmdk_virtual_disk(vol_name, new_size)
-            LOG.info(_("Done extending volume %(vol)s to size %(size)s GB.") %
+            LOG.info(_LI("Done extending volume %(vol)s "
+                         "to size %(size)s GB.") %
                      {'vol': vol_name, 'size': new_size})
             return
         except error_util.VimFaultException:
-            LOG.info(_("Relocating volume %s vmdk to a different "
-                       "datastore since trying to extend vmdk file "
-                       "in place failed."), vol_name)
+            LOG.info(_LI("Relocating volume %s vmdk to a different "
+                         "datastore since trying to extend vmdk file "
+                         "in place failed."), vol_name)
         # If in place extend fails, then try to relocate the volume
         try:
             (host, rp, folder, summary) = self._select_ds_for_volume(new_size)
         except error_util.VimException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Not able to find a different datastore to "
-                                "place the extended volume %s."), vol_name)
+                LOG.exception(_LE("Not able to find a different datastore to "
+                                  "place the extended volume %s."), vol_name)
 
-        LOG.info(_("Selected datastore %(ds)s to place extended volume of "
-                   "size %(size)s GB.") % {'ds': summary.name,
-                                           'size': new_size})
+        LOG.info(_LI("Selected datastore %(ds)s to place extended volume of "
+                     "size %(size)s GB.") % {'ds': summary.name,
+                                             'size': new_size})
 
         try:
             backing = self.volumeops.get_backing(vol_name)
@@ -1580,9 +1585,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
             self.volumeops.move_backing_to_folder(backing, folder)
         except error_util.VimException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Not able to relocate volume %s for "
-                                "extending."), vol_name)
-        LOG.info(_("Done extending volume %(vol)s to size %(size)s GB.") %
+                LOG.exception(_LE("Not able to relocate volume %s for "
+                                  "extending."), vol_name)
+        LOG.info(_LI("Done extending volume %(vol)s to size %(size)s GB.") %
                  {'vol': vol_name, 'size': new_size})
 
     @contextlib.contextmanager
@@ -1681,8 +1686,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                 return vm_ref
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Error occurred while creating temporary "
-                                "backing."))
+                LOG.exception(_LE("Error occurred while creating temporary "
+                                  "backing."))
                 backing = self.volumeops.get_backing(name)
                 if backing is not None:
                     self._delete_temp_backing(backing)
@@ -1746,9 +1751,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                             self.volumeops.rename_backing(backing,
                                                           volume['name'])
                         except error_util.VimException:
-                            LOG.warn(_("Cannot undo volume rename; old name "
-                                       "was %(old_name)s and new name is "
-                                       "%(new_name)s."),
+                            LOG.warn(_LW("Cannot undo volume rename; old name "
+                                         "was %(old_name)s and new name is "
+                                         "%(new_name)s."),
                                      {'old_name': volume['name'],
                                       'new_name': tmp_backing_name},
                                      exc_info=True)
@@ -1850,10 +1855,10 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         pbm_service_wsdl = os.path.join(curr_dir, 'wsdl', major_minor,
                                         'pbmService.wsdl')
         if not os.path.exists(pbm_service_wsdl):
-            LOG.warn(_("PBM WSDL file %s is missing!"), pbm_service_wsdl)
+            LOG.warn(_LW("PBM WSDL file %s is missing!"), pbm_service_wsdl)
             return
         pbm_wsdl = 'file://' + pbm_service_wsdl
-        LOG.info(_("Using PBM WSDL location: %s"), pbm_wsdl)
+        LOG.info(_LI("Using PBM WSDL location: %s"), pbm_wsdl)
         return pbm_wsdl
 
     def _get_vc_version(self):
@@ -1864,18 +1869,18 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         """
         version_str = self.configuration.vmware_host_version
         if version_str:
-            LOG.info(_("Using overridden vmware_host_version from config: "
-                       "%s"), version_str)
+            LOG.info(_LI("Using overridden vmware_host_version from config: "
+                         "%s"), version_str)
         else:
             version_str = self.session.vim.service_content.about.version
-            LOG.info(_("Fetched VC server version: %s"), version_str)
+            LOG.info(_LI("Fetched VC server version: %s"), version_str)
         # convert version_str to LooseVersion and return
         version = None
         try:
             version = dist_version.LooseVersion(version_str)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_("Version string '%s' is not parseable"),
+                LOG.exception(_LE("Version string '%s' is not parseable"),
                               version_str)
         return version
 
@@ -1902,9 +1907,9 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects)
         self._ds_sel = hub.DatastoreSelector(self.volumeops, self.session)
 
-        LOG.info(_("Successfully setup driver: %(driver)s for server: "
-                   "%(ip)s.") % {'driver': self.__class__.__name__,
-                                 'ip': self.configuration.vmware_host_ip})
+        LOG.info(_LI("Successfully setup driver: %(driver)s for server: "
+                     "%(ip)s.") % {'driver': self.__class__.__name__,
+                                   'ip': self.configuration.vmware_host_ip})
 
     def _get_volume_group_folder(self, datacenter):
         """Get volume group folder.
@@ -1950,7 +1955,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         (folder, summary) = self._get_folder_ds_summary(volume,
                                                         resource_pool,
                                                         datastores)
-        LOG.info(_("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") %
+        LOG.info(_LI("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") %
                  {'backing': backing, 'ds': summary, 'rp': resource_pool})
         # Relocate the backing to the datastore and folder
         self.volumeops.relocate_backing(backing, summary.datastore,
@@ -1998,7 +2003,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         # the size of the source volume to the volume size.
         if volume['size'] > src_vsize:
             self._extend_vmdk_virtual_disk(volume['name'], volume['size'])
-        LOG.info(_("Successfully created clone: %s.") % clone)
+        LOG.info(_LI("Successfully created clone: %s.") % clone)
 
     def _create_volume_from_snapshot(self, volume, snapshot):
         """Creates a volume from a snapshot.
@@ -2012,17 +2017,17 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         self._verify_volume_creation(volume)
         backing = self.volumeops.get_backing(snapshot['volume_name'])
         if not backing:
-            LOG.info(_("There is no backing for the snapshotted volume: "
-                       "%(snap)s. Not creating any backing for the "
-                       "volume: %(vol)s.") %
+            LOG.info(_LI("There is no backing for the snapshotted volume: "
+                         "%(snap)s. Not creating any backing for the "
+                         "volume: %(vol)s.") %
                      {'snap': snapshot['name'], 'vol': volume['name']})
             return
         snapshot_moref = self.volumeops.get_snapshot(backing,
                                                      snapshot['name'])
         if not snapshot_moref:
-            LOG.info(_("There is no snapshot point for the snapshotted "
-                       "volume: %(snap)s. Not creating any backing for "
-                       "the volume: %(vol)s.") %
+            LOG.info(_LI("There is no snapshot point for the snapshotted "
+                         "volume: %(snap)s. Not creating any backing for "
+                         "the volume: %(vol)s.") %
                      {'snap': snapshot['name'], 'vol': volume['name']})
             return
         clone_type = VMwareVcVmdkDriver._get_clone_type(volume)
@@ -2049,8 +2054,8 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
         self._verify_volume_creation(volume)
         backing = self.volumeops.get_backing(src_vref['name'])
         if not backing:
-            LOG.info(_("There is no backing for the source volume: %(src)s. "
-                       "Not creating any backing for volume: %(vol)s.") %
+            LOG.info(_LI("There is no backing for the source volume: %(src)s. "
+                         "Not creating any backing for volume: %(vol)s.") %
                      {'src': src_vref['name'], 'vol': volume['name']})
             return
         clone_type = VMwareVcVmdkDriver._get_clone_type(volume)
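
For reference, every marker used in this patch comes from cinder.i18n. A
minimal sketch of the Juno-era wiring, paraphrased rather than copied from
the module, assuming the oslo.i18n TranslatorFactory API:

    from oslo import i18n

    _translators = i18n.TranslatorFactory(domain='cinder')

    _ = _translators.primary          # user-facing text: API replies, exceptions
    _LI = _translators.log_info       # LOG.info
    _LW = _translators.log_warning    # LOG.warn / LOG.warning
    _LE = _translators.log_error      # LOG.error / LOG.exception
    _LC = _translators.log_critical   # LOG.critical

Each factory routes its strings to a separate translation catalog, which is
why log calls cannot keep reusing the plain _ marker.
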
index 0c604501d1fd060fde7a6c33dbd453f87a0da674..35a3d281b9211f2cf5054f1779b51ba1ebb9a14d 100644 (file)
@@ -18,7 +18,7 @@ Utility functions for Image transfer.
 
 from eventlet import timeout
 
-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.vmware import error_util
 from cinder.volume.drivers.vmware import io_util
@@ -79,7 +79,7 @@ def start_transfer(context, timeout_secs, read_file_handle, max_data_size,
         write_thread.stop()
 
         # Log and raise the exception.
-        LOG.exception(_("Error occurred during image transfer."))
+        LOG.exception(_LE("Error occurred during image transfer."))
         if isinstance(exc, error_util.ImageTransferException):
             raise
         raise error_util.ImageTransferException(exc)
@@ -107,7 +107,8 @@ def fetch_flat_image(context, timeout_secs, image_service, image_id, **kwargs):
                                                file_size)
     start_transfer(context, timeout_secs, read_handle, file_size,
                    write_file_handle=write_handle)
-    LOG.info(_("Downloaded image: %s from glance image server.") % image_id)
+    LOG.info(_LI("Downloaded image: %s from glance "
+                 "image server.") % image_id)
 
 
 def fetch_stream_optimized_image(context, timeout_secs, image_service,
@@ -126,7 +127,8 @@ def fetch_stream_optimized_image(context, timeout_secs, image_service,
                                                file_size)
     start_transfer(context, timeout_secs, read_handle, file_size,
                    write_file_handle=write_handle)
-    LOG.info(_("Downloaded image: %s from glance image server.") % image_id)
+    LOG.info(_LI("Downloaded image: %s from glance image "
+                 "server.") % image_id)
 
 
 def upload_image(context, timeout_secs, image_service, image_id, owner_id,
@@ -158,7 +160,7 @@ def upload_image(context, timeout_secs, image_service, image_id, owner_id,
     start_transfer(context, timeout_secs, read_handle, file_size,
                    image_service=image_service, image_id=image_id,
                    image_meta=image_metadata)
-    LOG.info(_("Uploaded image: %s to the Glance image server.") % image_id)
+    LOG.info(_LI("Uploaded image: %s to the Glance image server.") % image_id)
 
 
 def download_stream_optimized_disk(
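
Several of the converted LOG.info calls above still interpolate eagerly with
%. A possible follow-up cleanup, not part of this patch, is to pass the value
as a logger argument so formatting happens only if the record is actually
emitted; log_download below is a hypothetical helper:

    from cinder.i18n import _LI
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def log_download(image_id):
        # Eager: '%' builds the final string even when INFO is filtered out.
        LOG.info(_LI("Downloaded image: %s from glance image server.") % image_id)
        # Deferred: the logger interpolates only if the record is emitted.
        LOG.info(_LI("Downloaded image: %s from glance image server."), image_id)
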
index 6728913d61a7cfbd38e114156ab2f285d2f29c28..1cbb143f059eb3e6163621f10db3db2c5eccbdcc 100644 (file)
@@ -22,7 +22,7 @@ import os
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.windows import constants
 
@@ -270,8 +270,8 @@ class WindowsUtils(object):
                 LOG.error(err_msg)
                 raise exception.VolumeBackendAPIException(data=err_msg)
             else:
-                LOG.info(_('Ignored target creation error "%s"'
-                           ' while ensuring export'), exc)
+                LOG.info(_LI('Ignored target creation error "%s"'
+                             ' while ensuring export'), exc)
 
     def remove_iscsi_target(self, target_name):
         """Removes ISCSI target."""
index 4a10ea75bd98d6b6b4195ca9f7c51879730eff5a..baa66bee1f149faf64835a26c6c517b11faa7495 100644 (file)
@@ -26,7 +26,7 @@ from lxml import etree
 from oslo.config import cfg
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 
@@ -462,16 +462,16 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         cg_name = self._get_volume_cg_name(volume_name)
         if not cg_name:
             # If the volume isn't present, then don't attempt to delete
-            LOG.warning(_("snapshot: original volume %s not found, "
-                        "skipping delete operation")
+            LOG.warning(_LW("snapshot: original volume %s not found, "
+                            "skipping delete operation")
                         % snapshot['volume_name'])
             return True
 
         snap_id = self._get_snap_id(cg_name, snapshot['name'])
         if not snap_id:
             # If the snapshot isn't present, then don't attempt to delete
-            LOG.warning(_("snapshot: snapshot %s not found, "
-                        "skipping delete operation")
+            LOG.warning(_LW("snapshot: snapshot %s not found, "
+                            "skipping delete operation")
                         % snapshot['name'])
             return True
 
index 48aa1400a07b2a91a61c914aecfa4c67e6214edf..6672469549c7a2671f1b10b32b4c15821e588abe 100644 (file)
@@ -21,7 +21,7 @@ import StringIO
 import time
 import urllib2
 
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE, _LI
 from cinder.openstack.common import log
 
 LOG = log.getLogger(__name__)
@@ -176,7 +176,7 @@ class RestClientURL(object):
                 self.headers['x-auth-session'] = \
                     result.get_header('x-auth-session')
                 self.do_logout = True
-                LOG.info(_('ZFSSA version: %s') %
+                LOG.info(_LI('ZFSSA version: %s') %
                          result.get_header('x-zfssa-version'))
 
             elif result.status == httplib.NOT_FOUND:
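
Note the import hunk above: once the last user-facing string in a module has
been converted, the plain marker is dropped from the import so only the log
markers the module actually uses remain, e.g.:

    # before: the module still carried user-facing strings
    from cinder.i18n import _, _LE
    # after this patch: only log messages remain
    from cinder.i18n import _LE, _LI
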
index e64d0b59d4514c1b80477cf8eca7d371c05d554c..903dd314bba38c32bda6cd273d8729da3c4e573e 100644 (file)
@@ -140,7 +140,8 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
             self.db.volume_update(context, volume_id, update)
         except exception.CinderException:
             # Don't let resetting the status cause the rescheduling to fail.
-            LOG.exception(_("Volume %s: resetting 'creating' status failed."),
+            LOG.exception(_LE("Volume %s: resetting 'creating' "
+                              "status failed."),
                           volume_id)
 
     def revert(self, context, result, flow_failures, **kwargs):
@@ -159,7 +160,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
                 self._reschedule(context, cause, **kwargs)
                 self._post_reschedule(context, volume_id)
             except exception.CinderException:
-                LOG.exception(_("Volume %s: rescheduling failed"), volume_id)
+                LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
 
 
 class ExtractVolumeRefTask(flow_utils.CinderTask):
@@ -315,8 +316,8 @@ class NotifyVolumeActionTask(flow_utils.CinderTask):
             # If notification sending of volume database entry reading fails
             # then we shouldn't error out the whole workflow since this is
             # not always information that must be sent for volumes to operate
-            LOG.exception(_("Failed notifying about the volume"
-                            " action %(event)s for volume %(volume_id)s") %
+            LOG.exception(_LE("Failed notifying about the volume"
+                              " action %(event)s for volume %(volume_id)s") %
                           {'event': self.event_suffix,
                            'volume_id': volume_id})
 
@@ -414,9 +415,10 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                                                   snapshot_ref['volume_id'])
             make_bootable = originating_vref.bootable
         except exception.CinderException as ex:
-            LOG.exception(_("Failed fetching snapshot %(snapshot_id)s bootable"
-                            " flag using the provided glance snapshot "
-                            "%(snapshot_ref_id)s volume reference") %
+            LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
+                              "bootable"
+                              " flag using the provided glance snapshot "
+                              "%(snapshot_ref_id)s volume reference") %
                           {'snapshot_id': snapshot_id,
                            'snapshot_ref_id': snapshot_ref['volume_id']})
             raise exception.MetadataUpdateFailure(reason=ex)
@@ -430,8 +432,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
             LOG.debug('Marking volume %s as bootable.', volume_id)
             self.db.volume_update(context, volume_id, {'bootable': True})
         except exception.CinderException as ex:
-            LOG.exception(_("Failed updating volume %(volume_id)s bootable"
-                            " flag to true") % {'volume_id': volume_id})
+            LOG.exception(_LE("Failed updating volume %(volume_id)s bootable "
+                              "flag to true") % {'volume_id': volume_id})
             raise exception.MetadataUpdateFailure(reason=ex)
 
     def _create_from_source_volume(self, context, volume_ref,
@@ -582,8 +584,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                 volume_ref = self.db.volume_update(context,
                                                    volume_ref['id'], updates)
             except exception.CinderException:
-                LOG.exception(_("Failed updating volume %(volume_id)s with "
-                                "%(updates)s") %
+                LOG.exception(_LE("Failed updating volume %(volume_id)s with "
+                                  "%(updates)s") %
                               {'volume_id': volume_ref['id'],
                                'updates': updates})
             self._copy_image_to_volume(context, volume_ref,
@@ -648,8 +650,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
             # If somehow the update failed we want to ensure that the
             # failure is logged (but not try rescheduling since the volume at
             # this point has been created).
-            LOG.exception(_("Failed updating model of volume %(volume_id)s"
-                            " with creation provided model %(model)s") %
+            LOG.exception(_LE("Failed updating model of volume %(volume_id)s "
+                              "with creation provided model %(model)s") %
                           {'volume_id': volume_id, 'model': model_update})
             raise
 
@@ -691,9 +693,9 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
             # Now use the parent to notify.
             super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
         except exception.CinderException:
-            LOG.exception(_("Failed updating volume %(volume_id)s with "
-                            "%(update)s") % {'volume_id': volume_id,
-                                             'update': update})
+            LOG.exception(_LE("Failed updating volume %(volume_id)s with "
+                              "%(update)s") % {'volume_id': volume_id,
+                                               'update': update})
         # Even if the update fails, the volume is ready.
         msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
         LOG.info(msg % {
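
The vmdk hunks above pair each _LE conversion with
excutils.save_and_reraise_exception(). A condensed, self-contained sketch of
that pattern, where reset_status is a hypothetical helper:

    from cinder import exception
    from cinder.i18n import _LE
    from cinder.openstack.common import excutils
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def reset_status(db, context, volume_id, update):
        try:
            db.volume_update(context, volume_id, update)
        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                # LOG.exception records the active traceback; _LE routes the
                # text to the error-level translation catalog.
                LOG.exception(_LE("Volume %s: status update failed"), volume_id)

manage_existing.py below gets the same re-raise with a bare raise after
LOG.exception, which is enough when no cleanup needs to run first.
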
index 80627537c46c697bfafb64cae6cd4bf68a03c73d..ca39a1369bad8c414f3d1942acbd4b322a888542 100644 (file)
@@ -82,8 +82,8 @@ class ManageExistingTask(flow_utils.CinderTask):
             volume_ref = self.db.volume_update(context, volume_ref['id'],
                                                model_update)
         except exception.CinderException:
-            LOG.exception(_("Failed updating model of volume %(volume_id)s"
-                            " with creation provided model %(model)s") %
+            LOG.exception(_LE("Failed updating model of volume %(volume_id)s"
+                              " with creation provided model %(model)s") %
                           {'volume_id': volume_ref['id'],
                            'model': model_update})
             raise
index e90ae13b2de41d7d2a57dcfd2597fe3df0b799f6..3ab430ea95a3de4065fbae6ab2334f407bf6a552 100644 (file)
@@ -22,7 +22,7 @@ from oslo.db import exception as db_exc
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
 from cinder.openstack.common import log as logging
 from cinder.volume import volume_types
 
@@ -196,8 +196,8 @@ def associate_qos_with_type(context, specs_id, type_id):
             db.qos_specs_associate(context, specs_id, type_id)
     except db_exc.DBError as e:
         LOG.exception(_LE('DB error: %s') % e)
-        LOG.warn(_('Failed to associate qos specs '
-                   '%(id)s with type: %(vol_type_id)s') %
+        LOG.warn(_LW('Failed to associate qos specs '
+                     '%(id)s with type: %(vol_type_id)s') %
                  dict(id=specs_id, vol_type_id=type_id))
         raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
                                                 type_id=type_id)
@@ -210,8 +210,8 @@ def disassociate_qos_specs(context, specs_id, type_id):
         db.qos_specs_disassociate(context, specs_id, type_id)
     except db_exc.DBError as e:
         LOG.exception(_LE('DB error: %s') % e)
-        LOG.warn(_('Failed to disassociate qos specs '
-                   '%(id)s with type: %(vol_type_id)s') %
+        LOG.warn(_LW('Failed to disassociate qos specs '
+                     '%(id)s with type: %(vol_type_id)s') %
                  dict(id=specs_id, vol_type_id=type_id))
         raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
                                                    type_id=type_id)
@@ -224,7 +224,7 @@ def disassociate_all(context, specs_id):
         db.qos_specs_disassociate_all(context, specs_id)
     except db_exc.DBError as e:
         LOG.exception(_LE('DB error: %s') % e)
-        LOG.warn(_('Failed to disassociate qos specs %s.') % specs_id)
+        LOG.warn(_LW('Failed to disassociate qos specs %s.') % specs_id)
         raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
                                                    type_id=None)
 
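
Where a message carries more than one substitution, as in these qos warnings,
named keys are what let translators reorder the sentence; passing the mapping
as a logger argument also defers the formatting. A sketch with a hypothetical
wrapper function:

    from cinder.i18n import _LW
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def warn_associate_failed(specs_id, type_id):
        # two placeholders: named keys, passed as a mapping argument
        LOG.warn(_LW('Failed to associate qos specs '
                     '%(id)s with type: %(vol_type_id)s'),
                 {'id': specs_id, 'vol_type_id': type_id})
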
index 9b7d915a64d0f8d7eff4fa5ec05c635dec471802..0098c0f38e72723ea74b4d1ae36ae3947d882ace 100644 (file)
@@ -26,7 +26,7 @@ from oslo.utils import units
 
 from cinder.brick.local_dev import lvm as brick_lvm
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
 from cinder.openstack.common import log as logging
 from cinder import rpc
 from cinder import utils
@@ -242,7 +242,7 @@ def setup_blkio_cgroup(srcpath, dstpath, bps_limit, execute=utils.execute):
     try:
         execute('cgcreate', '-g', 'blkio:%s' % group_name, run_as_root=True)
     except processutils.ProcessExecutionError:
-        LOG.warn(_('Failed to create blkio cgroup'))
+        LOG.warn(_LW('Failed to create blkio cgroup'))
         return None
 
     try:
@@ -362,7 +362,7 @@ def clear_volume(volume_size, volume_path, volume_clear=None,
     if volume_clear_ionice is None:
         volume_clear_ionice = CONF.volume_clear_ionice
 
-    LOG.info(_("Performing secure delete on volume: %s") % volume_path)
+    LOG.info(_LI("Performing secure delete on volume: %s") % volume_path)
 
     if volume_clear == 'zero':
         return copy_volume('/dev/zero', volume_path, volume_clear_size,
@@ -387,7 +387,7 @@ def clear_volume(volume_size, volume_path, volume_clear=None,
     # some incredible event this is 0 (cirros image?) don't barf
     if duration < 1:
         duration = 1
-    LOG.info(_('Elapsed time for clear volume: %.2f sec') % duration)
+    LOG.info(_LI('Elapsed time for clear volume: %.2f sec') % duration)
 
 
 def supports_thin_provisioning():
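
These hunks keep the existing % style after conversion, which works because,
as I understand the oslo behavior, the markers return Message objects that
still support % while resolving the translation per-locale at render time
rather than at the _LI() call. A small sketch with a placeholder path:

    from cinder.i18n import _LI

    msg = _LI("Performing secure delete on volume: %s")
    # Message objects still support %, so the pre-existing style keeps working;
    # the catalog lookup happens when the record is finally rendered.
    formatted = msg % '/dev/mapper/vg-lv0'
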
index 72c4e965c702bcd5347b9276bdaeff0ddb0cddf9..c196ff1e51dc47748d3aff86cf8dce44d740f190 100644 (file)
@@ -139,8 +139,9 @@ def get_default_volume_type():
             # Couldn't find volume type with the name in default_volume_type
             # flag, record this issue and move on
             #TODO(zhiteng) consider add notification to warn admin
-            LOG.exception(_('Default volume type is not found, '
-                            'please check default_volume_type config: %s'), e)
+            LOG.exception(_LE('Default volume type is not found, '
+                              'please check default_volume_type '
+                              'config: %s'), e)
 
     return vol_type
 
index 676cf98e6948fa26d944e675b5e4a391292271dc..fa31c94755ae691f0c8d6999b9b143c00c0012bc 100644 (file)
@@ -217,7 +217,7 @@ class CiscoFCZoneDriver(FCZoneDriver):
                         msg = _("Exception: %s") % six.text_type(cisco_ex)
                         raise exception.FCZoneDriverException(msg)
                     except Exception as e:
-                        LOG.error(_("Exception: %s") % six.text_type(e))
+                        LOG.error(_LE("Exception: %s") % six.text_type(e))
                         msg = (_("Failed to add zoning configuration %s") %
                                six.text_type(e))
                         raise exception.FCZoneDriverException(msg)
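
The Cisco hunk formats the exception through six.text_type, which is unicode
on Python 2 and str on Python 3, so the message interpolates cleanly either
way. A self-contained sketch of the surrounding pattern, with
reraise_as_zone_error invented for illustration:

    import six

    from cinder import exception
    from cinder.i18n import _, _LE
    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def reraise_as_zone_error(e):
        LOG.error(_LE("Exception: %s"), six.text_type(e))
        msg = _("Failed to add zoning configuration %s") % six.text_type(e)
        raise exception.FCZoneDriverException(msg)
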
index 1dc8c4d36a6ea8edd1a362e20f33534916b13361..dbe67ac56eb9a0104778497cf7d5264c0ce69aa4 100644 (file)
@@ -19,7 +19,7 @@ Utility functions related to the Zone Manager.
 """
 import logging
 
-from cinder.i18n import _, _LI
+from cinder.i18n import _LI, _LW
 from cinder.openstack.common import log
 from cinder.volume.configuration import Configuration
 from cinder.volume import manager
@@ -75,8 +75,8 @@ def AddFCZone(initialize_connection):
     def decorator(self, *args, **kwargs):
         conn_info = initialize_connection(self, *args, **kwargs)
         if not conn_info:
-            LOG.warn(_("Driver didn't return connection info, "
-                       "can't add zone."))
+            LOG.warn(_LW("Driver didn't return connection info, "
+                         "can't add zone."))
             return None
 
         vol_type = conn_info.get('driver_volume_type', None)
@@ -100,8 +100,8 @@ def RemoveFCZone(terminate_connection):
     def decorator(self, *args, **kwargs):
         conn_info = terminate_connection(self, *args, **kwargs)
         if not conn_info:
-            LOG.warn(_("Driver didn't return connection info from "
-                       "terminate_connection call."))
+            LOG.warn(_LW("Driver didn't return connection info from "
+                         "terminate_connection call."))
             return None
 
         vol_type = conn_info.get('driver_volume_type', None)
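
For context, AddFCZone and RemoveFCZone wrap a driver's connection methods;
when the wrapped call returns no connection info, the decorator emits the
_LW above and returns None instead of touching the fabric. A hypothetical
driver showing where they attach, with placeholder WWN data:

    from cinder.zonemanager import utils as zm_utils

    class ExampleFCDriver(object):
        """Hypothetical driver; only the decorator placement is the point."""

        @zm_utils.AddFCZone
        def initialize_connection(self, volume, connector):
            return {'driver_volume_type': 'fibre_channel',
                    'data': {'target_wwn': ['50060e8010027250'],
                             'initiator_target_map': {}}}

        @zm_utils.RemoveFCZone
        def terminate_connection(self, volume, connector, **kwargs):
            return {'driver_volume_type': 'fibre_channel',
                    'data': {}}
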