review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Remove use of deprecated LOG.warn
author: Sean McGinnis <sean_mcginnis@dell.com>
Wed, 13 May 2015 14:30:20 +0000 (09:30 -0500)
committer: Sean McGinnis <sean_mcginnis@dell.com>
Wed, 13 May 2015 15:51:45 +0000 (10:51 -0500)
LOG.warn is deprecated and LOG.warning should be used.

This patch fixes up instances of LOG.warn usage and adds a
hacking check to make sure it doesn't creep back in.

See Logger.warning note here for background:
https://docs.python.org/3/library/logging.html
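
For background, a minimal sketch of the difference (stdlib logging only; the logger name and message here are illustrative, not taken from the patch):

    import logging

    LOG = logging.getLogger(__name__)

    # Deprecated spelling: Logger.warn is an undocumented alias kept for
    # backwards compatibility and may emit a DeprecationWarning on Python 3.
    LOG.warn("volume %s is in an error state", "vol-0001")

    # Documented spelling that this patch standardizes on.
    LOG.warning("volume %s is in an error state", "vol-0001")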

Also cleaned up some remaining instances where log calls were
pre-formatting strings rather than passing the formatting
arguments to the logger to handle.
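
As an illustration of that cleanup (a sketch, not a line from the patch): with pre-formatting, the message string is built even when the log level is disabled, while passing the arguments lets the logger interpolate lazily.

    import logging

    LOG = logging.getLogger(__name__)
    volume = {'id': 'vol-0001'}  # placeholder value for illustration

    # Before: the string is interpolated unconditionally at the call site,
    # even when DEBUG logging is disabled.
    LOG.debug("Found volume %s" % volume)

    # After: the arguments are passed through, and the logger interpolates
    # them only if the record is actually emitted.
    LOG.debug("Found volume %s", volume)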

Change-Id: Id2e6cba489d8509601820b5aed83652f71be2bdc

14 files changed:
HACKING.rst
cinder/hacking/checks.py
cinder/tests/unit/image/fake.py
cinder/tests/unit/integrated/integrated_helpers.py
cinder/tests/unit/integrated/test_volumes.py
cinder/tests/unit/integrated/test_xml.py
cinder/tests/unit/test_hds_iscsi.py
cinder/volume/api.py
cinder/volume/drivers/emc/emc_vmax_utils.py
cinder/volume/flows/api/create_volume.py
cinder/volume/qos_specs.py
cinder/volume/targets/iet.py
cinder/volume/throttling.py
cinder/volume/utils.py

index 7acf18287c112b8d7aad144560b5de162f1438cc..d526dbef8044165b0b26006ecf1aca3e8bdf8e73 100644 (file)
@@ -23,6 +23,7 @@ Cinder Specific Commandments
 - [C304] Enforce no use of LOG.audit messages.  LOG.info should be used instead.
 - [C305] Prevent use of deprecated contextlib.nested.
 - [C306] timeutils.strtime() must not be used (deprecated).
+- [C307] LOG.warn is deprecated. Enforce use of LOG.warning.
 
 
 General
index c0930c17c69ca0c2ececa4d90c80047483418b87..831843de107c58492f13347f868ba010888acebd 100644 (file)
@@ -297,6 +297,12 @@ def check_timeutils_strtime(logical_line):
         yield(0, msg)
 
 
+def no_log_warn(logical_line):
+    msg = "C307: LOG.warn is deprecated, please use LOG.warning!"
+    if "LOG.warn(" in logical_line:
+        yield (0, msg)
+
+
 def factory(register):
     register(no_vi_headers)
     register(no_translate_debug_logs)
@@ -312,3 +318,4 @@ def factory(register):
     register(check_no_print_statements)
     register(check_no_log_audit)
     register(check_no_contextlib_nested)
+    register(no_log_warn)
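
For illustration, the new check can be exercised on its own (a sketch; in practice it runs through the hacking/flake8 factory registration above):

    >>> list(no_log_warn("        LOG.warn(_LW('volume missing'))"))
    [(0, 'C307: LOG.warn is deprecated, please use LOG.warning!')]
    >>> list(no_log_warn("        LOG.warning(_LW('volume missing'))"))
    []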
index b3cf50cc9810769c857b9cdc4c79bcaead5a442a..b178326e2284268f82ec8e781012856c42a7426d 100644 (file)
@@ -163,8 +163,8 @@ class _FakeImageService(object):
         image = self.images.get(str(image_id))
         if image:
             return copy.deepcopy(image)
-        LOG.warn('Unable to find image id %s.  Have images: %s',
-                 image_id, self.images)
+        LOG.warning('Unable to find image id %s. Have images: %s',
+                    image_id, self.images)
         raise exception.ImageNotFound(image_id=image_id)
 
     def create(self, context, metadata, data=None):
index 17a9543a8419c0bff8222f989035c73abf2053eb..a3e7ff1c72e8c0fb35472b8c63cbfc3136df0fb9 100644 (file)
@@ -54,7 +54,7 @@ def generate_new_element(items, prefix, numeric=False):
             candidate = prefix + generate_random_alphanumeric(8)
         if candidate not in items:
             return candidate
-        LOG.debug("Random collision on %s" % candidate)
+        LOG.debug("Random collision on %s", candidate)
 
 
 class _IntegratedTestBase(test.TestCase):
@@ -85,7 +85,7 @@ class _IntegratedTestBase(test.TestCase):
         # FIXME(ja): this is not the auth url - this is the service url
         # FIXME(ja): this needs fixed in nova as well
         self.auth_url = 'http://%s:%s/v2' % (self.osapi.host, self.osapi.port)
-        LOG.warn(self.auth_url)
+        LOG.warning(self.auth_url)
 
     def _get_flags(self):
         """An opportunity to setup flags, before the services are started."""
@@ -115,7 +115,7 @@ class _IntegratedTestBase(test.TestCase):
         server = {}
 
         image = self.api.get_images()[0]
-        LOG.debug("Image: %s" % image)
+        LOG.debug("Image: %s", image)
 
         if 'imageRef' in image:
             image_href = image['imageRef']
@@ -128,7 +128,7 @@ class _IntegratedTestBase(test.TestCase):
 
         # Set a valid flavorId
         flavor = self.api.get_flavors()[0]
-        LOG.debug("Using flavor: %s" % flavor)
+        LOG.debug("Using flavor: %s", flavor)
         server['flavorRef'] = 'http://fake.server/%s' % flavor['id']
 
         # Set a valid server name
index caee2a7d53bdefb0f09aa2140b3de0b06b7fa899..a2cb3e7462a2d1ec151a8bf9c9172b5314eeb9e7 100644 (file)
@@ -36,7 +36,7 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         self.osapi = service.WSGIService("osapi_volume")
         self.osapi.start()
         self.auth_url = 'http://%s:%s/v2' % (self.osapi.host, self.osapi.port)
-        LOG.warn(self.auth_url)
+        LOG.warning(self.auth_url)
 
     def _get_flags(self):
         f = super(VolumesTest, self)._get_flags()
@@ -48,13 +48,13 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         """Simple check that listing volumes works."""
         volumes = self.api.get_volumes(False)
         for volume in volumes:
-            LOG.debug("volume: %s" % volume)
+            LOG.debug("volume: %s", volume)
 
     def test_get_volumes(self):
         """Simple check that listing volumes works."""
         volumes = self.api.get_volumes()
         for volume in volumes:
-            LOG.debug("volume: %s" % volume)
+            LOG.debug("volume: %s", volume)
 
     def _poll_while(self, volume_id, continue_states, max_retries=5):
         """Poll (briefly) while the state is in continue_states."""
@@ -67,7 +67,7 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
                 LOG.debug("Got 404, proceeding")
                 break
 
-            LOG.debug("Found %s" % found_volume)
+            LOG.debug("Found %s", found_volume)
 
             self.assertEqual(volume_id, found_volume['id'])
 
@@ -86,7 +86,7 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
 
         # Create volume
         created_volume = self.api.post_volume({'volume': {'size': 1}})
-        LOG.debug("created_volume: %s" % created_volume)
+        LOG.debug("created_volume: %s", created_volume)
         self.assertTrue(created_volume['id'])
         created_volume_id = created_volume['id']
 
@@ -114,12 +114,12 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         # Should be gone
         self.assertFalse(found_volume)
 
-        LOG.debug("Logs: %s" % fake_driver.LoggingVolumeDriver.all_logs())
+        LOG.debug("Logs: %s", fake_driver.LoggingVolumeDriver.all_logs())
 
         create_actions = fake_driver.LoggingVolumeDriver.logs_like(
             'create_volume',
             id=created_volume_id)
-        LOG.debug("Create_Actions: %s" % create_actions)
+        LOG.debug("Create_Actions: %s", create_actions)
 
         self.assertEqual(1, len(create_actions))
         create_action = create_actions[0]
@@ -151,7 +151,7 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         created_volume = self.api.post_volume(
             {'volume': {'size': 1,
                         'metadata': metadata}})
-        LOG.debug("created_volume: %s" % created_volume)
+        LOG.debug("created_volume: %s", created_volume)
         self.assertTrue(created_volume['id'])
         created_volume_id = created_volume['id']
 
@@ -168,7 +168,7 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         created_volume = self.api.post_volume(
             {'volume': {'size': 1,
                         'availability_zone': availability_zone}})
-        LOG.debug("created_volume: %s" % created_volume)
+        LOG.debug("created_volume: %s", created_volume)
         self.assertTrue(created_volume['id'])
         created_volume_id = created_volume['id']
 
index e24957dc220545cd63bf830ba02a99491189086b..c238775895bb25fc4012f4d9de9072ce2309df4b 100644 (file)
@@ -44,6 +44,6 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
         response = self.api.api_request('/volumes', headers=headers,
                                         stream=True)
         data = response.raw
-        LOG.warn("data: %s" % data)
+        LOG.warning("data: %s", data)
         root = etree.parse(data).getroot()
         self.assertEqual(root.nsmap.get(None), common.XML_NS_V2)
index 24abcb4d314e43719cee65db7744256b6b789bbc..7377e4b44c22039377858e2cbd0cac4400845051 100644 (file)
@@ -104,24 +104,24 @@ class SimulatedHnasBackend(object):
         self.connections = []
 
     def deleteVolume(self, name):
-        LOG.info("delVolume: name %s" % name)
+        LOG.info("delVolume: name %s", name)
 
         volume = self.getVolume(name)
         if volume:
-            LOG.info("deleteVolume: deleted name %s provider %s"
-                     % (volume['name'], volume['provider_location']))
+            LOG.info("deleteVolume: deleted name %s provider %s",
+                     volume['name'], volume['provider_location'])
             self.volumes.remove(volume)
             return True
         else:
             return False
 
     def deleteVolumebyProvider(self, provider):
-        LOG.info("delVolumeP: provider %s" % provider)
+        LOG.info("delVolumeP: provider %s", provider)
 
         volume = self.getVolumebyProvider(provider)
         if volume:
-            LOG.info("deleteVolumeP: deleted name %s provider %s"
-                     % (volume['name'], volume['provider_location']))
+            LOG.info("deleteVolumeP: deleted name %s provider %s",
+                     volume['name'], volume['provider_location'])
             self.volumes.remove(volume)
             return True
         else:
@@ -131,13 +131,13 @@ class SimulatedHnasBackend(object):
         return self.volumes
 
     def getVolume(self, name):
-        LOG.info("getVolume: find by name %s" % name)
+        LOG.info("getVolume: find by name %s", name)
 
         if self.volumes:
             for volume in self.volumes:
                 if str(volume['name']) == name:
-                    LOG.info("getVolume: found name %s provider %s"
-                             % (volume['name'], volume['provider_location']))
+                    LOG.info("getVolume: found name %s provider %s",
+                             volume['name'], volume['provider_location'])
                     return volume
         else:
             LOG.info("getVolume: no volumes")
@@ -146,13 +146,13 @@ class SimulatedHnasBackend(object):
         return None
 
     def getVolumebyProvider(self, provider):
-        LOG.info("getVolumeP: find by provider %s" % provider)
+        LOG.info("getVolumeP: find by provider %s", provider)
 
         if self.volumes:
             for volume in self.volumes:
                 if str(volume['provider_location']) == provider:
-                    LOG.info("getVolumeP: found name %s provider %s"
-                             % (volume['name'], volume['provider_location']))
+                    LOG.info("getVolumeP: found name %s provider %s",
+                             volume['name'], volume['provider_location'])
                     return volume
         else:
             LOG.info("getVolumeP: no volumes")
@@ -161,8 +161,8 @@ class SimulatedHnasBackend(object):
         return None
 
     def createVolume(self, name, provider, sizeMiB, comment):
-        LOG.info("createVolume: name %s provider %s comment %s"
-                 % (name, provider, comment))
+        LOG.info("createVolume: name %s provider %s comment %s",
+                 name, provider, comment)
 
         new_vol = {'additionalStates': [],
                    'adminSpace': {'freeMiB': 0,
@@ -203,10 +203,10 @@ class SimulatedHnasBackend(object):
     def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
         _out = ""
         id = "myID"
-        LOG.info("Delete_Lu: check lun %s id %s" % (lun, id))
+        LOG.info("Delete_Lu: check lun %s id %s", lun, id)
 
         if self.deleteVolumebyProvider(id + '.' + str(lun)):
-            LOG.warn("Delete_Lu: failed to delete lun %s id %s" % (lun, id))
+            LOG.warning("Delete_Lu: failed to delete lun %s id %s", lun, id)
         return _out
 
     def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
@@ -214,7 +214,7 @@ class SimulatedHnasBackend(object):
                 (self.start_lun, size))
 
         id = name
-        LOG.info("HNAS Create_Dup: %d" % self.start_lun)
+        LOG.info("HNAS Create_Dup: %d", self.start_lun)
         self.createVolume(name, id + '.' + str(self.start_lun), size,
                           "create-dup")
         self.start_lun += 1
@@ -231,7 +231,7 @@ class SimulatedHnasBackend(object):
         self.init_index += 1
         self.target_index += 1
         self.hlun += 1
-        LOG.debug("Created connection %d" % self.init_index)
+        LOG.debug("Created connection %d", self.init_index)
         self.connections.append(conn)
         return _out
 
@@ -246,11 +246,11 @@ class SimulatedHnasBackend(object):
         _out = ("LUN: %s successfully extended to %s MB" % (lu, size))
         id = name
         self.out = _out
-        LOG.info("extend_vol: lu: %s %d -> %s" % (lu, int(size), self.out))
+        LOG.info("extend_vol: lu: %s %d -> %s", lu, int(size), self.out)
         v = self.getVolumebyProvider(id + '.' + str(lu))
         if v:
             v['sizeMiB'] = size
-        LOG.info("extend_vol: out %s %s" % (self.out, self))
+        LOG.info("extend_vol: out %s %s", self.out, self)
         return _out
 
     def get_luns(self):
index 56379bfc80babc6343a14c4de09198d6cf8aec9a..3b14eec023492d8b3733284d97966d5d8fe66747 100644 (file)
@@ -712,10 +712,10 @@ class API(base.Base):
                     msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                               "%(s_size)sG snapshot (%(d_consumed)dG of "
                               "%(d_quota)dG already consumed).")
-                    LOG.warn(msg, {'s_pid': context.project_id,
-                                   's_size': volume['size'],
-                                   'd_consumed': _consumed(over),
-                                   'd_quota': quotas[over]})
+                    LOG.warning(msg, {'s_pid': context.project_id,
+                                      's_size': volume['size'],
+                                      'd_consumed': _consumed(over),
+                                      'd_quota': quotas[over]})
                     raise exception.VolumeSizeExceedsAvailableQuota(
                         requested=volume['size'],
                         consumed=_consumed('gigabytes'),
@@ -725,8 +725,8 @@ class API(base.Base):
                               "snapshot (%(d_consumed)d snapshots "
                               "already consumed).")
 
-                    LOG.warn(msg, {'s_pid': context.project_id,
-                                   'd_consumed': _consumed(over)})
+                    LOG.warning(msg, {'s_pid': context.project_id,
+                                      'd_consumed': _consumed(over)})
                     raise exception.SnapshotLimitExceeded(
                         allowed=quotas[over])
 
@@ -955,15 +955,15 @@ class API(base.Base):
         for k, v in metadata.iteritems():
             if len(k) == 0:
                 msg = _("Metadata property key blank.")
-                LOG.warn(msg)
+                LOG.warning(msg)
                 raise exception.InvalidVolumeMetadata(reason=msg)
             if len(k) > 255:
                 msg = _("Metadata property key greater than 255 characters.")
-                LOG.warn(msg)
+                LOG.warning(msg)
                 raise exception.InvalidVolumeMetadataSize(reason=msg)
             if len(v) > 255:
                 msg = _("Metadata property value greater than 255 characters.")
-                LOG.warn(msg)
+                LOG.warning(msg)
                 raise exception.InvalidVolumeMetadataSize(reason=msg)
 
     @wrap_check_policy
index 30bb8ad5ec2fbc149e2046f9100ca77f1ed70bf7..9de984fc454e99c9db788458cd5852eb14b20158 100644 (file)
@@ -1915,9 +1915,9 @@ class EMCVMAXUtils(object):
                       {'initiator': initiator, 'rc': rc, 'ret': ret})
             hardwareIdList = ret['HardwareID']
         else:
-            LOG.warn(_LW("CreateStorageHardwareID failed. initiator: "
-                         "%(initiator)s, rc=%(rc)d, ret=%(ret)s."),
-                     {'initiator': initiator, 'rc': rc, 'ret': ret})
+            LOG.warning(_LW("CreateStorageHardwareID failed. initiator: "
+                            "%(initiator)s, rc=%(rc)d, ret=%(ret)s."),
+                        {'initiator': initiator, 'rc': rc, 'ret': ret})
         return hardwareIdList
 
     def _get_hardware_type(
@@ -1935,5 +1935,5 @@ class EMCVMAXUtils(object):
             if 'iqn' in initiator.lower():
                 hardwareTypeId = 5
         if hardwareTypeId == 0:
-            LOG.warn(_LW("Cannot determine the hardware type."))
+            LOG.warning(_LW("Cannot determine the hardware type."))
         return hardwareTypeId
index 7d490066385f1b004b6e70f9278bed2f40bba4de..79852531104f7d251e96cff56a5f7e1ead0f1dea 100644 (file)
@@ -21,7 +21,7 @@ from taskflow.types import failure as ft
 
 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
 from cinder import policy
 from cinder import quota
 from cinder import utils
@@ -283,17 +283,17 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
         for (k, v) in metadata.iteritems():
             if len(k) == 0:
                 msg = _("Metadata property key blank")
-                LOG.warn(msg)
+                LOG.warning(msg)
                 raise exception.InvalidVolumeMetadata(reason=msg)
             if len(k) > 255:
                 msg = _("Metadata property key %s greater than 255 "
                         "characters") % k
-                LOG.warn(msg)
+                LOG.warning(msg)
                 raise exception.InvalidVolumeMetadataSize(reason=msg)
             if len(v) > 255:
                 msg = _("Metadata property key %s value greater than"
                         " 255 characters") % k
-                LOG.warn(msg)
+                LOG.warning(msg)
                 raise exception.InvalidVolumeMetadataSize(reason=msg)
 
     def _extract_availability_zone(self, availability_zone, snapshot,
@@ -329,7 +329,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
                 availability_zone = CONF.storage_availability_zone
         if availability_zone not in self.availability_zones:
             msg = _("Availability zone '%s' is invalid") % (availability_zone)
-            LOG.warn(msg)
+            LOG.warning(msg)
             raise exception.InvalidInput(reason=msg)
 
         # If the configuration only allows cloning to the same availability
@@ -386,9 +386,9 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
                 current_volume_type_id = volume_type.get('id')
                 if (current_volume_type_id !=
                         snapshot['volume_type_id']):
-                    msg = _("Volume type will be changed to "
-                            "be the same as the source volume.")
-                    LOG.warn(msg)
+                    msg = _LW("Volume type will be changed to "
+                              "be the same as the source volume.")
+                    LOG.warning(msg)
             volume_type_id = snapshot['volume_type_id']
         else:
             volume_type_id = volume_type.get('id')
@@ -593,23 +593,23 @@ class QuotaReserveTask(flow_utils.CinderTask):
                 return False
 
             if _is_over('gigabytes'):
-                msg = _("Quota exceeded for %(s_pid)s, tried to create "
-                        "%(s_size)sG volume (%(d_consumed)dG "
-                        "of %(d_quota)dG already consumed)")
-                LOG.warn(msg % {'s_pid': context.project_id,
-                                's_size': size,
-                                'd_consumed': _consumed('gigabytes'),
-                                'd_quota': quotas['gigabytes']})
+                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+                          "%(s_size)sG volume (%(d_consumed)dG "
+                          "of %(d_quota)dG already consumed)")
+                LOG.warning(msg, {'s_pid': context.project_id,
+                                  's_size': size,
+                                  'd_consumed': _consumed('gigabytes'),
+                                  'd_quota': quotas['gigabytes']})
                 raise exception.VolumeSizeExceedsAvailableQuota(
                     requested=size,
                     consumed=_consumed('gigabytes'),
                     quota=quotas['gigabytes'])
             elif _is_over('volumes'):
-                msg = _("Quota exceeded for %(s_pid)s, tried to create "
-                        "volume (%(d_consumed)d volumes "
-                        "already consumed)")
-                LOG.warn(msg % {'s_pid': context.project_id,
-                                'd_consumed': _consumed('volumes')})
+                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+                          "volume (%(d_consumed)d volumes "
+                          "already consumed)")
+                LOG.warning(msg, {'s_pid': context.project_id,
+                                  'd_consumed': _consumed('volumes')})
                 raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
             else:
                 # If nothing was reraised, ensure we reraise the initial error
index fbe4fc26d5b3e31735ec7d92f19fb6449c67f956..ee0a438fcf0090012525d68709378c4f66a03ec1 100644 (file)
@@ -77,12 +77,12 @@ def create(context, name, specs=None):
 
     values = dict(name=name, qos_specs=specs)
 
-    LOG.debug("Dict for qos_specs: %s" % values)
+    LOG.debug("Dict for qos_specs: %s", values)
 
     try:
         qos_specs_ref = db.qos_specs_create(context, values)
-    except db_exc.DBError as e:
-        LOG.exception(_LE('DB error: %s') % e)
+    except db_exc.DBError:
+        LOG.exception(_LE('DB error:'))
         raise exception.QoSSpecsCreateFailed(name=name,
                                              qos_specs=specs)
     return qos_specs_ref
@@ -102,8 +102,8 @@ def update(context, qos_specs_id, specs):
     LOG.debug('qos_specs.update(): specs %s' % specs)
     try:
         res = db.qos_specs_update(context, qos_specs_id, specs)
-    except db_exc.DBError as e:
-        LOG.exception(_LE('DB error: %s') % e)
+    except db_exc.DBError:
+        LOG.exception(_LE('DB error:'))
         raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id,
                                              qos_specs=specs)
 
@@ -152,11 +152,11 @@ def get_associations(context, specs_id):
     try:
         # query returns a list of volume types associated with qos specs
         associates = db.qos_specs_associations_get(context, specs_id)
-    except db_exc.DBError as e:
-        LOG.exception(_LE('DB error: %s') % e)
+    except db_exc.DBError:
+        LOG.exception(_LE('DB error:'))
         msg = _('Failed to get all associations of '
                 'qos specs %s') % specs_id
-        LOG.warn(msg)
+        LOG.warning(msg)
         raise exception.CinderException(message=msg)
 
     result = []
@@ -194,11 +194,11 @@ def associate_qos_with_type(context, specs_id, type_id):
                 raise exception.InvalidVolumeType(reason=msg)
         else:
             db.qos_specs_associate(context, specs_id, type_id)
-    except db_exc.DBError as e:
-        LOG.exception(_LE('DB error: %s') % e)
-        LOG.warn(_LW('Failed to associate qos specs '
-                     '%(id)s with type: %(vol_type_id)s') %
-                 dict(id=specs_id, vol_type_id=type_id))
+    except db_exc.DBError:
+        LOG.exception(_LE('DB error:'))
+        LOG.warning(_LW('Failed to associate qos specs '
+                        '%(id)s with type: %(vol_type_id)s'),
+                    dict(id=specs_id, vol_type_id=type_id))
         raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
                                                 type_id=type_id)
 
@@ -208,11 +208,11 @@ def disassociate_qos_specs(context, specs_id, type_id):
     try:
         get_qos_specs(context, specs_id)
         db.qos_specs_disassociate(context, specs_id, type_id)
-    except db_exc.DBError as e:
-        LOG.exception(_LE('DB error: %s') % e)
-        LOG.warn(_LW('Failed to disassociate qos specs '
-                     '%(id)s with type: %(vol_type_id)s') %
-                 dict(id=specs_id, vol_type_id=type_id))
+    except db_exc.DBError:
+        LOG.exception(_LE('DB error:'))
+        LOG.warning(_LW('Failed to disassociate qos specs '
+                        '%(id)s with type: %(vol_type_id)s'),
+                    dict(id=specs_id, vol_type_id=type_id))
         raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
                                                    type_id=type_id)
 
@@ -222,9 +222,9 @@ def disassociate_all(context, specs_id):
     try:
         get_qos_specs(context, specs_id)
         db.qos_specs_disassociate_all(context, specs_id)
-    except db_exc.DBError as e:
-        LOG.exception(_LE('DB error: %s') % e)
-        LOG.warn(_LW('Failed to disassociate qos specs %s.') % specs_id)
+    except db_exc.DBError:
+        LOG.exception(_LE('DB error:'))
+        LOG.warning(_LW('Failed to disassociate qos specs %s.'), specs_id)
         raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
                                                    type_id=None)
 
@@ -239,7 +239,7 @@ def get_all_specs(context, inactive=False, search_opts=None):
     qos_specs = db.qos_specs_get_all(context, inactive)
 
     if search_opts:
-        LOG.debug("Searching by: %s" % search_opts)
+        LOG.debug("Searching by: %s", search_opts)
 
         def _check_specs_match(qos_specs, searchdict):
             for k, v in searchdict.iteritems():
index 9af7f0283f1278f8ba473d09841de13190ec3dc0..93e79bf7c9009160680b24637dc4896f621180ea 100644 (file)
@@ -110,9 +110,10 @@ class IetAdm(iscsi.ISCSITarget):
             # Missing config file is unxepected sisuation. But we will create
             # new config file during create_iscsi_target(). Just we warn the
             # operator here.
-            LOG.warn(_LW("Failed to find CHAP auth from config for "
-                         "%(vol_id)s. Config file %(conf)s does not exist."),
-                     {'vol_id': vol_id, 'conf': self.iet_conf})
+            LOG.warning(_LW("Failed to find CHAP auth from config for "
+                            "%(vol_id)s. Config file %(conf)s does not "
+                            "exist."),
+                        {'vol_id': vol_id, 'conf': self.iet_conf})
             return None
 
     def create_iscsi_target(self, name, tid, lun, path,
@@ -219,14 +220,14 @@ class IetAdm(iscsi.ISCSITarget):
                         iet_conf_text.writelines(new_iet_conf_txt)
             except Exception:
                 LOG.exception(_LE("Failed to update %(conf)s for volume id "
-                                  "%(vol_id) after removing iscsi target"),
+                                  "%(vol_id)s after removing iscsi target"),
                               {'conf': conf_file, 'vol_id': vol_id})
                 raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
         else:
-            LOG.warn(_LW("Failed to update %(conf)s for volume id %(vol_id) "
-                         "after removing iscsi target. "
-                         "%(conf)s does not exist."),
-                     {'conf': conf_file, 'vol_id': vol_id})
+            LOG.warning(_LW("Failed to update %(conf)s for volume id "
+                            "%(vol_id)s after removing iscsi target. "
+                            "%(conf)s does not exist."),
+                        {'conf': conf_file, 'vol_id': vol_id})
 
     def _find_sid_cid_for_target(self, tid, name, vol_id):
         """Find sid, cid for existing iscsi target"""
index ac50d94a4b21da67fa37deacc1e2c55426ffd692..860aba99bafa028fcd283f6949f9c742bc40cc26 100644 (file)
@@ -83,8 +83,8 @@ class BlkioCgroup(Throttle):
             utils.execute('cgset', '-r', 'blkio.throttle.%s_bps_device=%s %d'
                           % (rw, dev, bps), self.cgroup, run_as_root=True)
         except processutils.ProcessExecutionError:
-            LOG.warn(_LW('Failed to setup blkio cgroup to throttle the '
-                         'device \'%(device)s\'.'), {'device': dev})
+            LOG.warning(_LW('Failed to setup blkio cgroup to throttle the '
+                            'device \'%(device)s\'.'), {'device': dev})
 
     def _set_limits(self, rw, devs):
         total = sum(devs.itervalues())
index 405c8ef1610061f38b9acc3537ff6614906b11ff..bccb7714ed832ab5a63c1f2c40e58273453da5ac 100644 (file)
@@ -27,7 +27,7 @@ from oslo_utils import units
 
 from cinder.brick.local_dev import lvm as brick_lvm
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _LI, _LW
 from cinder import rpc
 from cinder import utils
 from cinder.volume import throttling
@@ -252,11 +252,10 @@ def _calculate_count(size_in_m, blocksize):
             raise ValueError
         bs = strutils.string_to_bytes('%sB' % blocksize)
     except ValueError:
-        msg = (_("Incorrect value error: %(blocksize)s, "
-                 "it may indicate that \'volume_dd_blocksize\' "
-                 "was configured incorrectly. Fall back to default.")
-               % {'blocksize': blocksize})
-        LOG.warn(msg)
+        LOG.warning(_LW("Incorrect value error: %(blocksize)s, "
+                        "it may indicate that \'volume_dd_blocksize\' "
+                        "was configured incorrectly. Fall back to default."),
+                    {'blocksize': blocksize})
         # Fall back to default blocksize
         CONF.clear_override('volume_dd_blocksize')
         blocksize = CONF.volume_dd_blocksize
@@ -315,14 +314,14 @@ def _copy_volume(prefix, srcstr, deststr, size_in_m, blocksize, sync=False,
     if duration < 1:
         duration = 1
     mbps = (size_in_m / duration)
-    mesg = ("Volume copy details: src %(src)s, dest %(dest)s, "
-            "size %(sz).2f MB, duration %(duration).2f sec")
-    LOG.debug(mesg % {"src": srcstr,
-                      "dest": deststr,
-                      "sz": size_in_m,
-                      "duration": duration})
-    mesg = _("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s")
-    LOG.info(mesg % {'size_in_m': size_in_m, 'mbps': mbps})
+    LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, "
+              "size %(sz).2f MB, duration %(duration).2f sec",
+              {"src": srcstr,
+               "dest": deststr,
+               "sz": size_in_m,
+               "duration": duration})
+    LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"),
+             {'size_in_m': size_in_m, 'mbps': mbps})
 
 
 def copy_volume(srcstr, deststr, size_in_m, blocksize, sync=False,
@@ -351,7 +350,7 @@ def clear_volume(volume_size, volume_path, volume_clear=None,
     if volume_clear_ionice is None:
         volume_clear_ionice = CONF.volume_clear_ionice
 
-    LOG.info(_LI("Performing secure delete on volume: %s") % volume_path)
+    LOG.info(_LI("Performing secure delete on volume: %s"), volume_path)
 
     if volume_clear == 'zero':
         return copy_volume('/dev/zero', volume_path, volume_clear_size,
@@ -377,7 +376,7 @@ def clear_volume(volume_size, volume_path, volume_clear=None,
     # some incredible event this is 0 (cirros image?) don't barf
     if duration < 1:
         duration = 1
-    LOG.info(_LI('Elapsed time for clear volume: %.2f sec') % duration)
+    LOG.info(_LI('Elapsed time for clear volume: %.2f sec'), duration)
 
 
 def supports_thin_provisioning():