LOG.warn is deprecated and LOG.warning should be used.
This patch fixes up instances of LOG.warn usage and adds a
hacking check to make sure it doesn't creep back in.
See Logger.warning note here for background:
https://docs.python.org/3/library/logging.html
Also cleaned up some remaining instances where logging was
preformatting strings rather than passing in formatting
arguments to the logger to handle.
Change-Id: Id2e6cba489d8509601820b5aed83652f71be2bdc
- [C304] Enforce no use of LOG.audit messages. LOG.info should be used instead.
- [C305] Prevent use of deprecated contextlib.nested.
- [C306] timeutils.strtime() must not be used (deprecated).
+- [C307] LOG.warn is deprecated. Enforce use of LOG.warning.
General
yield(0, msg)
+def no_log_warn(logical_line):
+ msg = "C307: LOG.warn is deprecated, please use LOG.warning!"
+ if "LOG.warn(" in logical_line:
+ yield (0, msg)
+
+
def factory(register):
register(no_vi_headers)
register(no_translate_debug_logs)
register(check_no_print_statements)
register(check_no_log_audit)
register(check_no_contextlib_nested)
+ register(no_log_warn)
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
- LOG.warn('Unable to find image id %s. Have images: %s',
- image_id, self.images)
+ LOG.warning('Unable to find image id %s. Have images: %s',
+ image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
candidate = prefix + generate_random_alphanumeric(8)
if candidate not in items:
return candidate
- LOG.debug("Random collision on %s" % candidate)
+ LOG.debug("Random collision on %s", candidate)
class _IntegratedTestBase(test.TestCase):
    # FIXME(ja): this is not the auth url - this is the service url
    # FIXME(ja): this needs to be fixed in nova as well
self.auth_url = 'http://%s:%s/v2' % (self.osapi.host, self.osapi.port)
- LOG.warn(self.auth_url)
+ LOG.warning(self.auth_url)
def _get_flags(self):
"""An opportunity to setup flags, before the services are started."""
server = {}
image = self.api.get_images()[0]
- LOG.debug("Image: %s" % image)
+ LOG.debug("Image: %s", image)
if 'imageRef' in image:
image_href = image['imageRef']
# Set a valid flavorId
flavor = self.api.get_flavors()[0]
- LOG.debug("Using flavor: %s" % flavor)
+ LOG.debug("Using flavor: %s", flavor)
server['flavorRef'] = 'http://fake.server/%s' % flavor['id']
# Set a valid server name
self.osapi = service.WSGIService("osapi_volume")
self.osapi.start()
self.auth_url = 'http://%s:%s/v2' % (self.osapi.host, self.osapi.port)
- LOG.warn(self.auth_url)
+ LOG.warning(self.auth_url)
def _get_flags(self):
f = super(VolumesTest, self)._get_flags()
"""Simple check that listing volumes works."""
volumes = self.api.get_volumes(False)
for volume in volumes:
- LOG.debug("volume: %s" % volume)
+ LOG.debug("volume: %s", volume)
def test_get_volumes(self):
"""Simple check that listing volumes works."""
volumes = self.api.get_volumes()
for volume in volumes:
- LOG.debug("volume: %s" % volume)
+ LOG.debug("volume: %s", volume)
def _poll_while(self, volume_id, continue_states, max_retries=5):
"""Poll (briefly) while the state is in continue_states."""
LOG.debug("Got 404, proceeding")
break
- LOG.debug("Found %s" % found_volume)
+ LOG.debug("Found %s", found_volume)
self.assertEqual(volume_id, found_volume['id'])
# Create volume
created_volume = self.api.post_volume({'volume': {'size': 1}})
- LOG.debug("created_volume: %s" % created_volume)
+ LOG.debug("created_volume: %s", created_volume)
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
# Should be gone
self.assertFalse(found_volume)
- LOG.debug("Logs: %s" % fake_driver.LoggingVolumeDriver.all_logs())
+ LOG.debug("Logs: %s", fake_driver.LoggingVolumeDriver.all_logs())
create_actions = fake_driver.LoggingVolumeDriver.logs_like(
'create_volume',
id=created_volume_id)
- LOG.debug("Create_Actions: %s" % create_actions)
+ LOG.debug("Create_Actions: %s", create_actions)
self.assertEqual(1, len(create_actions))
create_action = create_actions[0]
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'metadata': metadata}})
- LOG.debug("created_volume: %s" % created_volume)
+ LOG.debug("created_volume: %s", created_volume)
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'availability_zone': availability_zone}})
- LOG.debug("created_volume: %s" % created_volume)
+ LOG.debug("created_volume: %s", created_volume)
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
response = self.api.api_request('/volumes', headers=headers,
stream=True)
data = response.raw
- LOG.warn("data: %s" % data)
+ LOG.warning("data: %s", data)
root = etree.parse(data).getroot()
self.assertEqual(root.nsmap.get(None), common.XML_NS_V2)
self.connections = []
def deleteVolume(self, name):
- LOG.info("delVolume: name %s" % name)
+ LOG.info("delVolume: name %s", name)
volume = self.getVolume(name)
if volume:
- LOG.info("deleteVolume: deleted name %s provider %s"
- % (volume['name'], volume['provider_location']))
+ LOG.info("deleteVolume: deleted name %s provider %s",
+ volume['name'], volume['provider_location'])
self.volumes.remove(volume)
return True
else:
return False
def deleteVolumebyProvider(self, provider):
- LOG.info("delVolumeP: provider %s" % provider)
+ LOG.info("delVolumeP: provider %s", provider)
volume = self.getVolumebyProvider(provider)
if volume:
- LOG.info("deleteVolumeP: deleted name %s provider %s"
- % (volume['name'], volume['provider_location']))
+ LOG.info("deleteVolumeP: deleted name %s provider %s",
+ volume['name'], volume['provider_location'])
self.volumes.remove(volume)
return True
else:
return self.volumes
def getVolume(self, name):
- LOG.info("getVolume: find by name %s" % name)
+ LOG.info("getVolume: find by name %s", name)
if self.volumes:
for volume in self.volumes:
if str(volume['name']) == name:
- LOG.info("getVolume: found name %s provider %s"
- % (volume['name'], volume['provider_location']))
+ LOG.info("getVolume: found name %s provider %s",
+ volume['name'], volume['provider_location'])
return volume
else:
LOG.info("getVolume: no volumes")
return None
def getVolumebyProvider(self, provider):
- LOG.info("getVolumeP: find by provider %s" % provider)
+ LOG.info("getVolumeP: find by provider %s", provider)
if self.volumes:
for volume in self.volumes:
if str(volume['provider_location']) == provider:
- LOG.info("getVolumeP: found name %s provider %s"
- % (volume['name'], volume['provider_location']))
+ LOG.info("getVolumeP: found name %s provider %s",
+ volume['name'], volume['provider_location'])
return volume
else:
LOG.info("getVolumeP: no volumes")
return None
def createVolume(self, name, provider, sizeMiB, comment):
- LOG.info("createVolume: name %s provider %s comment %s"
- % (name, provider, comment))
+ LOG.info("createVolume: name %s provider %s comment %s",
+ name, provider, comment)
new_vol = {'additionalStates': [],
'adminSpace': {'freeMiB': 0,
def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
_out = ""
id = "myID"
- LOG.info("Delete_Lu: check lun %s id %s" % (lun, id))
+ LOG.info("Delete_Lu: check lun %s id %s", lun, id)
if self.deleteVolumebyProvider(id + '.' + str(lun)):
- LOG.warn("Delete_Lu: failed to delete lun %s id %s" % (lun, id))
+ LOG.warning("Delete_Lu: failed to delete lun %s id %s", lun, id)
return _out
def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
(self.start_lun, size))
id = name
- LOG.info("HNAS Create_Dup: %d" % self.start_lun)
+ LOG.info("HNAS Create_Dup: %d", self.start_lun)
self.createVolume(name, id + '.' + str(self.start_lun), size,
"create-dup")
self.start_lun += 1
self.init_index += 1
self.target_index += 1
self.hlun += 1
- LOG.debug("Created connection %d" % self.init_index)
+ LOG.debug("Created connection %d", self.init_index)
self.connections.append(conn)
return _out
_out = ("LUN: %s successfully extended to %s MB" % (lu, size))
id = name
self.out = _out
- LOG.info("extend_vol: lu: %s %d -> %s" % (lu, int(size), self.out))
+ LOG.info("extend_vol: lu: %s %d -> %s", lu, int(size), self.out)
v = self.getVolumebyProvider(id + '.' + str(lu))
if v:
v['sizeMiB'] = size
- LOG.info("extend_vol: out %s %s" % (self.out, self))
+ LOG.info("extend_vol: out %s %s", self.out, self)
return _out
def get_luns(self):
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
- LOG.warn(msg, {'s_pid': context.project_id,
- 's_size': volume['size'],
- 'd_consumed': _consumed(over),
- 'd_quota': quotas[over]})
+ LOG.warning(msg, {'s_pid': context.project_id,
+ 's_size': volume['size'],
+ 'd_consumed': _consumed(over),
+ 'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
- LOG.warn(msg, {'s_pid': context.project_id,
- 'd_consumed': _consumed(over)})
+ LOG.warning(msg, {'s_pid': context.project_id,
+ 'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank.")
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters.")
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters.")
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
@wrap_check_policy
{'initiator': initiator, 'rc': rc, 'ret': ret})
hardwareIdList = ret['HardwareID']
else:
- LOG.warn(_LW("CreateStorageHardwareID failed. initiator: "
- "%(initiator)s, rc=%(rc)d, ret=%(ret)s."),
- {'initiator': initiator, 'rc': rc, 'ret': ret})
+ LOG.warning(_LW("CreateStorageHardwareID failed. initiator: "
+ "%(initiator)s, rc=%(rc)d, ret=%(ret)s."),
+ {'initiator': initiator, 'rc': rc, 'ret': ret})
return hardwareIdList
def _get_hardware_type(
if 'iqn' in initiator.lower():
hardwareTypeId = 5
if hardwareTypeId == 0:
- LOG.warn(_LW("Cannot determine the hardware type."))
+ LOG.warning(_LW("Cannot determine the hardware type."))
return hardwareTypeId
from cinder import exception
from cinder import flow_utils
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
from cinder import policy
from cinder import quota
from cinder import utils
for (k, v) in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key %s greater than 255 "
"characters") % k
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property key %s value greater than"
" 255 characters") % k
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
def _extract_availability_zone(self, availability_zone, snapshot,
availability_zone = CONF.storage_availability_zone
if availability_zone not in self.availability_zones:
msg = _("Availability zone '%s' is invalid") % (availability_zone)
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.InvalidInput(reason=msg)
# If the configuration only allows cloning to the same availability
current_volume_type_id = volume_type.get('id')
if (current_volume_type_id !=
snapshot['volume_type_id']):
- msg = _("Volume type will be changed to "
- "be the same as the source volume.")
- LOG.warn(msg)
+ msg = _LW("Volume type will be changed to "
+ "be the same as the source volume.")
+ LOG.warning(msg)
volume_type_id = snapshot['volume_type_id']
else:
volume_type_id = volume_type.get('id')
return False
if _is_over('gigabytes'):
- msg = _("Quota exceeded for %(s_pid)s, tried to create "
- "%(s_size)sG volume (%(d_consumed)dG "
- "of %(d_quota)dG already consumed)")
- LOG.warn(msg % {'s_pid': context.project_id,
- 's_size': size,
- 'd_consumed': _consumed('gigabytes'),
- 'd_quota': quotas['gigabytes']})
+ msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+ "%(s_size)sG volume (%(d_consumed)dG "
+ "of %(d_quota)dG already consumed)")
+ LOG.warning(msg, {'s_pid': context.project_id,
+ 's_size': size,
+ 'd_consumed': _consumed('gigabytes'),
+ 'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif _is_over('volumes'):
- msg = _("Quota exceeded for %(s_pid)s, tried to create "
- "volume (%(d_consumed)d volumes "
- "already consumed)")
- LOG.warn(msg % {'s_pid': context.project_id,
- 'd_consumed': _consumed('volumes')})
+ msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+ "volume (%(d_consumed)d volumes "
+ "already consumed)")
+ LOG.warning(msg, {'s_pid': context.project_id,
+ 'd_consumed': _consumed('volumes')})
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
else:
# If nothing was reraised, ensure we reraise the initial error
values = dict(name=name, qos_specs=specs)
- LOG.debug("Dict for qos_specs: %s" % values)
+ LOG.debug("Dict for qos_specs: %s", values)
try:
qos_specs_ref = db.qos_specs_create(context, values)
- except db_exc.DBError as e:
- LOG.exception(_LE('DB error: %s') % e)
+ except db_exc.DBError:
+ LOG.exception(_LE('DB error:'))
raise exception.QoSSpecsCreateFailed(name=name,
qos_specs=specs)
return qos_specs_ref
LOG.debug('qos_specs.update(): specs %s' % specs)
try:
res = db.qos_specs_update(context, qos_specs_id, specs)
- except db_exc.DBError as e:
- LOG.exception(_LE('DB error: %s') % e)
+ except db_exc.DBError:
+ LOG.exception(_LE('DB error:'))
raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id,
qos_specs=specs)
try:
# query returns a list of volume types associated with qos specs
associates = db.qos_specs_associations_get(context, specs_id)
- except db_exc.DBError as e:
- LOG.exception(_LE('DB error: %s') % e)
+ except db_exc.DBError:
+ LOG.exception(_LE('DB error:'))
msg = _('Failed to get all associations of '
'qos specs %s') % specs_id
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.CinderException(message=msg)
result = []
raise exception.InvalidVolumeType(reason=msg)
else:
db.qos_specs_associate(context, specs_id, type_id)
- except db_exc.DBError as e:
- LOG.exception(_LE('DB error: %s') % e)
- LOG.warn(_LW('Failed to associate qos specs '
- '%(id)s with type: %(vol_type_id)s') %
- dict(id=specs_id, vol_type_id=type_id))
+ except db_exc.DBError:
+ LOG.exception(_LE('DB error:'))
+ LOG.warning(_LW('Failed to associate qos specs '
+ '%(id)s with type: %(vol_type_id)s'),
+ dict(id=specs_id, vol_type_id=type_id))
raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
type_id=type_id)
try:
get_qos_specs(context, specs_id)
db.qos_specs_disassociate(context, specs_id, type_id)
- except db_exc.DBError as e:
- LOG.exception(_LE('DB error: %s') % e)
- LOG.warn(_LW('Failed to disassociate qos specs '
- '%(id)s with type: %(vol_type_id)s') %
- dict(id=specs_id, vol_type_id=type_id))
+ except db_exc.DBError:
+ LOG.exception(_LE('DB error:'))
+ LOG.warning(_LW('Failed to disassociate qos specs '
+ '%(id)s with type: %(vol_type_id)s'),
+ dict(id=specs_id, vol_type_id=type_id))
raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
type_id=type_id)
try:
get_qos_specs(context, specs_id)
db.qos_specs_disassociate_all(context, specs_id)
- except db_exc.DBError as e:
- LOG.exception(_LE('DB error: %s') % e)
- LOG.warn(_LW('Failed to disassociate qos specs %s.') % specs_id)
+ except db_exc.DBError:
+ LOG.exception(_LE('DB error:'))
+ LOG.warning(_LW('Failed to disassociate qos specs %s.'), specs_id)
raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
type_id=None)
qos_specs = db.qos_specs_get_all(context, inactive)
if search_opts:
- LOG.debug("Searching by: %s" % search_opts)
+ LOG.debug("Searching by: %s", search_opts)
def _check_specs_match(qos_specs, searchdict):
for k, v in searchdict.iteritems():
            # A missing config file is an unexpected situation, but we will
            # create a new config file during create_iscsi_target(). We just
            # warn the operator here.
- LOG.warn(_LW("Failed to find CHAP auth from config for "
- "%(vol_id)s. Config file %(conf)s does not exist."),
- {'vol_id': vol_id, 'conf': self.iet_conf})
+ LOG.warning(_LW("Failed to find CHAP auth from config for "
+ "%(vol_id)s. Config file %(conf)s does not "
+ "exist."),
+ {'vol_id': vol_id, 'conf': self.iet_conf})
return None
def create_iscsi_target(self, name, tid, lun, path,
iet_conf_text.writelines(new_iet_conf_txt)
except Exception:
LOG.exception(_LE("Failed to update %(conf)s for volume id "
- "%(vol_id) after removing iscsi target"),
+ "%(vol_id)s after removing iscsi target"),
{'conf': conf_file, 'vol_id': vol_id})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
else:
- LOG.warn(_LW("Failed to update %(conf)s for volume id %(vol_id) "
- "after removing iscsi target. "
- "%(conf)s does not exist."),
- {'conf': conf_file, 'vol_id': vol_id})
+ LOG.warning(_LW("Failed to update %(conf)s for volume id "
+ "%(vol_id)s after removing iscsi target. "
+ "%(conf)s does not exist."),
+ {'conf': conf_file, 'vol_id': vol_id})
def _find_sid_cid_for_target(self, tid, name, vol_id):
"""Find sid, cid for existing iscsi target"""
utils.execute('cgset', '-r', 'blkio.throttle.%s_bps_device=%s %d'
% (rw, dev, bps), self.cgroup, run_as_root=True)
except processutils.ProcessExecutionError:
- LOG.warn(_LW('Failed to setup blkio cgroup to throttle the '
- 'device \'%(device)s\'.'), {'device': dev})
+ LOG.warning(_LW('Failed to setup blkio cgroup to throttle the '
+ 'device \'%(device)s\'.'), {'device': dev})
def _set_limits(self, rw, devs):
total = sum(devs.itervalues())
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _LI, _LW
from cinder import rpc
from cinder import utils
from cinder.volume import throttling
raise ValueError
bs = strutils.string_to_bytes('%sB' % blocksize)
except ValueError:
- msg = (_("Incorrect value error: %(blocksize)s, "
- "it may indicate that \'volume_dd_blocksize\' "
- "was configured incorrectly. Fall back to default.")
- % {'blocksize': blocksize})
- LOG.warn(msg)
+ LOG.warning(_LW("Incorrect value error: %(blocksize)s, "
+ "it may indicate that \'volume_dd_blocksize\' "
+ "was configured incorrectly. Fall back to default."),
+ {'blocksize': blocksize})
# Fall back to default blocksize
CONF.clear_override('volume_dd_blocksize')
blocksize = CONF.volume_dd_blocksize
if duration < 1:
duration = 1
mbps = (size_in_m / duration)
- mesg = ("Volume copy details: src %(src)s, dest %(dest)s, "
- "size %(sz).2f MB, duration %(duration).2f sec")
- LOG.debug(mesg % {"src": srcstr,
- "dest": deststr,
- "sz": size_in_m,
- "duration": duration})
- mesg = _("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s")
- LOG.info(mesg % {'size_in_m': size_in_m, 'mbps': mbps})
+ LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, "
+ "size %(sz).2f MB, duration %(duration).2f sec",
+ {"src": srcstr,
+ "dest": deststr,
+ "sz": size_in_m,
+ "duration": duration})
+ LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"),
+ {'size_in_m': size_in_m, 'mbps': mbps})
def copy_volume(srcstr, deststr, size_in_m, blocksize, sync=False,
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
- LOG.info(_LI("Performing secure delete on volume: %s") % volume_path)
+ LOG.info(_LI("Performing secure delete on volume: %s"), volume_path)
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
- LOG.info(_LI('Elapsed time for clear volume: %.2f sec') % duration)
+ LOG.info(_LI('Elapsed time for clear volume: %.2f sec'), duration)
def supports_thin_provisioning():