# TODO(smcginnis): The following is temporary as a series
# of patches are done to address these issues. It should be
# removed completely when bug 1433216 is closed.
- ignore_dirs = [
- "cinder/openstack",
- "cinder/volume"]
+ ignore_dirs = ["cinder/openstack"]
for directory in ignore_dirs:
if directory in filename:
return
self._driver._refresh_mounts()
self.assertTrue(mock_unmount_shares.called)
- self.assertTrue(mock_logger.warn.called)
+ self.assertTrue(mock_logger.warning.called)
self.assertTrue(mock_ensure_shares_mounted.called)
mock_unmount_shares.reset_mock()
mock_ensure_shares_mounted.reset_mock()
mock_logger.reset_mock()
- mock_logger.warn.reset_mock()
+ mock_logger.warning.reset_mock()
mock_stderr = _("umount: <mnt_path>: some other error")
mock_unmount_shares.side_effect = \
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_do_create_volume'):
self._driver.create_volume(FakeVolume(host, 1))
- warn_msg = 'Extra spec netapp:raid_type is obsolete. ' \
- 'Use netapp_raid_type instead.'
- utils.LOG.warning.assert_called_once_with(warn_msg)
+ warn_msg = ('Extra spec %(old)s is obsolete. Use %(new)s '
+ 'instead.')
+ utils.LOG.warning.assert_called_once_with(
+ warn_msg, {'new': 'netapp_raid_type',
+ 'old': 'netapp:raid_type'})
@mock.patch.object(utils, 'LOG', mock.Mock())
def test_create_volume_deprecated_extra_spec(self):
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_do_create_volume'):
self._driver.create_volume(FakeVolume(host, 1))
- warn_msg = 'Extra spec netapp_thick_provisioned is ' \
- 'deprecated. Use netapp_thin_provisioned instead.'
- utils.LOG.warning.assert_called_once_with(warn_msg)
+ warn_msg = ('Extra spec %(old)s is deprecated. Use %(new)s '
+ 'instead.')
+ utils.LOG.warning.assert_called_once_with(
+ warn_msg, {'new': 'netapp_thin_provisioned',
+ 'old': 'netapp_thick_provisioned'})
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv._set_rw_permissions(self.TEST_FILE_NAME)
- self.assertFalse(LOG.warn.called)
+ self.assertFalse(LOG.warning.called)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
drv._set_rw_permissions(self.TEST_FILE_NAME)
- self.assertTrue(LOG.warn.called)
- warn_msg = "%s is being set with open permissions: ugo+rw" % \
- self.TEST_FILE_NAME
- LOG.warn.assert_called_once_with(warn_msg)
+ self.assertTrue(LOG.warning.called)
+ warn_msg = "%(path)s is being set with open permissions: %(perm)s"
+ LOG.warning.assert_called_once_with(
+ warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'})
@mock.patch('os.path.join')
@mock.patch('os.path.isfile', return_value=False)
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
- self.assertTrue(LOG.warn.called)
+ self.assertTrue(LOG.warning.called)
def test_secure_file_operations_enabled_true(self):
"""Test nas_secure_file_operations = 'true'
self.assertEqual('true', drv.configuration.nas_secure_file_operations)
self.assertEqual('true', drv.configuration.nas_secure_file_permissions)
- self.assertFalse(LOG.warn.called)
+ self.assertFalse(LOG.warning.called)
@mock.patch.object(nfs, 'LOG')
def test_set_nas_security_options_when_false(self, LOG):
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
- self.assertTrue(LOG.warn.called)
+ self.assertTrue(LOG.warning.called)
def test_set_nas_security_options_exception_if_no_mounted_shares(self):
"""Ensure proper exception is raised if there are no mounted shares."""
mock_execute.assert_has_calls([mkdir_call, mount_call],
any_order=False)
- mock_LOG.warn.assert_called_once_with('%s is already mounted',
- self.TEST_QUOBYTE_VOLUME)
+ mock_LOG.warning.assert_called_once_with('%s is already mounted',
+ self.TEST_QUOBYTE_VOLUME)
def test_mount_quobyte_should_reraise_already_mounted_error(self):
"""Same as
'id': uuid.uuid4(),
'host': 'hostname@backend#vol1'})
- warn_msg = 'Extra spec netapp:raid_type is obsolete. ' \
- 'Use netapp_raid_type instead.'
- na_utils.LOG.warning.assert_called_once_with(warn_msg)
+ warn_msg = 'Extra spec %(old)s is obsolete. Use %(new)s instead.'
+ na_utils.LOG.warning.assert_called_once_with(
+ warn_msg, {'new': 'netapp_raid_type', 'old': 'netapp:raid_type'})
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_create_lun', mock.Mock())
'id': uuid.uuid4(),
'host': 'hostname@backend#vol1'})
- warn_msg = 'Extra spec netapp_thick_provisioned is deprecated. ' \
- 'Use netapp_thin_provisioned instead.'
- na_utils.LOG.warning.assert_called_once_with(warn_msg)
+ warn_msg = "Extra spec %(old)s is deprecated. Use %(new)s instead."
+ na_utils.LOG.warning.assert_called_once_with(
+ warn_msg, {'new': 'netapp_thin_provisioned',
+ 'old': 'netapp_thick_provisioned'})
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup_san_configured(self, mock_check_flags):
self.terminate_connection(volume, properties, force=force)
except Exception as err:
err_msg = (_('Unable to terminate volume connection: %(err)s')
- % {'err': err})
+ % {'err': six.text_type(err)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
- LOG.debug(("volume %s: removing export"), volume['id'])
+ LOG.debug("volume %s: removing export", volume['id'])
self.remove_export(context, volume)
except Exception as ex:
LOG.exception(_LE("Error detaching volume %(volume)s, "
cgroup_name)
except processutils.ProcessExecutionError as err:
LOG.warning(_LW('Failed to activate volume copy throttling: '
- '%(err)s'), {'err': six.text_type(err)})
+ '%(err)s'), {'err': err})
throttling.Throttle.set_default(self._throttle)
def get_version(self):
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
- LOG.debug(('copy_data_between_volumes %(src)s -> %(dest)s.')
- % {'src': src_vol['name'], 'dest': dest_vol['name']})
+ LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.', {
+ 'src': src_vol['name'], 'dest': dest_vol['name']})
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
remote=dest_remote)
except Exception:
with excutils.save_and_reraise_exception():
- msg = _("Failed to attach volume %(vol)s")
- LOG.error(msg % {'vol': dest_vol['id']})
+ LOG.error(_LE("Failed to attach volume %(vol)s"),
+ {'vol': dest_vol['id']})
self.db.volume_update(context, dest_vol['id'],
{'status': dest_orig_status})
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
- msg = _("Failed to attach volume %(vol)s")
- LOG.error(msg % {'vol': src_vol['id']})
+ LOG.error(_LE("Failed to attach volume %(vol)s"),
+ {'vol': src_vol['id']})
self.db.volume_update(context, src_vol['id'],
{'status': src_orig_status})
self._detach_volume(context, dest_attach_info, dest_vol,
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
- msg = _("Failed to copy volume %(src)s to %(dest)s.")
- LOG.error(msg % {'src': src_vol['id'], 'dest': dest_vol['id']})
+ LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
+ {'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
self._detach_volume(context, dest_attach_info, dest_vol,
properties, force=copy_error,
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
- LOG.debug(('copy_image_to_volume %s.') % volume['name'])
+ LOG.debug('copy_image_to_volume %s.', volume['name'])
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
- LOG.debug(('copy_volume_to_image %s.') % volume['name'])
+ LOG.debug('copy_volume_to_image %s.', volume['name'])
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
# clean this up in the future.
model_update = None
try:
- LOG.debug(("Volume %s: creating export"), volume['id'])
+ LOG.debug("Volume %s: creating export", volume['id'])
model_update = self.create_export(context, volume)
if model_update:
volume = self.db.volume_update(context, volume['id'],
if model_update:
LOG.exception(_LE("Failed updating model of volume "
"%(volume_id)s with driver provided "
- "model %(model)s") %
+ "model %(model)s"),
{'volume_id': volume['id'],
'model': model_update})
raise exception.ExportFailure(reason=ex)
except Exception as err:
try:
err_msg = (_('Unable to fetch connection information from '
- 'backend: %(err)s') % {'err': err})
+ 'backend: %(err)s') %
+ {'err': six.text_type(err)})
LOG.error(err_msg)
LOG.debug("Cleaning up failed connect initialization.")
self.remove_export(context, volume)
except Exception as ex:
ex_msg = (_('Error encountered during cleanup '
- 'of a failed attach: %(ex)s') % {'ex': ex})
+ 'of a failed attach: %(ex)s') %
+ {'ex': six.text_type(ex)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=ex_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
- LOG.debug(('Creating a new backup for volume %s.') %
- volume['name'])
+ LOG.debug('Creating a new backup for volume %s.', volume['name'])
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.debug(('Restoring backup %(backup)s to '
- 'volume %(volume)s.') %
+ 'volume %(volume)s.'),
{'backup': backup['id'],
'volume': volume['name']})
def _do_iscsi_discovery(self, volume):
# TODO(justinsb): Deprecate discovery and use stored info
# NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
- LOG.warn(_LW("ISCSI provider_location not "
- "stored, using discovery"))
+ LOG.warning(_LW("ISCSI provider_location not "
+ "stored, using discovery"))
volume_name = volume['name']
volume['host'].split('@')[0],
run_as_root=True)
except processutils.ProcessExecutionError as ex:
- LOG.error(_LE("ISCSI discovery attempt failed for:%s") %
+ LOG.error(_LE("ISCSI discovery attempt failed for:%s"),
volume['host'].split('@')[0])
- LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr)
+ LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
return None
for target in out.splitlines():
(volume['name']))
raise exception.InvalidVolume(reason=msg)
- LOG.debug("ISCSI Discovery: Found %s" % (location))
+ LOG.debug("ISCSI Discovery: Found %s", location)
properties['target_discovered'] = True
results = location.split(" ")
'-p', iscsi_properties['target_portal'],
*iscsi_command, run_as_root=True,
check_exit_code=check_exit_code)
- LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
- (iscsi_command, out, err))
+ LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
+ {'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
*iscsi_command,
run_as_root=True,
check_exit_code=check_exit_code)
- LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
- (iscsi_command, out, err))
+ LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
+ {'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
# iSCSI drivers require the initiator information
required = 'initiator'
if required not in connector:
- err_msg = (_LE('The volume driver requires %(data)s '
- 'in the connector.'), {'data': required})
- LOG.error(*err_msg)
+ LOG.error(_LE('The volume driver requires %(data)s '
+ 'in the connector.'), {'data': required})
raise exception.InvalidConnectorException(missing=required)
def terminate_connection(self, volume, connector, **kwargs):
def validate_connector_has_setting(connector, setting):
"""Test for non-empty setting in connector."""
if setting not in connector or not connector[setting]:
- msg = (_LE(
+ LOG.error(_LE(
"FibreChannelDriver validate_connector failed. "
"No '%(setting)s'. Make sure HBA state is Online."),
{'setting': setting})
- LOG.error(*msg)
raise exception.InvalidConnectorException(missing=setting)
def get_volume_stats(self, refresh=False):
def create_volume(self, volume):
device = self.find_appropriate_size_device(volume['size'])
- LOG.info("Create %s on %s" % (volume['name'], device))
+ LOG.info(_LI("Create %(volume)s on %(device)s"),
+ {"volume": volume['name'], "device": device})
return {
'provider_location': device,
}
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
- LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
+ LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
device = self.find_appropriate_size_device(src_vref['size'])
volutils.copy_volume(
self.local_path(src_vref), device,
from oslo_utils import excutils
from oslo_utils import units
import requests
+import six
from cinder import exception
-from cinder.i18n import _, _LE, _LW
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import versionutils
from cinder import utils
from cinder.volume.drivers.san import san
try:
self._issue_api_request('volumes', 'delete', volume['id'])
except exception.NotFound:
- msg = _("Tried to delete volume %s, but it was not found in the "
- "Datera cluster. Continuing with delete.")
- LOG.info(msg, volume['id'])
+ LOG.info(_LI("Tried to delete volume %s, but it was not found in "
+ "the Datera cluster. Continuing with delete."),
+ volume['id'])
def _do_export(self, context, volume):
"""Gets the associated account, retrieves CHAP info and updates."""
self._issue_api_request('volumes', 'delete', resource=volume['id'],
action='export')
except exception.NotFound:
- msg = _("Tried to delete export for volume %s, but it was not "
- "found in the Datera cluster. Continuing with volume "
- "detach")
- LOG.info(msg, volume['id'])
+ LOG.info(_LI("Tried to delete export for volume %s, but it was "
+ "not found in the Datera cluster. Continuing with "
+ "volume detach"), volume['id'])
def delete_snapshot(self, snapshot):
try:
self._issue_api_request('snapshots', 'delete', snapshot['id'])
except exception.NotFound:
- msg = _("Tried to delete snapshot %s, but was not found in Datera "
- "cluster. Continuing with delete.")
- LOG.info(msg, snapshot['id'])
+ LOG.info(_LI("Tried to delete snapshot %s, but was not found in "
+ "Datera cluster. Continuing with delete."),
+ snapshot['id'])
def create_snapshot(self, snapshot):
body = {
try:
self._update_cluster_stats()
except exception.DateraAPIException:
- LOG.error('Failed to get updated stats from Datera cluster.')
+ LOG.error(_LE('Failed to get updated stats from Datera '
+ 'cluster.'))
pass
return self.cluster_stats
verify=False, cert=cert_data)
except requests.exceptions.RequestException as ex:
msg = _('Failed to make a request to Datera cluster endpoint due '
- 'to the following reason: %s') % ex.message
+ 'to the following reason: %s') % six.text_type(ex.message)
LOG.error(msg)
raise exception.DateraAPIException(msg)
conn_info = self.cli.initialize_connection(volume,
connector)
LOG.debug("Exit initialize_connection"
- " - Returning FC connection info: %(conn_info)s."
- % {'conn_info': conn_info})
+ " - Returning FC connection info: %(conn_info)s.",
+ {'conn_info': conn_info})
return conn_info
@zm_utils.RemoveFCZone
"""Disallow connection from connector."""
conn_info = self.cli.terminate_connection(volume, connector)
LOG.debug("Exit terminate_connection"
- " - Returning FC connection info: %(conn_info)s."
- % {'conn_info': conn_info})
+ " - Returning FC connection info: %(conn_info)s.",
+ {'conn_info': conn_info})
return conn_info
def get_volume_stats(self, refresh=False):
'id':lun_id
}
"""
- LOG.debug("Reference lun id %s." % existing_ref['id'])
+ LOG.debug("Reference lun id %s.", existing_ref['id'])
self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
'id':lun_id
}
"""
- LOG.debug("Reference lun id %s." % existing_ref['id'])
+ LOG.debug("Reference lun id %s.", existing_ref['id'])
self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
device_number = device_info['hostlunid']
if device_number is None:
LOG.info(_LI("Volume %s is not mapped. No volume to unmap."),
- (volumename))
+ volumename)
return
vol_instance = self._find_lun(volume)
(self.masking
._check_if_rollback_action_for_masking_required(
self.conn, rollbackDict))
- exception_message = ("Error Attaching volume %(vol)s."
+ exception_message = (_("Error Attaching volume %(vol)s.")
% {'vol': volumeName})
raise exception.VolumeBackendAPIException(
data=exception_message)
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
- LOG.warn(_LW("The VMAX plugin only supports Retype. "
- "If a pool based migration is necessary "
- "this will happen on a Retype "
- "From the command line: "
- "cinder --os-volume-api-version 2 retype "
- "<volumeId> <volumeType> --migration-policy on-demand"))
+ LOG.warning(_LW("The VMAX plugin only supports Retype. "
+ "If a pool based migration is necessary "
+ "this will happen on a Retype "
+ "From the command line: "
+ "cinder --os-volume-api-version 2 retype <volumeId> "
+ "<volumeType> --migration-policy on-demand"))
return True, {}
def _migrate_volume(
if moved is False and sourceFastPolicyName is not None:
# Return the volume to the default source fast policy storage
# group because the migrate was unsuccessful.
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Failed to migrate: %(volumeName)s from "
"default source storage group "
"for FAST policy: %(sourceFastPolicyName)s. "
if not self._migrate_volume_fast_target(
volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Attempting a rollback of: %(volumeName)s to "
"original pool %(sourcePoolInstanceName)s."),
{'volumeName': volumeName,
:param extraSpecs: extra specifications
"""
- LOG.warn(_LW("_migrate_rollback on : %(volumeName)s."),
- {'volumeName': volumeName})
+ LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."),
+ {'volumeName': volumeName})
storageRelocationService = self.utils.find_storage_relocation_service(
conn, storageSystemName)
:param extraSpecs: extra specifications
"""
- LOG.warn(_LW("_migrate_cleanup on : %(volumeName)s."),
- {'volumeName': volumeName})
+ LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."),
+ {'volumeName': volumeName})
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
rc = self.provision.migrate_volume_to_storage_pool(
self.conn, storageRelocationService, volumeInstance.path,
targetPoolInstanceName, extraSpecs)
- except Exception as e:
+ except Exception:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
- LOG.error(_LE("Exception: %s"), e)
- LOG.error(_LE(
+ LOG.exception(_LE(
"Error migrating volume: %(volumename)s. "
"to target pool %(targetPoolName)s."),
{'volumename': volumeName,
conn, controllerConfigurationService,
volumeInstance.path, volumeName, sourceFastPolicyName,
extraSpecs))
- except Exception as ex:
- LOG.error(_LE("Exception: %s"), ex)
+ except Exception:
exceptionMessage = (_(
"Failed to remove: %(volumename)s. "
"from the default storage group for "
% {'volumename': volumeName,
'fastPolicyName': sourceFastPolicyName})
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if defaultStorageGroupInstanceName is None:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"The volume: %(volumename)s "
"was not first part of the default storage "
"group for FAST policy %(fastPolicyName)s."),
self.utils.get_storage_group_from_volume(
self.conn, volumeInstanceName))
if foundStorageGroupInstanceName is None:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Volume: %(volumeName)s is not currently "
"belonging to any storage group."),
{'volumeName': volumeName})
_rc, targetEndpoints = (
self.provision.get_target_endpoints(
self.conn, storageHardwareService, hardwareIdInstance))
- except Exception as ex:
- LOG.error(_LE("Exception: %s"), ex)
+ except Exception:
errorMessage = (_(
"Unable to get target endpoints for hardwareId "
"%(hardwareIdInstance)s.")
% {'hardwareIdInstance': hardwareIdInstance})
- LOG.error(errorMessage)
+ LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
if targetEndpoints:
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
- except Exception as e:
+ except Exception:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
- LOG.error(_LE("Exception: %s"), e)
errorMessage = (_(
"Rolling back %(volumeName)s by deleting it.")
% {'volumeName': volumeName})
- LOG.error(errorMessage)
+ LOG.exception(errorMessage)
self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
self.masking.get_associated_masking_groups_from_device(
self.conn, volumeInstanceName))
if storageGroupInstanceNames:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Pre check for deletion. "
"Volume: %(volumeName)s is part of a storage group. "
"Attempting removal from %(storageGroupInstanceNames)s."),
repservice = self.utils.find_replication_service(self.conn,
storageSystem)
if repservice is None:
- exception_message = (_LE(
+ exception_message = _(
"Cannot find Replication Service to"
- " delete snapshot %s.") %
- snapshotname)
+ " delete snapshot %s.") % snapshotname
raise exception.VolumeBackendAPIException(
data=exception_message)
# Break the replication relationship
self.conn, storageSystem)
self.provision.create_consistency_group(
self.conn, replicationService, cgName, extraSpecs)
- except Exception as ex:
- LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
+ except Exception:
exceptionMessage = (_("Failed to create consistency group:"
" %(cgName)s.")
% {'cgName': cgName})
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate
storageSystem, memberInstanceNames, storageConfigservice,
volumes, modelUpdate, extraSpecs[ISV3], extraSpecs)
- except Exception as ex:
- LOG.error(_LE("Exception: %s"), ex)
+ except Exception:
exceptionMessage = (_(
"Failed to delete consistency group: %(cgName)s.")
% {'cgName': cgName})
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate, volumes
rgSyncInstanceName,
extraSpecs)
- except Exception as ex:
+ except Exception:
modelUpdate['status'] = 'error'
self.utils.populate_cgsnapshot_status(
context, db, cgsnapshot['id'], modelUpdate['status'])
- LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
exceptionMessage = (_("Failed to create snapshot for cg:"
" %(cgName)s.")
% {'cgName': cgName})
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
snapshots = self.utils.populate_cgsnapshot_status(
modelUpdate, snapshots = self._delete_cg_and_members(
storageSystem, targetCgName, modelUpdate,
snapshots, extraSpecs)
- except Exception as ex:
+ except Exception:
modelUpdate['status'] = 'error_deleting'
self.utils.populate_cgsnapshot_status(
context, db, cgsnapshot['id'], modelUpdate['status'])
- LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
exceptionMessage = (_("Failed to delete snapshot for cg: "
"%(cgId)s.")
% {'cgId': cgsnapshot['consistencygroup_id']})
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
snapshots = self.utils.populate_cgsnapshot_status(
extraSpecs))
if not self.utils.is_in_range(
volumeSize, maximumVolumeSize, minimumVolumeSize):
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Volume: %(volume)s with size: %(volumeSize)s bits "
"is not in the Performance Capacity range: "
"%(minimumVolumeSize)s-%(maximumVolumeSize)s bits. "
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path))
if foundStorageGroupInstanceName is None:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Volume : %(volumeName)s is not currently "
"belonging to any storage group."),
{'volumeName': volumeName})
volumeInstance.path, volumeName, fastPolicyName,
extraSpecs))
if defaultStorageGroupInstanceName is None:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"The volume: %(volumename)s. was not first part of the "
"default storage group for FAST policy %(fastPolicyName)s"
"."),
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
- except Exception as e:
+ except Exception:
# If we cannot successfully delete the volume then we want to
# return the volume to the default storage group.
if (fastPolicyName is not None and
{'volumeName': volumeName,
'fastPolicyName': fastPolicyName})
- LOG.error(_LE("Exception: %s."), e)
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
{'volumeName': volumeName})
- LOG.error(errorMessage)
+ LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return rc
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
- except Exception as e:
+ except Exception:
# If we cannot successfully delete the volume, then we want to
# return the volume to the default storage group,
# which should be the SG it previously belonged to.
storageGroupInstanceName, volumeInstance, volumeName,
storageGroupName, extraSpecs)
- LOG.error(_LE("Exception: %s."), e)
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
{'volumeName': volumeName})
- LOG.error(errorMessage)
+ LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return rc
foundDefaultStorageGroupInstanceName = (
assocStorageGroupInstanceName)
else:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Volume: %(volumeName)s Does not belong "
"to storage storage group %(defaultSgGroupName)s."),
{'volumeName': volumeName,
if len(storageTierInstanceNames) == 0:
storageTierInstanceNames = None
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Unable to get storage tiers from tier policy rule."))
return storageTierInstanceNames
conn, tierPolicyServiceInstanceName,
storageGroupInstanceName, tierPolicyRuleInstanceName,
storageGroupName, fastPolicyName, extraSpecs)
- except Exception as ex:
- LOG.error(_LE("Exception: %s"), ex)
- LOG.error(_LE(
+ except Exception:
+ LOG.exception(_LE(
"Failed to add storage group %(storageGroupInstanceName)s "
"to tier policy rule %(tierPolicyRuleInstanceName)s."),
{'storageGroupInstanceName': storageGroupInstanceName,
LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data})
else:
- LOG.warn(_LW("Volume %(volume)s is not in any masking view."),
- {'volume': volume['name']})
+ LOG.warning(_LW("Volume %(volume)s is not in any masking view."),
+ {'volume': volume['name']})
return data
def _build_initiator_target_map(self, storage_system, volume, connector):
iscsi_properties = self.smis_get_iscsi_properties(
volume, connector)
- LOG.info(_LI("Leaving initialize_connection: %s"), (iscsi_properties))
+ LOG.info(_LI("Leaving initialize_connection: %s"), iscsi_properties)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
" for volume %(volumeName)s.")
% {'volumeName': volume['name']})
- LOG.debug("ISCSI Discovery: Found %s", (location))
+ LOG.debug("ISCSI Discovery: Found %s", location)
properties['target_discovered'] = True
device_info = self.common.find_device_number(volume)
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
- LOG.info(_LI("AUTH properties: %s."), (properties))
+ LOG.info(_LI("AUTH properties: %s."), properties)
return properties
maskingViewDict['workload'])
if assocStorageGroupName != defaultSgGroupName:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Volume: %(volumeName)s Does not belong "
"to storage storage group %(defaultSgGroupName)s."),
{'volumeName': volumeName,
if self._is_volume_in_storage_group(
conn, storageGroupInstanceName,
volumeInstance):
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Volume: %(volumeName)s is already part "
"of storage group %(sgGroupName)s."),
{'volumeName': volumeName,
{'view': maskingViewName,
'masking': foundStorageGroupInstanceName})
else:
- LOG.warn(_LW("Unable to find Masking view: %(view)s."),
- {'view': maskingViewName})
+ LOG.warning(_LW("Unable to find Masking view: %(view)s."),
+ {'view': maskingViewName})
return foundStorageGroupInstanceName
# Volume is not associated with any storage group so add
# it back to the default.
if len(foundStorageGroupInstanceName) == 0:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"No storage group found. "
"Performing rollback on Volume: %(volumeName)s "
"To return it to the default storage group for FAST "
rollbackDict['fastPolicyName'],
rollbackDict['volumeName'], rollbackDict['extraSpecs'],
False)
- except Exception as e:
- LOG.error(_LE("Exception: %s."), e)
+ except Exception:
errorMessage = (_(
"Rollback for Volume: %(volumeName)s has failed. "
"Please contact your system administrator to manually return "
"%(fastPolicyName)s failed.")
% {'volumeName': rollbackDict['volumeName'],
'fastPolicyName': rollbackDict['fastPolicyName']})
- LOG.error(errorMessage)
+ LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
def _find_new_initiator_group(self, conn, maskingGroupDict):
{'view': maskingViewName,
'masking': foundInitiatorMaskingGroupInstanceName})
else:
- LOG.warn(_LW("Unable to find Masking view: %(view)s."),
- {'view': maskingViewName})
+ LOG.warning(_LW("Unable to find Masking view: %(view)s."),
+ {'view': maskingViewName})
return foundInitiatorMaskingGroupInstanceName
volumeName, fastPolicyName))
if defaultStorageGroupInstanceName is None:
- LOG.warn(_LW(
+ LOG.warning(_LW(
"Volume %(volumeName)s was not first part of the default "
"storage group for the FAST Policy."),
{'volumeName': volumeName})
if numVolInMaskingView == 1:
# Last volume in the storage group.
- LOG.warn(_LW("Only one volume remains in storage group "
- "%(sgname)s. Driver will attempt cleanup."),
- {'sgname': storageGroupName})
+ LOG.warning(_LW("Only one volume remains in storage group "
+ "%(sgname)s. Driver will attempt cleanup."),
+ {'sgname': storageGroupName})
mvInstanceName = self.get_masking_view_from_storage_group(
conn, storageGroupInstanceName)
if mvInstanceName is None:
- LOG.warn(_LW("Unable to get masking view %(maskingView)s "
- "from storage group."),
- {'maskingView': mvInstanceName})
+ LOG.warning(_LW("Unable to get masking view %(maskingView)s "
+ "from storage group."),
+ {'maskingView': mvInstanceName})
else:
maskingViewInstance = conn.GetInstance(
mvInstanceName, LocalOnly=False)
ResultClass='Symm_FCSCSIProtocolEndpoint')
numberOfPorts = len(targetPortInstanceNames)
if numberOfPorts <= 0:
- LOG.warn(_LW("No target ports found in "
- "masking view %(maskingView)s."),
- {'numPorts': len(targetPortInstanceNames),
- 'maskingView': mvInstanceName})
+ LOG.warning(_LW("No target ports found in "
+ "masking view %(maskingView)s."),
+ {'numPorts': len(targetPortInstanceNames),
+ 'maskingView': mvInstanceName})
for targetPortInstanceName in targetPortInstanceNames:
targetWwns.append(targetPortInstanceName['Name'])
return targetWwns
'mv': maskingViewInstanceName})
return portGroupInstanceNames[0]
else:
- LOG.warn(_LW("No port group found in masking view %(mv)s."),
- {'mv': maskingViewInstanceName})
+ LOG.warning(_LW("No port group found in masking view %(mv)s."),
+ {'mv': maskingViewInstanceName})
def get_initiator_group_from_masking_view(
self, conn, maskingViewInstanceName):
'mv': maskingViewInstanceName})
return initiatorGroupInstanceNames[0]
else:
- LOG.warn(_LW("No port group found in masking view %(mv)s."),
- {'mv': maskingViewInstanceName})
+ LOG.warning(_LW("No port group found in masking view %(mv)s."),
+ {'mv': maskingViewInstanceName})
def _get_sg_or_mv_associated_with_initiator(
self, conn, controllerConfigService, volumeInstanceName,
import six
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _
from cinder.volume.drivers.emc import emc_vmax_utils
try:
rc = self._terminate_migrate_session(
conn, volumeInstanceName, extraSpecs)
- except Exception as ex:
- LOG.error(_LE('Exception: %s.'), ex)
+ except Exception:
exceptionMessage = (_(
"Failed to terminate migrate session."))
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
try:
conn, storageRelocationServiceInstanceName,
volumeInstanceName, targetPoolInstanceName,
extraSpecs)
- except Exception as ex:
- LOG.error(_LE('Exception: %s'), ex)
+ except Exception:
exceptionMessage = (_(
"Failed to migrate volume for the second time."))
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
- LOG.error(_LE('Exception: %s'), ex)
exceptionMessage = (_(
"Failed to migrate volume for the first time."))
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
for elementCompositionService in elementCompositionServices:
if storageSystemName == elementCompositionService['SystemName']:
foundElementCompositionService = elementCompositionService
- LOG.debug("Found Element Composition Service:"
- "%(elementCompositionService)s."
- % {'elementCompositionService':
- elementCompositionService})
+ LOG.debug(
+ "Found Element Composition Service: "
+ "%(elementCompositionService)s.", {
+ 'elementCompositionService':
+ elementCompositionService})
break
if foundElementCompositionService is None:
exceptionMessage = (_("Element Composition Service not found "
if not wait_for_job_called:
if self._is_job_finished(conn, job):
kwargs['wait_for_job_called'] = True
- except Exception as e:
- LOG.error(_LE("Exception: %s.") % six.text_type(e))
+ except Exception:
exceptionMessage = (_("Issue encountered waiting for job."))
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(exceptionMessage)
kwargs = {'retries': 0,
if not wait_for_sync_called:
if self._is_sync_complete(conn, syncName):
kwargs['wait_for_sync_called'] = True
- except Exception as e:
- LOG.error(_LE("Exception: %s") % six.text_type(e))
+ except Exception:
exceptionMessage = (_("Issue encountered waiting for "
"synchronization."))
- LOG.error(exceptionMessage)
+ LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(exceptionMessage)
kwargs = {'retries': 0,
self.primary_storage_ip = self.active_storage_ip
self.secondary_storage_ip = configuration.san_secondary_ip
if self.secondary_storage_ip == self.primary_storage_ip:
- LOG.warning(_LE("san_secondary_ip is configured as "
+ LOG.warning(_LW("san_secondary_ip is configured as "
"the same value as san_ip."))
self.secondary_storage_ip = None
if not configuration.san_ip:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on enable compression on lun %s."),
- six.text_type(ex))
+ ex)
# handle consistency group
try:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on adding lun to consistency"
- " group. %s"), six.text_type(ex))
+ " group. %s"), ex)
return data
def create_lun_by_cmd(self, cmd, name):
'_wait_for_a_condition: %(method_name)s '
'execution failed for %(exception)s',
{'method_name': testmethod.__name__,
- 'exception': six.text_type(ex)})
+ 'exception': ex})
if test_value:
raise loopingcall.LoopingCallDone()
self._client.delete_consistencygroup(cg_name)
except Exception:
with excutils.save_and_reraise_exception():
- msg = (_('Delete consistency group %s failed.')
- % cg_name)
- LOG.error(msg)
+ LOG.error(_LE('Delete consistency group %s failed.'), cg_name)
for volume_ref in volumes:
try:
dest_vol_lun_id,
None)
if not migrated:
- msg = (_LE("Migrate volume failed between source vol %(src)s"
- " and dest vol %(dst)s."),
+ msg = (_("Migrate volume failed between source vol %(src)s"
+ " and dest vol %(dst)s.") %
{'src': new_vol_name, 'dst': dest_vol_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if any(ln.startswith(('% Error', 'Error:')) for ln in out):
desc = _("Error executing EQL command")
cmdout = '\n'.join(out)
- LOG.error(cmdout)
+ LOG.error(_LE("%s"), cmdout)
raise processutils.ProcessExecutionError(
stdout=cmdout, cmd=command, description=desc)
return out
self._eql_execute('volume', 'select', volume['name'], 'offline')
self._eql_execute('volume', 'delete', volume['name'])
except exception.VolumeNotFound:
- LOG.warn(_LW('Volume %s was not found while trying to delete it.'),
- volume['name'])
+ LOG.warning(_LW('Volume %s was not found while trying to delete '
+ 'it.'), volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete '
try:
self._check_volume(volume)
except exception.VolumeNotFound:
- LOG.warn(_LW('Volume %s is not found!, it may have been deleted.'),
- volume['name'])
+ LOG.warning(_LW('Volume %s was not found; it may have been '
+ 'deleted.'), volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume "%s".'),
if not config:
msg = (_("There's no Gluster config file configured (%s)") %
'glusterfs_shares_config')
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.GlusterfsException(msg)
if not os.path.exists(config):
msg = (_("Gluster config file at %(config)s doesn't exist") %
{'config': config})
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.GlusterfsException(msg)
self.shares = {}
try:
self._do_umount(True, share)
except Exception as exc:
- LOG.warning(_LE('Exception during unmounting %s') % (exc))
+ LOG.warning(_LW('Exception during unmounting %s'), exc)
def _do_umount(self, ignore_not_mounted, share):
mount_path = self._get_mount_point_for_share(share)
self._unmount_shares()
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.stderr:
- LOG.warn(_LW("Failed to refresh mounts, reason=%s") %
- exc.stderr)
+ LOG.warning(_LW("Failed to refresh mounts, reason=%s"),
+ exc.stderr)
else:
raise
volume['provider_location'] = self._find_share(volume['size'])
- LOG.info(_LI('casted to %s') % volume['provider_location'])
+ LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
"""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
- "volume_size: %(size)s"
- % {'snap': snapshot['id'],
- 'vol': volume['id'],
- 'size': volume_size})
+ "volume_size: %(size)s",
+ {'snap': snapshot['id'],
+ 'vol': volume['id'],
+ 'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
path_to_new_vol = self._local_path_volume(volume)
- LOG.debug("will copy from snapshot at %s" % path_to_snap_img)
+ LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.glusterfs_qcow2_volumes:
out_format = 'qcow2'
"""Deletes a logical volume."""
if not volume['provider_location']:
- LOG.warn(_LW('Volume %s does not have '
- 'provider_location specified, '
- 'skipping'), volume['name'])
+ LOG.warning(_LW('Volume %s does not have '
+ 'provider_location specified, '
+ 'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
volume_path = self.local_path(volume)
volume_size = volume['size']
- LOG.debug("creating new volume at %s" % volume_path)
+ LOG.debug("creating new volume at %s", volume_path)
if os.path.exists(volume_path):
msg = _('file already exists at %s') % volume_path
self._ensure_share_mounted(share)
self._mounted_shares.append(share)
except Exception as exc:
- LOG.error(_LE('Exception during mounting %s') % (exc,))
+ LOG.error(_LE('Exception during mounting %s'), exc)
- LOG.debug('Available shares: %s' % self._mounted_shares)
+ LOG.debug('Available shares: %s', self._mounted_shares)
def _ensure_share_mounted(self, glusterfs_share):
"""Mount GlusterFS share.
info = self._qemu_img_info(active_file_path, volume['name'])
if info.backing_file is not None:
- msg = _('No snapshots found in database, but '
- '%(path)s has backing file '
- '%(backing_file)s!') % {'path': active_file_path,
- 'backing_file': info.backing_file}
- LOG.error(msg)
+ LOG.error(_LE('No snapshots found in database, but %(path)s has '
+ 'backing file %(backing_file)s!'),
+ {'path': active_file_path,
+ 'backing_file': info.backing_file})
raise exception.InvalidVolume(snap_error_msg)
if info.file_format != 'raw':
from oslo_utils import excutils
from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _LE, _LI
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hds import hus_backend
def _do_lu_range_check(start, end, maxlun):
"""Validate array allocation range."""
- LOG.debug("Range: start LU: %(start)s, end LU: %(end)s"
- % {'start': start,
- 'end': end})
+ LOG.debug("Range: start LU: %(start)s, end LU: %(end)s",
+ {'start': start, 'end': end})
if int(start) < 0:
msg = 'start LU limit too low: ' + start
raise exception.InvalidInput(reason=msg)
raise exception.InvalidInput(reason=msg)
if int(end) > int(maxlun):
end = maxlun
- LOG.debug("setting LU upper (end) limit to %s" % maxlun)
+ LOG.debug("setting LU upper (end) limit to %s", maxlun)
return (start, end)
"""Read an xml element."""
try:
val = root.findtext(element)
- LOG.info(_LI("%(element)s: %(val)s")
- % {'element': element,
- 'val': val})
+ LOG.info(_LI("%(element)s: %(val)s"),
+ {'element': element, 'val': val})
if val:
return val.strip()
if check:
except ETree.ParseError:
if check:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("XML exception reading parameter: %s") % element)
+ LOG.error(_LE("XML exception reading parameter: %s"), element)
else:
- LOG.info(_LI("XML exception reading parameter: %s") % element)
+ LOG.info(_LI("XML exception reading parameter: %s"), element)
return None
conf[ip]['ctl'] = ctl
conf[ip]['port'] = port
conf[ip]['iscsi_port'] = ipp # HUS default: 3260
- msg = _('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s')
- LOG.debug(msg
- % {'ip': ip,
- 'ipp': ipp,
- 'ctl': ctl,
- 'port': port})
+ LOG.debug('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: '
+ '%(port)s', {'ip': ip, 'ipp': ipp,
+ 'ctl': ctl, 'port': port})
return conf
def _get_service(self, volume):
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
svc['port'], svc['hdp']) # ip, ipp, ctl, port, hdp
else:
- LOG.error(_LE("No configuration found for service: %s") % label)
+ LOG.error(_LE("No configuration found for service: %s"), label)
raise exception.ParameterNotFound(param=label)
return service
lst.extend([self.config['snapshot_hdp'], ])
for hdp in lst:
if hdp not in hdpl:
- LOG.error(_LE("HDP not found: %s") % hdp)
+ LOG.error(_LE("HDP not found: %s"), hdp)
err = "HDP not found: " + hdp
raise exception.ParameterNotFound(param=err)
iscsi_info[svc_ip]['iscsi_port'])
else: # config iscsi address not found on device!
LOG.error(_LE("iSCSI portal not found "
- "for service: %s") % svc_ip)
+ "for service: %s"), svc_ip)
raise exception.ParameterNotFound(param=svc_ip)
return
'%s' % (int(volume['size']) * 1024))
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
- LOG.debug("LUN %(lun)s of size %(sz)s MB is created."
- % {'lun': lun,
- 'sz': sz})
+ LOG.debug("LUN %(lun)s of size %(sz)s MB is created.",
+ {'lun': lun, 'sz': sz})
return {'provider_location': lun}
@utils.synchronized('hds_hus', external=True)
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
- LOG.debug("LUN %(lun)s of size %(size)s MB is cloned."
- % {'lun': lun,
- 'size': size})
+ LOG.debug("LUN %(lun)s of size %(size)s MB is cloned.",
+ {'lun': lun, 'size': size})
return {'provider_location': lun}
@utils.synchronized('hds_hus', external=True)
self.config['password'],
arid, lun,
'%s' % (new_size * 1024))
- LOG.debug("LUN %(lun)s extended to %(size)s GB."
- % {'lun': lun,
- 'size': new_size})
+ LOG.debug("LUN %(lun)s extended to %(size)s GB.",
+ {'lun': lun, 'size': new_size})
@utils.synchronized('hds_hus', external=True)
def delete_volume(self, volume):
arid, lun, ctl, port, iqn,
'')
name = self.hus_name
- LOG.debug("delete lun %(lun)s on %(name)s"
- % {'lun': lun,
- 'name': name})
+ LOG.debug("delete lun %(lun)s on %(name)s",
+ {'lun': lun, 'name': name})
self.bend.delete_lu(self.config['hus_cmd'],
HDS_VERSION,
self.config['mgmt_ip0'],
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
- LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot."
- % {'lun': lun,
- 'sz': sz})
+ LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot.",
+ {'lun': lun, 'sz': sz})
return {'provider_location': lun}
@utils.synchronized('hds_hus', external=True)
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
- LOG.debug("LUN %(lun)s of size %(size)s MB is created as snapshot."
- % {'lun': lun,
- 'size': size})
+ LOG.debug("LUN %(lun)s of size %(size)s MB is created as snapshot.",
+ {'lun': lun, 'size': size})
return {'provider_location': lun}
@utils.synchronized('hds_hus', external=True)
self.config['username'],
self.config['password'],
arid, lun)
- LOG.debug("LUN %s is deleted." % lun)
+ LOG.debug("LUN %s is deleted.", lun)
return
@utils.synchronized('hds_hus', external=True)
'--version', '1',
run_as_root=True,
check_exit_code=True)
- LOG.debug('get_version: ' + out + ' -- ' + err)
+ LOG.debug('get_version: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
def get_iscsi_info(self, cmd, ver, ip0, ip1, user, pw):
'--password', pw,
'--iscsi', '1',
check_exit_code=True)
- LOG.debug('get_iscsi_info: ' + out + ' -- ' + err)
+ LOG.debug('get_iscsi_info: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
def get_hdp_info(self, cmd, ver, ip0, ip1, user, pw):
'--password', pw,
'--hdp', '1',
check_exit_code=True)
- LOG.debug('get_hdp_info: ' + out + ' -- ' + err)
+ LOG.debug('get_hdp_info: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
def create_lu(self, cmd, ver, ip0, ip1, user, pw, id, hdp, start,
'--end', end,
'--size', size,
check_exit_code=True)
- LOG.debug('create_lu: ' + out + ' -- ' + err)
+ LOG.debug('create_lu: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
def delete_lu(self, cmd, ver, ip0, ip1, user, pw, id, lun):
'--lun', lun,
'--force', 1,
check_exit_code=True)
- LOG.debug('delete_lu: ' + out + ' -- ' + err)
+ LOG.debug('delete_lu: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
def create_dup(self, cmd, ver, ip0, ip1, user, pw, id, src_lun,
'--end', end,
'--size', size,
check_exit_code=True)
- LOG.debug('create_dup: ' + out + ' -- ' + err)
+ LOG.debug('create_dup: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
def extend_vol(self, cmd, ver, ip0, ip1, user, pw, id, lun, new_size):
'--lun', lun,
'--size', new_size,
check_exit_code=True)
- LOG.debug('extend_vol: ' + out + ' -- ' + err)
+ LOG.debug('extend_vol: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
def add_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
'--target', iqn,
'--initiator', initiator,
check_exit_code=True)
- LOG.debug('add_iscsi_conn: ' + out + ' -- ' + err)
+ LOG.debug('add_iscsi_conn: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
def del_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
'--initiator', initiator,
'--force', 1,
check_exit_code=True)
- LOG.debug('del_iscsi_conn: ' + out + ' -- ' + err)
+ LOG.debug('del_iscsi_conn: %(out)s -- %(err)s',
+ {'out': out, 'err': err})
return out
conf[ip]['ctl'] = ctl
conf[ip]['port'] = port
conf[ip]['iscsi_port'] = ipp
- msg = "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s"
- LOG.debug(msg, {'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
+ LOG.debug("portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s",
+ {'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
return conf
# value and use a temporary dummy password.
if 'iscsi_secret' not in svc:
# Warns in the first time
- LOG.info(_LE("CHAP authentication disabled"))
+ LOG.info(_LI("CHAP authentication disabled"))
svc['iscsi_secret'] = ""
self.config['password'],
pool['hdp'])
- LOG.debug('Query for pool %s: %s', pool['pool_name'], out)
+ LOG.debug('Query for pool %(pool)s: %(out)s',
+ {'pool': pool['pool_name'], 'out': out})
(hdp, size, _ign, used) = out.split()[1:5] # in MB
pool['total_capacity_gb'] = int(size) / units.Ki
if 'tgt' in info.keys(): # spurious repeat connection
# print info.keys()
- LOG.debug("initiate_conn: tgt already set %s" % info['tgt'])
+ LOG.debug("initiate_conn: tgt already set %s", info['tgt'])
(arid, lun) = info['id_lu']
loc = arid + '.' + lun
# sps, use target if provided
info = _loc_info(volume['provider_location'])
if 'tgt' not in info.keys(): # spurious disconnection
- LOG.warn(_LW("terminate_conn: provider location empty."))
+ LOG.warning(_LW("terminate_conn: provider location empty."))
return
(arid, lun) = info['id_lu']
(_portal, iqn, loc, ctl, port, hlun) = info['tgt']
conf[key]['path'] = path
conf[key]['hdp'] = hdp
conf[key]['fslabel'] = fslabel
- msg = _("nfs_info: %(key)s: %(path)s, HDP: \
- %(fslabel)s FSID: %(hdp)s")
- LOG.info(msg, {'key': key, 'path': path, 'fslabel': fslabel,
- 'hdp': hdp})
+ LOG.info(_LI("nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s "
+ "FSID: %(hdp)s"),
+ {'key': key, 'path': path,
+ 'fslabel': fslabel, 'hdp': hdp})
return conf
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder import utils
SMPL = 1
def output_err(msg_id, **kwargs):
msg = HBSD_ERR_MSG.get(msg_id) % kwargs
- LOG.error("MSGID%04d-E: %s", msg_id, msg)
+ LOG.error(_LE("MSGID%(id)04d-E: %(msg)s"), {'id': msg_id, 'msg': msg})
return msg
def exec_command(self, cmd, args=None, printflag=True):
if printflag:
if args:
- LOG.debug('cmd: %(cmd)s, args: %(args)s' %
+ LOG.debug('cmd: %(cmd)s, args: %(args)s',
{'cmd': cmd, 'args': args})
else:
- LOG.debug('cmd: %s' % cmd)
+ LOG.debug('cmd: %s', cmd)
cmd = [cmd]
stdout = e.stdout
stderr = e.stderr
- LOG.debug('cmd: %s' % six.text_type(cmd))
- LOG.debug('from: %s' % six.text_type(inspect.stack()[2]))
- LOG.debug('ret: %d' % ret)
- LOG.debug('stdout: %s' % stdout.replace(os.linesep, ' '))
- LOG.debug('stderr: %s' % stderr.replace(os.linesep, ' '))
+ LOG.debug('cmd: %s', cmd)
+ LOG.debug('from: %s', inspect.stack()[2])
+ LOG.debug('ret: %d', ret)
+ LOG.debug('stdout: %s', stdout.replace(os.linesep, ' '))
+ LOG.debug('stderr: %s', stderr.replace(os.linesep, ' '))
return ret, stdout, stderr
import six
from cinder import exception
-from cinder.i18n import _LE, _LW
+from cinder.i18n import _LE, _LI, _LW
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_horcm as horcm
essential_inherited_param = ['volume_backend_name', 'volume_driver']
conf = self.configuration
- msg = basic_lib.set_msg(1, config_group=conf.config_group)
- LOG.info(msg)
+ LOG.info(basic_lib.set_msg(1, config_group=conf.config_group))
version = self.command.get_comm_version()
if conf.hitachi_unit_name:
prefix = 'HSNM2 version'
else:
prefix = 'RAID Manager version'
- LOG.info('\t%-35s%s' % (prefix + ': ', six.text_type(version)))
+ LOG.info(_LI('\t%(prefix)-35s : %(version)s'),
+ {'prefix': prefix, 'version': version})
for param in essential_inherited_param:
value = conf.safe_get(param)
- LOG.info('\t%-35s%s' % (param + ': ', six.text_type(value)))
+ LOG.info(_LI('\t%(param)-35s : %(value)s'),
+ {'param': param, 'value': value})
for opt in volume_opts:
if not opt.secret:
value = getattr(conf, opt.name)
- LOG.info('\t%-35s%s' % (opt.name + ': ',
- six.text_type(value)))
+ LOG.info(_LI('\t%(name)-35s : %(value)s'),
+ {'name': opt.name, 'value': value})
if storage_protocol == 'iSCSI':
value = getattr(conf, 'hitachi_group_request')
- LOG.info('\t%-35s%s' % ('hitachi_group_request: ',
- six.text_type(value)))
+ LOG.info(_LI('\t%(request)-35s : %(value)s'),
+ {'request': 'hitachi_group_request', 'value': value})
def check_param(self):
conf = self.configuration
def delete_pair(self, ldev, all_split=True, is_vvol=None):
paired_info = self.command.get_paired_info(ldev)
- LOG.debug('paired_info: %s' % six.text_type(paired_info))
+ LOG.debug('paired_info: %s', paired_info)
pvol = paired_info['pvol']
svols = paired_info['svol']
driver = self.generated_from
try:
self.command.restart_pair_horcm()
except Exception as e:
- LOG.warning(_LW('Failed to restart horcm: %s') %
- six.text_type(e))
+ LOG.warning(_LW('Failed to restart horcm: %s'), e)
else:
if (all_split or is_vvol) and restart:
try:
self.command.restart_pair_horcm()
except Exception as e:
- LOG.warning(_LW('Failed to restart horcm: %s') %
- six.text_type(e))
+ LOG.warning(_LW('Failed to restart horcm: %s'), e)
def copy_async_data(self, pvol, svol, is_vvol):
path_list = []
try:
driver.pair_terminate_connection(ldev)
except Exception as ex:
- msg = basic_lib.set_msg(
- 310, ldev=ldev, reason=six.text_type(ex))
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(310, ldev=ldev,
+ reason=ex))
def copy_sync_data(self, src_ldev, dest_ldev, size):
src_vol = {'provider_location': six.text_type(src_ldev),
try:
self.delete_ldev(svol, is_vvol)
except Exception as ex:
- msg = basic_lib.set_msg(
- 313, ldev=svol, reason=six.text_type(ex))
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(313, ldev=svol,
+ reason=ex))
return six.text_type(svol), type
def create_ldev(self, size, ldev_range, pool_id, is_vvol):
LOG.debug('create start (normal)')
for i in basic_lib.DEFAULT_TRY_RANGE:
- LOG.debug('Try number: %(tries)s / %(max_tries)s' %
+ LOG.debug('Try number: %(tries)s / %(max_tries)s',
{'tries': i + 1,
'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)})
new_ldev = self._get_unused_volume_num(ldev_range)
try:
self._add_ldev(new_ldev, size, pool_id, is_vvol)
except exception.HBSDNotFound:
- msg = basic_lib.set_msg(312, resource='LDEV')
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(312, resource='LDEV'))
continue
else:
break
else:
msg = basic_lib.output_err(636)
raise exception.HBSDError(message=msg)
- LOG.debug('create end (normal: %s)' % six.text_type(new_ldev))
+ LOG.debug('create end (normal: %s)', new_ldev)
self.init_volinfo(self.volume_info, new_ldev)
return new_ldev
'metadata': volume_metadata}
def delete_ldev(self, ldev, is_vvol):
- LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)'
- % {'ldev': ldev, 'vvol': is_vvol})
+ LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)',
+ {'ldev': ldev, 'vvol': is_vvol})
with self.pair_flock:
self.delete_pair(ldev)
self.command.comm_delete_ldev(ldev, is_vvol)
if ldev in self.volume_info:
self.volume_info.pop(ldev)
LOG.debug('delete_ldev is finished '
- '(LDEV: %(ldev)d, is_vvol: %(vvol)s)'
- % {'ldev': ldev, 'vvol': is_vvol})
+ '(LDEV: %(ldev)d, is_vvol: %(vvol)s)',
+ {'ldev': ldev, 'vvol': is_vvol})
def delete_volume(self, volume):
ldev = self.get_ldev(volume)
if ldev is None:
- msg = basic_lib.set_msg(
- 304, method='delete_volume', id=volume['id'])
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(304, method='delete_volume',
+ id=volume['id']))
return
self.add_volinfo(ldev, volume['id'])
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
with self.volinfo_lock:
if ldev in self.volume_info:
self.volume_info.pop(ldev)
- msg = basic_lib.set_msg(
- 305, type='volume', id=volume['id'])
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 305, type='volume', id=volume['id']))
except exception.HBSDBusy:
raise exception.VolumeIsBusy(volume_name=volume['name'])
finally:
def delete_snapshot(self, snapshot):
ldev = self.get_ldev(snapshot)
if ldev is None:
- msg = basic_lib.set_msg(
- 304, method='delete_snapshot', id=snapshot['id'])
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 304, method='delete_snapshot', id=snapshot['id']))
return
self.add_volinfo(ldev, id=snapshot['id'], type='snapshot')
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
with self.volinfo_lock:
if ldev in self.volume_info:
self.volume_info.pop(ldev)
- msg = basic_lib.set_msg(
- 305, type='snapshot', id=snapshot['id'])
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 305, type='snapshot', id=snapshot['id']))
except exception.HBSDBusy:
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
finally:
def output_backend_available_once(self):
if self.output_first:
self.output_first = False
- msg = basic_lib.set_msg(
- 3, config_group=self.configuration.config_group)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 3, config_group=self.configuration.config_group))
def update_volume_stats(self, storage_protocol):
data = {}
total_gb, free_gb = self.command.comm_get_dp_pool(
self.configuration.hitachi_pool_id)
except Exception as ex:
- LOG.error(_LE('Failed to update volume status: %s') %
- six.text_type(ex))
+ LOG.error(_LE('Failed to update volume status: %s'), ex)
return None
data['total_capacity_gb'] = total_gb
'reserved_percentage')
data['QoS_support'] = False
- LOG.debug('Updating volume status (%s)' % data)
+ LOG.debug('Updating volume status (%s)', data)
return data
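The hunks above all replace eager '%' interpolation with the logger's own deferred formatting. A minimal standalone sketch of the difference, using only the standard library logging module (the logger name and sample payload here are illustrative, not taken from the driver):

import logging

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger(__name__)

data = {'total_capacity_gb': 100, 'free_capacity_gb': 80}

# Eager: the message string is built here even though DEBUG is disabled.
LOG.debug('Updating volume status (%s)' % data)

# Deferred: logging formats the message only if the record is emitted,
# so the interpolation cost is skipped while DEBUG is off.
LOG.debug('Updating volume status (%s)', data)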
ldev = self._string2int(existing_ref.get('ldev'))
- msg = basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev)
- LOG.info(msg)
+ LOG.info(basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev))
return {'provider_location': ldev}
except exception.HBSDBusy:
raise exception.HBSDVolumeIsBusy(volume_name=volume['name'])
else:
- msg = basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev)
- LOG.info(msg)
+ LOG.info(basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev))
finally:
if ldev in self.volume_info:
self.volume_info[ldev]['in_use'].lock.release()
import six
from cinder import exception
-from cinder.i18n import _LW
+from cinder.i18n import _LI, _LW
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
for opt in volume_opts:
if not opt.secret:
value = getattr(self.configuration, opt.name)
- LOG.info('\t%-35s%s' %
- (opt.name + ': ', six.text_type(value)))
+ LOG.info(_LI('\t%(name)-35s : %(value)s'),
+ {'name': opt.name, 'value': value})
self.common.command.output_param_to_log(self.configuration)
def _add_wwn(self, hgs, port, gid, wwns):
detected = self.common.command.is_detected(port, wwn)
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
'detected': detected})
- LOG.debug('Create host group for %s' % hgs)
+ LOG.debug('Create host group for %s', hgs)
def _add_lun(self, hostgroups, ldev):
if hostgroups is self.pair_hostgroups:
try:
self.common.command.comm_delete_lun(hostgroups, ldev)
except exception.HBSDNotFound:
- msg = basic_lib.set_msg(301, ldev=ldev)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(301, ldev=ldev))
def _get_hgname_gid(self, port, host_grp_name):
return self.common.command.get_hgname_gid(port, host_grp_name)
def _fill_group(self, hgs, port, host_grp_name, wwns):
added_hostgroup = False
LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
- 'name: %(name)s wwns: %(wwns)s)'
- % {'hgs': hgs, 'port': port,
- 'name': host_grp_name, 'wwns': wwns})
+ 'name: %(name)s wwns: %(wwns)s)',
+ {'hgs': hgs, 'port': port,
+ 'name': host_grp_name, 'wwns': wwns})
gid = self._get_hgname_gid(port, host_grp_name)
if gid is None:
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
added_hostgroup = True
except exception.HBSDNotFound:
gid = None
- msg = basic_lib.set_msg(312, resource='GID')
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(312, resource='GID'))
continue
else:
LOG.debug('Completed to add host target'
- '(port: %(port)s gid: %(gid)d)'
- % {'port': port, 'gid': gid})
+ '(port: %(port)s gid: %(gid)d)',
+ {'port': port, 'gid': gid})
break
else:
msg = basic_lib.output_err(641)
try:
self._fill_group(hgs, port, host_grp_name, wwns_copy)
except Exception as ex:
- LOG.warning(_LW('Failed to add host group: %s') %
- six.text_type(ex))
- msg = basic_lib.set_msg(
- 308, port=port, name=host_grp_name)
- LOG.warning(msg)
+ LOG.warning(_LW('Failed to add host group: %s'), ex)
+ LOG.warning(basic_lib.set_msg(
+ 308, port=port, name=host_grp_name))
if not hgs:
- msg = basic_lib.output_err(649)
- raise exception.HBSDError(message=msg)
+ raise exception.HBSDError(message=basic_lib.output_err(649))
def add_hostgroup_pair(self, pair_hostgroups):
if self.configuration.hitachi_unit_name:
if 'wwpns' not in properties:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
- LOG.debug("wwpns: %s" % properties['wwpns'])
+ LOG.debug("wwpns: %s", properties['wwpns'])
hostgroups = []
security_ports = self._get_hostgroup_info(
self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
except Exception:
with excutils.save_and_reraise_exception():
- msg = basic_lib.set_msg(
- 306, port=port, gid=gid, name=host_grp_name)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 306, port=port, gid=gid, name=host_grp_name))
def _check_volume_mapping(self, hostgroup):
port = hostgroup['port']
def _initialize_connection(self, ldev, connector, src_hgs=None):
LOG.debug("Call _initialize_connection "
- "(config_group: %(group)s ldev: %(ldev)d)"
- % {'group': self.configuration.config_group, 'ldev': ldev})
+ "(config_group: %(group)s ldev: %(ldev)d)",
+ {'group': self.configuration.config_group, 'ldev': ldev})
if src_hgs is self.pair_hostgroups:
hostgroups = src_hgs
else:
try:
self._add_lun(hostgroups, ldev)
except exception.HBSDNotFound:
- msg = basic_lib.set_msg(311, ldev=ldev)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(311, ldev=ldev))
for i in range(self.max_hostgroups + 1):
self.pair_hostnum += 1
pair_hostgroups = []
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
properties = self._get_properties(volume, hostgroups)
- LOG.debug('Initialize volume_info: %s'
- % self.common.volume_info)
+ LOG.debug('Initialize volume_info: %s',
+ self.common.volume_info)
- LOG.debug('HFCDrv: properties=%s' % properties)
+ LOG.debug('HFCDrv: properties=%s', properties)
return {
'driver_volume_type': 'fibre_channel',
'data': properties
}
def _terminate_connection(self, ldev, connector, src_hgs):
- LOG.debug("Call _terminate_connection(config_group: %s)"
- % self.configuration.config_group)
+ LOG.debug("Call _terminate_connection(config_group: %s)",
+ self.configuration.config_group)
hostgroups = src_hgs[:]
self._delete_lun(hostgroups, ldev)
LOG.debug("*** _terminate_ ***")
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
- msg = basic_lib.set_msg(302, volume_id=volume['id'])
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
return
if 'wwpns' not in connector:
self._terminate_connection(ldev, connector, hostgroups)
properties = self._get_properties(volume, hostgroups,
terminate=True)
- LOG.debug('Terminate volume_info: %s' % self.common.volume_info)
+ LOG.debug('Terminate volume_info: %s', self.common.volume_info)
return {
'driver_volume_type': 'fibre_channel',
import six
from cinder import exception
-from cinder.i18n import _LE, _LW
+from cinder.i18n import _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
continue
target_wwns[port] = line[10]
- LOG.debug('target wwns: %s' % target_wwns)
+ LOG.debug('target wwns: %s', target_wwns)
return target_wwns
def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected):
if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and
not self.comm_get_snapshot(ldev) or
re.search('SSB=%s' % HOST_IO_SSB, stderr)):
- msg = basic_lib.set_msg(310, ldev=ldev, reason=stderr)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=stderr))
if time.time() - start >= LUN_DELETE_WAITTIME:
msg = basic_lib.output_err(
if is_once:
break
else:
- msg = basic_lib.set_msg(
- 314, ldev=ldev, lun=lun, port=port, id=gid)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 314, ldev=ldev, lun=lun, port=port, id=gid))
finally:
self.comm_unlock()
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
- msg = basic_lib.set_msg(315, ldev=ldev, reason=stderr)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(315, ldev=ldev, reason=stderr))
finally:
self.comm_unlock()
def discard_zero_page(self, ldev):
try:
self.comm_modify_ldev(ldev)
- except Exception as e:
- LOG.warning(_LW('Failed to discard zero page: %s') %
- six.text_type(e))
+ except Exception as ex:
+ LOG.warning(_LW('Failed to discard zero page: %s'), ex)
@storage_synchronized
def comm_add_snapshot(self, pvol, svol):
[basic_lib.PSUS], timeout,
interval, check_svol=True)
except Exception as ex:
- LOG.warning(_LW('Failed to create pair: %s') %
- six.text_type(ex))
+ LOG.warning(_LW('Failed to create pair: %s'), ex)
try:
self.comm_pairsplit(copy_group, ldev_name)
[basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
- LOG.warning(_LW('Failed to create pair: %s') %
- six.text_type(ex))
+ LOG.warning(_LW('Failed to create pair: %s'), ex)
if self.is_smpl(copy_group, ldev_name):
try:
self.delete_pair_config(pvol, svol, copy_group,
ldev_name)
except Exception as ex:
- LOG.warning(_LW('Failed to create pair: %s') %
- six.text_type(ex))
+ LOG.warning(_LW('Failed to create pair: %s'), ex)
if restart:
try:
self.restart_pair_horcm()
except Exception as ex:
- LOG.warning(_LW('Failed to restart horcm: %s') %
- six.text_type(ex))
+ LOG.warning(_LW('Failed to restart horcm: %s'), ex)
else:
self.check_snap_count(pvol)
pvol, svol, [basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
- LOG.warning(_LW('Failed to create pair: %s') %
- six.text_type(ex))
+ LOG.warning(_LW('Failed to create pair: %s'), ex)
def delete_pair(self, pvol, svol, is_vvol):
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
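Several of the hunks above also drop the explicit six.text_type(ex) conversion and hand the exception object straight to the logger. A small sketch of why the two are equivalent, again with plain standard-library logging (the exception and message are made up for illustration):

import logging

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger(__name__)

try:
    raise ValueError('pair creation failed')
except Exception as ex:
    # The logger applies str() to its arguments when the record is
    # emitted, so passing the exception object yields the same text as
    # passing six.text_type(ex), without the explicit conversion.
    LOG.warning('Failed to create pair: %s', ex)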
for opt in volume_opts:
if not opt.secret:
value = getattr(conf, opt.name)
- LOG.info('\t%-35s%s' % (opt.name + ': ',
- six.text_type(value)))
+ LOG.info(_LI('\t%(name)-35s : %(value)s'),
+ {'name': opt.name, 'value': value})
def create_lock_file(self):
inst = self.conf.hitachi_horcm_numbers[0]
import six
from cinder import exception
-from cinder.i18n import _LE
+from cinder.i18n import _LE, _LI
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
self.configuration.append_config_values(volume_opts)
if (self.configuration.hitachi_auth_method and
self.configuration.hitachi_auth_method not in CHAP_METHOD):
- msg = basic_lib.output_err(601, param='hitachi_auth_method')
- raise exception.HBSDError(message=msg)
+ raise exception.HBSDError(
+ message=basic_lib.output_err(601, param='hitachi_auth_method'))
if self.configuration.hitachi_auth_method == 'None':
self.configuration.hitachi_auth_method = None
for opt in volume_opts:
except exception.HBSDError:
raise
except Exception as ex:
- msg = basic_lib.output_err(601, param=six.text_type(ex))
- raise exception.HBSDError(message=msg)
+ raise exception.HBSDError(
+ message=basic_lib.output_err(601, param=six.text_type(ex)))
def output_param_to_log(self):
lock = basic_lib.get_process_lock(self.common.system_lock_file)
for opt in volume_opts:
if not opt.secret:
value = getattr(self.configuration, opt.name)
- LOG.info('\t%-35s%s' % (opt.name + ': ',
- six.text_type(value)))
+ LOG.info(_LI('\t%(name)-35s : %(value)s'),
+ {'name': opt.name, 'value': value})
def _delete_lun_iscsi(self, hostgroups, ldev):
try:
self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
except exception.HBSDNotFound:
- msg = basic_lib.set_msg(301, ldev=ldev)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(301, ldev=ldev))
def _add_target(self, hostgroups, ldev):
self.common.add_lun('autargetmap', hostgroups, ldev)
def _add_initiator(self, hgs, port, gid, host_iqn):
self.common.command.comm_add_initiator(port, gid, host_iqn)
hgs.append({'port': port, 'gid': int(gid), 'detected': True})
- LOG.debug("Create iSCSI target for %s" % hgs)
+ LOG.debug("Create iSCSI target for %s", hgs)
def _get_unused_gid_iscsi(self, port):
group_range = self.configuration.hitachi_group_range
ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
port, target_no, target_alias)
if ret:
- msg = basic_lib.set_msg(
- 307, port=port, tno=target_no, alias=target_alias)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 307, port=port, tno=target_no, alias=target_alias))
def _delete_chap_user(self, port):
ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
if ret:
- msg = basic_lib.set_msg(
- 303, user=self.configuration.hitachi_auth_user)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 303, user=self.configuration.hitachi_auth_user))
def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
return self.common.command.comm_get_hostgroup_info_iscsi(
hostgroup['ip_addr'] = ip_addr
hostgroup['ip_port'] = ip_port
hostgroup['target_iqn'] = target_iqn
- LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s"
- % {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
+ LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s",
+ {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
for port in ports:
added_user = False
LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
'target_iqn: %(tiqn)s target_alias: %(alias)s '
- 'add_iqn: %(aiqn)s)' %
+ 'add_iqn: %(aiqn)s)',
{'hgs': hgs, 'port': port, 'tiqn': target_iqn,
'alias': target_alias, 'aiqn': add_iqn})
gid = self.common.command.get_gid_from_targetiqn(
port, gid, target_alias, target_iqn)
added_hostgroup = True
except exception.HBSDNotFound:
- msg = basic_lib.set_msg(312, resource='GID')
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(312, resource='GID'))
continue
except Exception as ex:
- msg = basic_lib.set_msg(
+ LOG.warning(basic_lib.set_msg(
309, port=port, alias=target_alias,
- reason=six.text_type(ex))
- LOG.warning(msg)
+ reason=ex))
break
else:
LOG.debug('Completed to add target'
- '(port: %(port)s gid: %(gid)d)'
- % {'port': port, 'gid': gid})
+ '(port: %(port)s gid: %(gid)d)',
+ {'port': port, 'gid': gid})
break
if gid is None:
- LOG.error(_LE('Failed to add target(port: %s)') % port)
+ LOG.error(_LE('Failed to add target(port: %s)'), port)
continue
try:
if added_hostgroup:
port, target_alias)
self._add_initiator(hgs, port, gid, add_iqn)
except Exception as ex:
- msg = basic_lib.set_msg(
- 316, port=port, reason=six.text_type(ex))
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 316, port=port, reason=ex))
if added_hostgroup:
if added_user:
self._delete_chap_user(port)
self.add_hostgroup_core(hgs, diff_ports, target_iqn,
target_alias, master_iqn)
if not hgs:
- msg = basic_lib.output_err(649)
- raise exception.HBSDError(message=msg)
+ raise exception.HBSDError(message=basic_lib.output_err(649))
def add_hostgroup(self):
properties = utils.brick_get_connector_properties()
if 'initiator' not in properties:
- msg = basic_lib.output_err(650, resource='HBA')
- raise exception.HBSDError(message=msg)
- LOG.debug("initiator: %s" % properties['initiator'])
+ raise exception.HBSDError(
+ message=basic_lib.output_err(650, resource='HBA'))
+ LOG.debug("initiator: %s", properties['initiator'])
hostgroups = []
security_ports = self._get_hostgroup_info_iscsi(
hostgroups, properties['initiator'])
def _initialize_connection(self, ldev, connector, src_hgs=None):
LOG.debug("Call _initialize_connection "
- "(config_group: %(group)s ldev: %(ldev)d)"
- % {'group': self.configuration.config_group, 'ldev': ldev})
+ "(config_group: %(group)s ldev: %(ldev)d)",
+ {'group': self.configuration.config_group, 'ldev': ldev})
if src_hgs:
hostgroups = src_hgs[:]
else:
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
- msg = basic_lib.output_err(619, volume_id=volume['id'])
- raise exception.HBSDError(message=msg)
+ raise exception.HBSDError(
+ message=basic_lib.output_err(619, volume_id=volume['id']))
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
protocol = 'iscsi'
properties = self._get_properties(volume, hostgroups)
- LOG.debug('Initialize volume_info: %s'
- % self.common.volume_info)
+ LOG.debug('Initialize volume_info: %s',
+ self.common.volume_info)
- LOG.debug('HFCDrv: properties=%s' % properties)
+ LOG.debug('HFCDrv: properties=%s', properties)
return {
'driver_volume_type': protocol,
'data': properties
}
def _terminate_connection(self, ldev, connector, src_hgs):
- LOG.debug("Call _terminate_connection(config_group: %s)"
- % self.configuration.config_group)
+ LOG.debug("Call _terminate_connection(config_group: %s)",
+ self.configuration.config_group)
hostgroups = src_hgs[:]
self._delete_lun_iscsi(hostgroups, ldev)
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
- msg = basic_lib.set_msg(302, volume_id=volume['id'])
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
return
if 'initiator' not in connector:
- msg = basic_lib.output_err(650, resource='HBA')
- raise exception.HBSDError(message=msg)
+ raise exception.HBSDError(
+ message=basic_lib.output_err(650, resource='HBA'))
hostgroups = []
self._get_hostgroup_info_iscsi(hostgroups,
connector['initiator'])
if not hostgroups:
- msg = basic_lib.output_err(649)
- raise exception.HBSDError(message=msg)
+ raise exception.HBSDError(message=basic_lib.output_err(649))
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.do_setup_status.wait()
if volume['volume_attachment']:
desc = 'volume %s' % volume['id']
- msg = basic_lib.output_err(660, desc=desc)
- raise exception.HBSDError(message=msg)
+ raise exception.HBSDError(
+ message=basic_lib.output_err(660, desc=desc))
super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
image_service,
image_meta)
if int(line[3]) == ldev:
hlu = int(line[2])
LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
- '(hlun: %(hlu)d)')
- % {'ldev': ldev, 'hlu': hlu})
+ '(hlun: %(hlu)d)'),
+ {'ldev': ldev, 'hlu': hlu})
return hlu
return None
else:
target_wwns[port] = line[3]
- LOG.debug('target wwns: %s' % target_wwns)
+ LOG.debug('target wwns: %s', target_wwns)
return target_wwns
def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login):
no_lun_cnt = 0
deleted_hostgroups = []
for hostgroup in hostgroups:
- LOG.debug('comm_delete_lun: hostgroup is %s' % hostgroup)
+ LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup)
port = hostgroup['port']
gid = hostgroup['gid']
ctl_no = port[0]
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
deleted_hostgroups.append({'port': port, 'gid': gid})
- LOG.debug('comm_delete_lun is over (%d)' % lun)
+ LOG.debug('comm_delete_lun is over (%d)', lun)
def comm_delete_lun(self, hostgroups, ldev):
self.comm_delete_lun_core('auhgmap', hostgroups, ldev)
if is_once:
break
else:
- msg = basic_lib.set_msg(
- 314, ldev=ldev, lun=hlu, port=port, id=gid)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 314, ldev=ldev, lun=hlu, port=port, id=gid))
if not is_ok:
if stderr:
if added_flag:
_ret, _stdout, _stderr = self.delete_chap_user(port)
if _ret:
- msg = basic_lib.set_msg(303, user=auth_username)
- LOG.warning(msg)
+ LOG.warning(basic_lib.set_msg(
+ 303, user=auth_username))
msg = basic_lib.output_err(
600, cmd='auchapuser', ret=ret, out=stdout, err=stderr)
gid = int(shlex.split(line)[0][0:3])
hostgroups.append(
{'port': port, 'gid': gid, 'detected': True})
- LOG.debug('Find port=%(port)s gid=%(gid)d'
- % {'port': port, 'gid': gid})
+ LOG.debug('Find port=%(port)s gid=%(gid)d',
+ {'port': port, 'gid': gid})
if port not in security_ports:
security_ports.append(port)
conf_file = self.configuration.cinder_huawei_conf_file
(product, protocol) = self._get_conf_info(conf_file)
- LOG.info(_LI(
- '_instantiate_driver: Loading %(protocol)s driver for '
- 'Huawei OceanStor %(product)s series storage arrays.')
- % {'protocol': protocol,
- 'product': product})
+ LOG.info(_LI('_instantiate_driver: Loading %(protocol)s driver for '
+ 'Huawei OceanStor %(product)s series storage arrays.'),
+ {'protocol': protocol,
+ 'product': product})
# Map HVS to 18000
if product in MAPPING:
- LOG.warn(_LW("Product name %s is deprecated, update your "
- "configuration to the new product name."), product)
+ LOG.warning(_LW("Product name %s is deprecated, update your "
+ "configuration to the new product name."),
+ product)
product = MAPPING[product]
driver_module = self._product[product]
root = tree.getroot()
return root
except IOError as err:
- LOG.error(_LE('parse_xml_file: %s') % err)
- raise err
+ LOG.error(_LE('parse_xml_file: %s'), err)
+ raise
def get_xml_item(xml_root, item):
if not host_os:
host_os = os_type['Linux'] # default os type
- LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.'
- % {'ip': host_ip, 'os': host_os})
+ LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.',
+ {'ip': host_ip, 'os': host_os})
return host_os
'res': res})
except Exception as err:
- LOG.error(_LE('\nBad response from server: %s.') % err)
+ LOG.error(_LE('\nBad response from server: %s.'), err)
raise
try:
res_json = json.loads(res)
except Exception as err:
- err_msg = (_LE('JSON transfer error: %s.') % err)
- LOG.error(err_msg)
+ LOG.error(_LE('JSON transfer error: %s.'), err)
raise
return res_json
volume_description = volume['name']
volume_size = self._get_volume_size(volume)
- LOG.info(_LI(
- 'Create Volume: %(volume)s Size: %(size)s.')
- % {'volume': volume_name,
- 'size': volume_size})
+ LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s.'),
+ {'volume': volume_name, 'size': volume_size})
params = self._get_lun_conf_params()
params['pool_id'] = poolinfo['ID']
name = self._encode_name(volume['id'])
lun_id = volume.get('provider_location', None)
- LOG.info(_LI('Delete Volume: %(name)s array lun id: %(lun_id)s.')
- % {'name': name, 'lun_id': lun_id})
+ LOG.info(_LI('Delete Volume: %(name)s array lun id: %(lun_id)s.'),
+ {'name': name, 'lun_id': lun_id})
if lun_id:
if self._check_lun_exist(lun_id) is True:
# Get qos_id by lun_id.
tree = ET.parse(filename)
root = tree.getroot()
except Exception as err:
- LOG.error(_LE('_read_xml: %s') % err)
+ LOG.error(_LE('_read_xml: %s'), err)
raise
return root
snapshot_description = snapshot['id']
volume_name = self._encode_name(snapshot['volume_id'])
- LOG.info(_LI(
- '_create_snapshot:snapshot name: %(snapshot)s, '
- 'volume name: %(volume)s.')
- % {'snapshot': snapshot_name,
- 'volume': volume_name})
+ LOG.info(_LI('_create_snapshot:snapshot name: %(snapshot)s, '
+ 'volume name: %(volume)s.'),
+ {'snapshot': snapshot_name,
+ 'volume': volume_name})
lun_id = self._get_volume_by_name(volume_name)
if lun_id is None:
snapshot_name = self._encode_name(snapshot['id'])
volume_name = self._encode_name(snapshot['volume_id'])
- LOG.info(_LI(
- 'stop_snapshot:snapshot name: %(snapshot)s, '
- 'volume name: %(volume)s.')
- % {'snapshot': snapshot_name,
- 'volume': volume_name})
+ LOG.info(_LI('stop_snapshot:snapshot name: %(snapshot)s, '
+ 'volume name: %(volume)s.'),
+ {'snapshot': snapshot_name,
+ 'volume': volume_name})
snapshot_id = snapshot.get('provider_location', None)
if snapshot_id is None:
tgt_lun_id = lun_info['ID']
luncopy_name = self._encode_name(volume['id'])
- LOG.info(_LI(
- 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '
- 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s')
- % {'src_lun_id': snapshot_id,
- 'tgt_lun_id': tgt_lun_id,
- 'copy_name': luncopy_name})
+ LOG.info(_LI('create_volume_from_snapshot: src_lun_id: '
+ '%(src_lun_id)s, tgt_lun_id: %(tgt_lun_id)s, '
+ 'copy_name: %(copy_name)s'),
+ {'src_lun_id': snapshot_id,
+ 'tgt_lun_id': tgt_lun_id,
+ 'copy_name': luncopy_name})
event_type = 'LUNReadyWaitInterval'
wait_interval = self._get_wait_interval(event_type)
# Delete snapshot.
self.delete_snapshot(snapshot)
except exception.CinderException:
- LOG.warning(_LW(
- 'Failure deleting the snapshot %(snapshot_id)s '
- 'of volume %(volume_id)s.')
- % {'snapshot_id': snapshot['id'],
- 'volume_id': src_vref['id']})
+ LOG.warning(_LW('Failure deleting the snapshot '
+ '%(snapshot_id)s of volume %(volume_id)s.'),
+ {'snapshot_id': snapshot['id'],
+ 'volume_id': src_vref['id']})
return lun_info
host_group_name = HOSTGROUP_PREFIX + host_id
hostgroup_id = self._find_hostgroup(host_group_name)
- LOG.info(_LI(
- '_add_host_into_hostgroup, hostgroup name: %(name)s, '
- 'hostgroup id: %(id)s.')
- % {'name': host_group_name,
- 'id': hostgroup_id})
+ LOG.info(_LI('_add_host_into_hostgroup, hostgroup name: %(name)s, '
+ 'hostgroup id: %(id)s.'),
+ {'name': host_group_name,
+ 'id': hostgroup_id})
if hostgroup_id is None:
hostgroup_id = self._create_hostgroup(host_group_name)
lun_id = self._get_volume_by_name(volume_name)
view_id = self._find_mapping_view(mapping_view_name)
- LOG.info(_LI(
- '_mapping_hostgroup_and_lungroup, lun_group: %(lun_group)s, '
- 'view_id: %(view_id)s, lun_id: %(lun_id)s.')
- % {'lun_group': six.text_type(lungroup_id),
- 'view_id': six.text_type(view_id),
- 'lun_id': six.text_type(lun_id)})
+ LOG.info(_LI('_mapping_hostgroup_and_lungroup, lun_group: '
+ '%(lun_group)s, view_id: %(view_id)s, lun_id: '
+ '%(lun_id)s.'), {'lun_group': lungroup_id,
+ 'view_id': view_id,
+ 'lun_id': lun_id})
try:
# Create lungroup and add LUN into to lungroup.
except Exception:
with excutils.save_and_reraise_exception():
- err_msg = (_LE(
- 'Error occurred when adding hostgroup and lungroup to '
- 'view. Remove lun from lungroup now.'))
- LOG.error(err_msg)
+ LOG.error(_LE('Error occurred when adding hostgroup and '
+ 'lungroup to view. Remove lun from lungroup '
+ 'now.'))
self._remove_lun_from_lungroup(lungroup_id, lun_id)
return lun_id
initiator_name = connector['initiator']
volume_name = self._encode_name(volume['id'])
- LOG.info(_LI(
- 'initiator name: %(initiator_name)s, '
- 'volume name: %(volume)s.')
- % {'initiator_name': initiator_name,
- 'volume': volume_name})
+ LOG.info(_LI('initiator name: %(initiator_name)s, '
+ 'volume name: %(volume)s.'),
+ {'initiator_name': initiator_name,
+ 'volume': volume_name})
(iscsi_iqn, target_ip) = self._get_iscsi_params(connector)
- LOG.info(_LI(
- 'initialize_connection_iscsi,iscsi_iqn: %(iscsi_iqn)s, '
- 'target_ip: %(target_ip)s.')
- % {'iscsi_iqn': iscsi_iqn,
- 'target_ip': target_ip})
+ LOG.info(_LI('initialize_connection_iscsi,iscsi_iqn: %(iscsi_iqn)s, '
+ 'target_ip: %(target_ip)s.'),
+ {'iscsi_iqn': iscsi_iqn,
+ 'target_ip': target_ip})
# Create host_group if not exist.
host_name = connector['host']
hostlunid = self._find_host_lun_id(hostid, lun_id)
- LOG.info(_LI("initialize_connection_iscsi, host lun id is: %s.")
- % hostlunid)
+ LOG.info(_LI("initialize_connection_iscsi, host lun id is: %s."),
+ hostlunid)
# Return iSCSI properties.
properties = {}
properties['target_lun'] = int(hostlunid)
properties['volume_id'] = volume['id']
- LOG.info(_LI("initialize_connection_iscsi success. Return data: %s.")
- % properties)
+ LOG.info(_LI("initialize_connection_iscsi success. Return data: %s."),
+ properties)
return {'driver_volume_type': 'iscsi', 'data': properties}
@utils.synchronized('huawei', external=True)
host_name = connector['host']
volume_name = self._encode_name(volume['id'])
- LOG.info(_LI(
- 'initialize_connection_fc, initiator: %(initiator_name)s,'
- ' volume name: %(volume)s.')
- % {'initiator_name': wwns,
- 'volume': volume_name})
+ LOG.info(_LI('initialize_connection_fc, initiator: %(initiator_name)s,'
+ ' volume name: %(volume)s.'),
+ {'initiator_name': wwns,
+ 'volume': volume_name})
# Create host_group if not exist.
hostid = self._find_host(host_name)
hostgroup_id = self._add_host_into_hostgroup(hostid)
free_wwns = self._get_connected_free_wwns()
- LOG.info(_LI("initialize_connection_fc, the array has free wwns: %s")
- % free_wwns)
+ LOG.info(_LI("initialize_connection_fc, the array has free wwns: %s"),
+ free_wwns)
for wwn in wwns:
if wwn in free_wwns:
self._add_fc_port_to_host(hostid, wwn)
'volume_id': volume['id'],
'initiator_target_map': init_targ_map}}
- LOG.info(_LI("initialize_connection_fc, return data is: %s.")
- % info)
+ LOG.info(_LI("initialize_connection_fc, return data is: %s."), info)
return info
host_lun_id = hostassoinfo['HostLUNID']
break
except Exception as err:
- msg = (_LE("JSON transfer data error. %s") % err)
- LOG.error(msg)
+ LOG.error(_LE("JSON transfer data error. %s"), err)
raise
return host_lun_id
initiator_name = connector['initiator']
volume_name = self._encode_name(volume['id'])
lun_id = volume.get('provider_location', None)
- LOG.info(_LI(
- 'terminate_connection:volume name: %(volume)s, '
- 'initiator name: %(ini)s, '
- 'lun_id: %(lunid)s.')
- % {'volume': volume_name,
- 'ini': initiator_name,
- 'lunid': lun_id})
+ LOG.info(_LI('terminate_connection:volume name: %(volume)s, '
+ 'initiator name: %(ini)s, lun_id: %(lunid)s.'),
+ {'volume': volume_name,
+ 'ini': initiator_name,
+ 'lunid': lun_id})
if lun_id:
if self._check_lun_exist(lun_id) is True:
ip_info = self._get_iscsi_port_info(iscsiip)
iqn_prefix = self._get_iscsi_tgt_port()
- LOG.info(_LI('Request ip info is: %s.') % ip_info)
+ LOG.info(_LI('Request ip info is: %s.'), ip_info)
split_list = ip_info.split(".")
newstr = split_list[1] + split_list[2]
- LOG.info(_LI('New str info is: %s.') % newstr)
+ LOG.info(_LI('New str info is: %s.'), newstr)
if ip_info:
if newstr[0] == 'A':
iqn_suffix = iqn_suffix[i:]
break
iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsiip
- LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.') % iqn)
+ LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.'), iqn)
return iqn
else:
return None
try:
tree.write(filename, 'UTF-8')
except Exception as err:
- LOG.warning(_LW('Unable to access config file. %s') % err)
+ LOG.warning(_LW('Unable to access config file. %s'), err)
return logininfo
new_volume_size = int(new_size) * units.Gi / 512
volume_name = self._encode_name(volume['id'])
- LOG.info(_LI(
- 'Extend Volume: %(volumename)s, oldsize:'
- ' %(oldsize)s newsize: %(newsize)s.')
- % {'volumename': volume_name,
- 'oldsize': volume_size,
- 'newsize': new_volume_size})
+ LOG.info(_LI('Extend Volume: %(volumename)s, oldsize: %(oldsize)s '
+ 'newsize: %(newsize)s.'),
+ {'volumename': volume_name,
+ 'oldsize': volume_size,
+ 'newsize': new_volume_size})
lun_id = self._get_volume_by_name(volume_name)
else:
kvs = specs
- LOG.info(_LI('The QoS sepcs is: %s.') % kvs)
+ LOG.info(_LI('The QoS specs are: %s.'), kvs)
for key, value in kvs.iteritems():
if key in huawei_valid_keys:
qos[key.upper()] = value
out, err = self._ssh(ssh_cmd)
except processutils.ProcessExecutionError:
LOG.warning(_LW('_execute_command_and_parse_attributes: Failed to '
- 'run command: %s.'), six.text_type(ssh_cmd))
+ 'run command: %s.'), ssh_cmd)
# Does not raise exception when command encounters error.
# Only return and the upper logic decides what to do.
return None
def validate_connector(self, connector):
"""Check connector."""
if 'FC' == self._protocol and 'wwpns' not in connector:
- msg = (_LE('The connector does not contain the '
- 'required information: wwpns is missing'))
- LOG.error(msg)
+ LOG.error(_LE('The connector does not contain the '
+ 'required information: wwpns is missing'))
raise exception.InvalidConnectorException(missing='wwpns')
def create_volume(self, volume):
gpfs_state = lines[1].split(':')[state_token]
if gpfs_state != 'active':
LOG.error(_LE('GPFS is not active. Detailed output: %s.'), out)
- exception_message = (_('GPFS is not running, state: %s.') %
- gpfs_state)
- raise exception.VolumeBackendAPIException(data=exception_message)
+ raise exception.VolumeBackendAPIException(
+ data=_('GPFS is not running, state: %s.') % gpfs_state)
def _get_filesystem_from_path(self, path):
"""Return filesystem for specified path."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
+import six
from cinder import exception
from cinder.i18n import _, _LI, _LW
self._run_ssh(ssh_cmd)
except processutils.ProcessExecutionError as e:
msg = (_('Failed in _ssh_operation while execution of ssh_cmd:'
- '%(cmd)s. Error: %(error)s') % {'cmd': ssh_cmd, 'error': e})
+ '%(cmd)s. Error: %(error)s') %
+ {'cmd': ssh_cmd, 'error': six.text_type(e)})
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
msg = (_("Failed to resize volume "
"%(volume_id)s, error: %(error)s") %
{'volume_id': os.path.basename(path).split('-')[1],
- 'error': e.stderr})
+ 'error': six.text_type(e.stderr)})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return True
try:
(out, _err) = self._run_ssh(ssh_cmd, check_exit_code=False)
except processutils.ProcessExecutionError as e:
- msg = (_("Failed in _delete_snapfiles. Error: %s") % e.stderr)
+ msg = (_("Failed in _delete_snapfiles. Error: %s") %
+ six.text_type(e.stderr))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
fparent = None
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
- LOG.warn(_LW('Volume %s does not have '
- 'provider_location specified, '
- 'skipping.'), volume['name'])
+ LOG.warning(_LW('Volume %s does not have '
+ 'provider_location specified, '
+ 'skipping.'), volume['name'])
return
export_path = self._get_export_path(volume['id'])
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LE, _LW
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
"""
volume_defined = self._helpers.is_vdisk_defined(volume['name'])
if not volume_defined:
- LOG.error(_LE('ensure_export: Volume %s not found on storage')
- % volume['name'])
+ LOG.error(_LE('ensure_export: Volume %s not found on storage'),
+ volume['name'])
def create_export(self, ctxt, volume):
model_update = None
if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
valid = True
if not valid:
- msg = (_LE('The connector does not contain the required '
- 'information.'))
- LOG.error(msg)
+ LOG.error(_LE('The connector does not contain the required '
+ 'information.'))
raise exception.InvalidConnectorException(
missing='initiator or wwpns')
IO_group = volume_attributes['IO_group_id']
except KeyError as e:
LOG.error(_LE('Did not find expected column name in '
- 'lsvdisk: %s') % e)
- msg = (_('initialize_connection: Missing volume '
- 'attribute for volume %s') % volume_name)
- raise exception.VolumeBackendAPIException(data=msg)
+ 'lsvdisk: %s'), e)
+ raise exception.VolumeBackendAPIException(
+ data=_('initialize_connection: Missing volume attribute for '
+ 'volume %s') % volume_name)
try:
# Get preferred node and other nodes in I/O group
if not preferred_node_entry and not vol_opts['multipath']:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
- LOG.warn(_LW('initialize_connection: Did not find a preferred '
- 'node for volume %s') % volume_name)
+ LOG.warning(_LW('initialize_connection: Did not find a '
+ 'preferred node for volume %s'), volume_name)
properties = {}
properties['target_discovered'] = False
LOG.warning(_LW('Unable to find a preferred node match'
' for node %(node)s in the list of '
'available WWPNs on %(host)s. '
- 'Using first available.') %
+ 'Using first available.'),
{'node': preferred_node,
'host': host_name})
properties['target_wwn'] = conn_wwpns[0]
return replica_status
def extend_volume(self, volume, new_size):
- LOG.debug('enter: extend_volume: volume %s' % volume['id'])
+ LOG.debug('enter: extend_volume: volume %s', volume['id'])
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
allow_snaps=False)
if not ret:
extend_amt = int(new_size) - volume['size']
self._helpers.extend_vdisk(volume['name'], extend_amt)
- LOG.debug('leave: extend_volume: volume %s' % volume['id'])
+ LOG.debug('leave: extend_volume: volume %s', volume['id'])
def add_vdisk_copy(self, volume, dest_pool, vol_type):
return self._helpers.add_vdisk_copy(volume, dest_pool,
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
except KeyError:
- msg = (_('_rm_vdisk_copy_op: Volume %s does not have any '
- 'registered vdisk copy operations.') % volume['id'])
- LOG.error(msg)
+ LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any '
+ 'registered vdisk copy operations.'), volume['id'])
return
except ValueError:
- msg = (_('_rm_vdisk_copy_op: Volume %(vol)s does not have the '
- 'specified vdisk copy operation: orig=%(orig)s '
- 'new=%(new)s.')
- % {'vol': volume['id'], 'orig': orig_copy_id,
- 'new': new_copy_id})
- LOG.error(msg)
+ LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have '
+ 'the specified vdisk copy operation: orig=%(orig)s '
+ 'new=%(new)s.'),
+ {'vol': volume['id'], 'orig': orig_copy_id,
+ 'new': new_copy_id})
return
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if not curr_ops:
- msg = (_('_rm_vdisk_copy_op: Volume metadata %s does not have any '
- 'registered vdisk copy operations.') % volume['id'])
- LOG.error(msg)
+ LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not '
+ 'have any registered vdisk copy operations.'),
+ volume['id'])
return
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
try:
curr_ops_list.remove((orig_copy_id, new_copy_id))
except ValueError:
- msg = (_('_rm_vdisk_copy_op: Volume %(vol)s metadata does not '
- 'have the specified vdisk copy operation: orig=%(orig)s '
- 'new=%(new)s.')
- % {'vol': volume['id'], 'orig': orig_copy_id,
- 'new': new_copy_id})
- LOG.error(msg)
+ LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
+ 'not have the specified vdisk copy operation: '
+ 'orig=%(orig)s new=%(new)s.'),
+ {'vol': volume['id'], 'orig': orig_copy_id,
+ 'new': new_copy_id})
return
if len(curr_ops_list):
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
- LOG.warn(_LW('Volume %s does not exist.'), vol_id)
+ LOG.warning(_LW('Volume %s does not exist.'), vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
copy_op[1])
except Exception:
- msg = (_('_check_volume_copy_ops: Volume %(vol)s does not '
- 'have the specified vdisk copy operation: '
- 'orig=%(orig)s new=%(new)s.')
- % {'vol': volume['id'], 'orig': copy_op[0],
+ LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does '
+ 'not have the specified vdisk copy '
+ 'operation: orig=%(orig)s new=%(new)s.'),
+ {'vol': volume['id'], 'orig': copy_op[0],
'new': copy_op[1]})
- LOG.info(msg)
else:
if synced:
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
- LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s' %
+ LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host['host']})
false_ret = (False, None)
self._check_volume_copy_ops()
new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
- LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s' %
+ LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host['host']})
return (True, None)
self._state, (new, old))
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
- 'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
- 'new_type': new_type,
- 'diff': diff,
- 'host': host})
+ 'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
+ 'new_type': new_type,
+ 'diff': diff,
+ 'host': host})
ignore_keys = ['protocol', 'multipath']
no_copy_keys = ['warning', 'autoexpand', 'easytier']
new_type)
LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
- 'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
- 'new_type': new_type,
- 'diff': diff,
- 'host': host['host']})
+ 'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
+ 'new_type': new_type,
+ 'diff': diff,
+ 'host': host['host']})
return True, model_update
def manage_existing(self, volume, ref):
if 'active' == s:
wwpns.add(i)
node['WWPN'] = list(wwpns)
- LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s')
- % {'node': node['id'], 'wwpn': node['WWPN']})
+ LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s'),
+ {'node': node['id'], 'wwpn': node['WWPN']})
def add_chap_secret_to_host(self, host_name):
"""Generate and store a randomly-generated CHAP secret for the host."""
def get_host_from_connector(self, connector):
"""Return the Storwize host described by the connector."""
- LOG.debug('enter: get_host_from_connector: %s' % connector)
+ LOG.debug('enter: get_host_from_connector: %s', connector)
# If we have FC information, we have a faster lookup option
host_name = None
self.handle_keyerror('lsfabric', wwpn_info)
if host_name:
- LOG.debug('leave: get_host_from_connector: host %s' % host_name)
+ LOG.debug('leave: get_host_from_connector: host %s', host_name)
return host_name
# That didn't work, so try exhaustive search
if found:
break
- LOG.debug('leave: get_host_from_connector: host %s' % host_name)
+ LOG.debug('leave: get_host_from_connector: host %s', host_name)
return host_name
def create_host(self, connector):
host name (at most 55 characters), plus a random 8-character suffix to
avoid collisions. The total length should be at most 63 characters.
"""
- LOG.debug('enter: create_host: host %s' % connector['host'])
+ LOG.debug('enter: create_host: host %s', connector['host'])
# Before we start, make sure host name is a string and that we have at
# least one port.
for port in ports:
self.ssh.addhostport(host_name, port[0], port[1])
- LOG.debug('leave: create_host: host %(host)s - %(host_name)s' %
+ LOG.debug('leave: create_host: host %(host)s - %(host_name)s',
{'host': connector['host'], 'host_name': host_name})
return host_name
"""Create a mapping between a volume to a host."""
LOG.debug('enter: map_vol_to_host: volume %(volume_name)s to '
- 'host %(host_name)s'
- % {'volume_name': volume_name, 'host_name': host_name})
+ 'host %(host_name)s',
+ {'volume_name': volume_name, 'host_name': host_name})
# Check if this volume is already mapped to this host
mapped = False
multihostmap)
LOG.debug('leave: map_vol_to_host: LUN %(result_lun)s, volume '
- '%(volume_name)s, host %(host_name)s' %
+ '%(volume_name)s, host %(host_name)s',
{'result_lun': result_lun,
'volume_name': volume_name,
'host_name': host_name})
"""Unmap the volume and delete the host if it has no more mappings."""
LOG.debug('enter: unmap_vol_from_host: volume %(volume_name)s from '
- 'host %(host_name)s'
- % {'volume_name': volume_name, 'host_name': host_name})
+ 'host %(host_name)s',
+ {'volume_name': volume_name, 'host_name': host_name})
# Check if the mapping exists
resp = self.ssh.lsvdiskhostmap(volume_name)
if not len(resp):
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
- '%(vol_name)s to any host found.') %
+ '%(vol_name)s to any host found.'),
{'vol_name': volume_name})
return
if host_name is None:
if len(resp) > 1:
LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
'volume %(vol_name)s found, no host '
- 'specified.') % {'vol_name': volume_name})
+ 'specified.'), {'vol_name': volume_name})
return
else:
host_name = resp[0]['host_name']
found = True
if not found:
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
- '%(vol_name)s to host %(host)s found.') %
+ '%(vol_name)s to host %(host)s found.'),
{'vol_name': volume_name, 'host': host_name})
# We now know that the mapping exists
self.delete_host(host_name)
LOG.debug('leave: unmap_vol_from_host: volume %(volume_name)s from '
- 'host %(host_name)s'
- % {'volume_name': volume_name, 'host_name': host_name})
+ 'host %(host_name)s',
+ {'volume_name': volume_name, 'host_name': host_name})
@staticmethod
def build_default_opts(config):
return params
def create_vdisk(self, name, size, units, pool, opts):
- LOG.debug('enter: create_vdisk: vdisk %s ' % name)
+ LOG.debug('enter: create_vdisk: vdisk %s ', name)
params = self._get_vdisk_create_params(opts)
self.ssh.mkvdisk(name, size, units, pool, opts, params)
- LOG.debug('leave: _create_vdisk: volume %s ' % name)
+ LOG.debug('leave: _create_vdisk: volume %s ', name)
def get_vdisk_attributes(self, vdisk):
attrs = self.ssh.lsvdisk(vdisk)
def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
"""FlashCopy mapping check helper."""
- LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s' % name)
+ LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s', name)
mapping_ids = self._get_vdisk_fc_mappings(name)
wait_for_copy = False
for map_id in mapping_ids:
# before it finishes. Don't set the sleep interval shorter
# than the heartbeat. Otherwise volume service heartbeat
# will not be serviced.
- LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s'
- % name)
+ LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s',
+ name)
ret = timer.start(interval=self.check_fcmapping_interval).wait()
timer.stop()
return ret
def delete_vdisk(self, vdisk, force):
"""Ensures that vdisk is not part of FC mapping and deletes it."""
- LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)
+ LOG.debug('enter: delete_vdisk: vdisk %s', vdisk)
if not self.is_vdisk_defined(vdisk):
- LOG.info(_LI('Tried to delete non-existent vdisk %s.') % vdisk)
+ LOG.info(_LI('Tried to delete non-existent vdisk %s.'), vdisk)
return
self.ensure_vdisk_no_fc_mappings(vdisk)
self.ssh.rmvdisk(vdisk, force=force)
- LOG.debug('leave: delete_vdisk: vdisk %s' % vdisk)
+ LOG.debug('leave: delete_vdisk: vdisk %s', vdisk)
def create_copy(self, src, tgt, src_id, config, opts,
full_copy, pool=None):
"""Create a new snapshot using FlashCopy."""
- LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s' %
+ LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s',
{'tgt': tgt, 'src': src})
src_attrs = self.get_vdisk_attributes(src)
self.delete_vdisk(tgt, True)
LOG.debug('leave: _create_copy: snapshot %(tgt)s from '
- 'vdisk %(src)s' %
+ 'vdisk %(src)s',
{'tgt': tgt, 'src': src})
def extend_vdisk(self, vdisk, amount):
def change_vdisk_iogrp(self, vdisk, state, iogrp):
if state['code_level'] < (6, 4, 0, 0):
LOG.debug('Ignore change IO group as storage code level is '
- '%(code_level)s, below the required 6.4.0.0' %
+ '%(code_level)s, below the required 6.4.0.0',
{'code_level': state['code_level']})
else:
self.ssh.movevdisk(vdisk, str(iogrp[0]))
from oslo_log import log as logging
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
self.driver._helpers.rm_vdisk_copy(volume['name'],
secondary['copy_id'])
else:
- LOG.info(('Could not find replica to delete of'
- ' volume %(vol)s.') % {'vol': vdisk})
+ LOG.info(_LI('Could not find replica to delete of'
+ ' volume %(vol)s.'), {'vol': vdisk})
def test_replica(self, tgt_volume, src_volume):
vdisk = src_volume['name']
if not multihostmap:
LOG.error(_LE('storwize_svc_multihostmap_enabled is set '
'to False, not allowing multi host mapping.'))
- msg = 'CMMVC6071E The VDisk-to-host mapping '\
- 'was not created because the VDisk is '\
- 'already mapped to a host.\n"'
- raise exception.VolumeDriverException(message=msg)
+ raise exception.VolumeDriverException(
+ message=_('CMMVC6071E The VDisk-to-host mapping was not '
+ 'created because the VDisk is already mapped '
+ 'to a host.\n"'))
ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
return self.run_ssh_check_created(ssh_cmd)
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
+import six
from cinder.brick import exception as brick_exception
from cinder.brick.local_dev import lvm as lvm
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
- msg = (_LE('Volume device file path %s does not exist.')
+ msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('volume_size') or volume.get('size')
if size_in_g is None:
- msg = (_LE("Size for volume: %s not found, "
- "cannot secure delete.") % volume['id'])
+ msg = (_("Size for volume: %s not found, cannot secure delete.")
+ % volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
- LOG.debug(("Updating volume stats"))
+ LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning(_LW('Unable to update stats on non-initialized '
'Volume Group: %s'),
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
- % exc.stderr)
+ % six.text_type(exc.stderr))
raise exception.VolumeBackendAPIException(
data=exception_message)
return True
if self.vg.lv_has_snapshot(volume['name']):
- LOG.error(_LE('Unabled to delete due to existing snapshot '
- 'for volume: %s') % volume['name'])
+ LOG.error(_LE('Unable to delete due to existing snapshot '
+ 'for volume: %s'), volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
- "skipping delete operations") % snapshot['name'])
+ "skipping delete operations"), snapshot['name'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
return True
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
- LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
+ LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
try:
(vg for vg in vg_list if vg['name'] == dest_vg).next()
except StopIteration:
- message = (_LE("Destination Volume Group %s does not exist") %
- dest_vg)
- LOG.error(message)
+ LOG.error(_LE("Destination Volume Group %s does not exist"),
+ dest_vg)
return false_ret
helper = utils.get_root_helper()
else:
message = (_("Refusing to migrate volume ID: %(id)s. Please "
"check your configuration because source and "
- "destination are the same Volume Group: %(name)s."),
+ "destination are the same Volume Group: %(name)s.") %
{'id': volume['id'], 'name': self.vg.vg_name})
LOG.exception(message)
raise exception.VolumeBackendAPIException(data=message)
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
app_version = na_utils.OpenStackInfo().info()
- LOG.info(_LI('OpenStack OS Version Info: %(info)s') % {
- 'info': app_version})
+ LOG.info(_LI('OpenStack OS Version Info: %(info)s'),
+ {'info': app_version})
kwargs['app_version'] = app_version
return NetAppDriver.create_driver(config.netapp_storage_family,
fmt = {'storage_family': storage_family,
'storage_protocol': storage_protocol}
LOG.info(_LI('Requested unified config: %(storage_family)s and '
- '%(storage_protocol)s.') % fmt)
+ '%(storage_protocol)s.'), fmt)
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
if family_meta is None:
kwargs['netapp_mode'] = 'proxy'
driver = importutils.import_object(driver_loc, *args, **kwargs)
LOG.info(_LI('NetApp driver of family %(storage_family)s and protocol '
- '%(storage_protocol)s loaded.') % fmt)
+ '%(storage_protocol)s loaded.'), fmt)
return driver
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import units
-import six
from cinder import exception
from cinder.i18n import _, _LW
if self._get_vol_option(volume_name, 'root') == 'true':
return volume_name
LOG.warning(_LW('Could not determine root volume name '
- 'on %s.') % self._get_owner())
+ 'on %s.'), self._get_owner())
return None
def _get_owner(self):
self.vol_refresh_time = timeutils.utcnow()
except Exception as e:
LOG.warning(_LW("Error refreshing volume info. Message: %s"),
- six.text_type(e))
+ e)
finally:
na_utils.set_safe_attr(self, 'vol_refresh_running', False)
if prop in self.metadata:
return self.metadata[prop]
name = self.name
- msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
- msg_fmt = {'prop': prop, 'name': name}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("No metadata property %(prop)s defined for the LUN %(name)s",
+ {'prop': prop, 'name': name})
def __str__(self, *args, **kwargs):
return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
def create_volume(self, volume):
"""Driver entry point for creating a new volume (Data ONTAP LUN)."""
- LOG.debug('create_volume on %s' % volume['host'])
+ LOG.debug('create_volume on %s', volume['host'])
# get Data ONTAP volume name as pool name
ontap_volume_name = volume_utils.extract_host(volume['host'],
self._create_lun(ontap_volume_name, lun_name, size,
metadata, qos_policy_group)
- LOG.debug('Created LUN with name %s' % lun_name)
+ LOG.debug('Created LUN with name %s', lun_name)
metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
metadata['Volume'] = ontap_volume_name
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
if not metadata:
- msg = _LW("No entry in LUN table for volume/snapshot %(name)s.")
- msg_fmt = {'name': name}
- LOG.warning(msg % msg_fmt)
+ LOG.warning(_LW("No entry in LUN table for volume/snapshot"
+ " %(name)s."), {'name': name})
return
self.zapi_client.destroy_lun(metadata['Path'])
self.lun_table.pop(name)
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
self.delete_volume(snapshot)
- LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
+ LOG.debug("Snapshot %s deletion successful", snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Driver entry point for creating a new volume from a snapshot.
except exception.VolumeNotFound as e:
LOG.error(_LE("Message: %s"), e.msg)
except Exception as e:
- LOG.error(_LE("Error getting LUN attribute. Exception: %s"),
- e.__str__())
+ LOG.error(_LE("Error getting LUN attribute. Exception: %s"), e)
return None
def _create_lun_meta(self, lun):
else:
LOG.error(_LE("Unknown exception in"
" post clone resize LUN %s."), seg[-1])
- LOG.error(_LE("Exception details: %s") % (e.__str__()))
+ LOG.error(_LE("Exception details: %s"), e)
def _get_lun_block_count(self, path):
"""Gets block counts for the LUN."""
name = volume['name']
lun_id = self._map_lun(name, [initiator_name], 'iscsi', None)
- msg = "Mapped LUN %(name)s to the initiator %(initiator_name)s"
- msg_fmt = {'name': name, 'initiator_name': initiator_name}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Mapped LUN %(name)s to the initiator %(initiator_name)s",
+ {'name': name, 'initiator_name': initiator_name})
target_list = self.zapi_client.get_iscsi_target_details()
if not target_list:
- msg = _('Failed to get LUN target list for the LUN %s')
- raise exception.VolumeBackendAPIException(data=msg % name)
+ raise exception.VolumeBackendAPIException(
+ data=_('Failed to get LUN target list for the LUN %s') % name)
- msg = ("Successfully fetched target list for LUN %(name)s and "
- "initiator %(initiator_name)s")
- msg_fmt = {'name': name, 'initiator_name': initiator_name}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Successfully fetched target list for LUN %(name)s and "
+ "initiator %(initiator_name)s",
+ {'name': name, 'initiator_name': initiator_name})
preferred_target = self._get_preferred_target_from_list(
target_list)
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, [initiator_name])
- msg = _("Unmapped LUN %(name)s from the initiator %(initiator_name)s")
- msg_fmt = {'name': name, 'initiator_name': initiator_name}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Unmapped LUN %(name)s from the initiator "
+ "%(initiator_name)s",
+ {'name': name, 'initiator_name': initiator_name})
def initialize_connection_fc(self, volume, connector):
"""Initializes the connection and returns connection info.
lun_id = self._map_lun(volume_name, initiators, 'fcp', None)
- msg = _("Mapped LUN %(name)s to the initiator(s) %(initiators)s")
- msg_fmt = {'name': volume_name, 'initiators': initiators}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s",
+ {'name': volume_name, 'initiators': initiators})
target_wwpns, initiator_target_map, num_paths = \
self._build_initiator_target_map(connector)
if target_wwpns:
- msg = _("Successfully fetched target details for LUN %(name)s "
- "and initiator(s) %(initiators)s")
- msg_fmt = {'name': volume_name, 'initiators': initiators}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Successfully fetched target details for LUN %(name)s "
+ "and initiator(s) %(initiators)s",
+ {'name': volume_name, 'initiators': initiators})
else:
- msg = _('Failed to get LUN target details for the LUN %s')
- raise exception.VolumeBackendAPIException(data=msg % volume_name)
+ raise exception.VolumeBackendAPIException(
+ data=_('Failed to get LUN target details for '
+ 'the LUN %s') % volume_name)
target_info = {'driver_volume_type': 'fibre_channel',
'data': {'target_discovered': True,
self._unmap_lun(path, initiators)
- msg = _("Unmapped LUN %(name)s from the initiator %(initiators)s")
- msg_fmt = {'name': name, 'initiators': initiators}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Unmapped LUN %(name)s from the initiator %(initiators)s",
+ {'name': name, 'initiators': initiators})
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
volume = metadata['Volume']
self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
src_block=0, dest_block=0, block_count=0)
- LOG.debug("Cloned LUN with new name %s" % new_name)
+ LOG.debug("Cloned LUN with new name %s", new_name)
lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
path='/vol/%s/%s'
% (volume, new_name))
self._password = password
self._refresh_conn = True
- LOG.debug('Using NetApp controller: %s' % self._host)
+ LOG.debug('Using NetApp controller: %s', self._host)
def get_transport_type(self):
"""Get the transport type protocol."""
lun_list.extend(luns)
except netapp_api.NaApiError:
LOG.warning(_LW("Error finding LUNs for volume %s."
- " Verify volume exists.") % vol)
+ " Verify volume exists."), vol)
else:
luns = self._get_vol_luns(None)
lun_list.extend(luns)
if clone_ops_info.get_child_content('clone-state')\
== 'completed':
LOG.debug("Clone operation with src %(name)s"
- " and dest %(new_name)s completed" % fmt)
+ " and dest %(new_name)s completed", fmt)
else:
LOG.debug("Clone operation with src %(name)s"
- " and dest %(new_name)s failed" % fmt)
+ " and dest %(new_name)s failed", fmt)
raise netapp_api.NaApiError(
clone_ops_info.get_child_content('error'),
clone_ops_info.get_child_content('reason'))
% (export_path))
def clone_file(self, src_path, dest_path):
- msg_fmt = {'src_path': src_path, 'dest_path': dest_path}
- LOG.debug("""Cloning with src %(src_path)s, dest %(dest_path)s"""
- % msg_fmt)
+ LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s",
+ {'src_path': src_path, 'dest_path': dest_path})
clone_start = netapp_api.NaElement.create_node_with_children(
'clone-start',
**{'source-path': src_path,
'file-usage-get', **{'path': path})
res = self.connection.invoke_successfully(file_use)
bytes = res.get_child_content('unique-bytes')
- LOG.debug('file-usage for path %(path)s is %(bytes)s'
- % {'path': path, 'bytes': bytes})
+ LOG.debug('file-usage for path %(path)s is %(bytes)s',
+ {'path': path, 'bytes': bytes})
return bytes
def get_ifconfig(self):
self.connection.invoke_successfully(lun_create, True)
except netapp_api.NaApiError as ex:
with excutils.save_and_reraise_exception():
- msg = _LE("Error provisioning volume %(lun_name)s on "
- "%(volume_name)s. Details: %(ex)s")
- msg_args = {'lun_name': lun_name,
- 'volume_name': volume_name,
- 'ex': six.text_type(ex)}
- LOG.error(msg % msg_args)
+ LOG.error(_LE("Error provisioning volume %(lun_name)s on "
+ "%(volume_name)s. Details: %(ex)s"),
+ {'lun_name': lun_name,
+ 'volume_name': volume_name,
+ 'ex': ex})
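# A minimal sketch (standard library `logging`, which oslo.log wraps) of the
# pattern the hunks above apply: the substitution values are passed as an
# argument so interpolation is deferred until a handler actually emits the
# record; suppressed levels then cost nothing and the %s conversion calls
# str() on the exception for us. All names here are illustrative only.
import logging

LOG = logging.getLogger(__name__)

def provision(lun_name, volume_name):
    try:
        raise RuntimeError("backend unavailable")  # stand-in for NaApiError
    except RuntimeError as ex:
        # Formatting happens inside the logging machinery, not at call time.
        LOG.error("Error provisioning volume %(lun_name)s on "
                  "%(volume_name)s. Details: %(ex)s",
                  {'lun_name': lun_name, 'volume_name': volume_name,
                   'ex': ex})
        raise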
def destroy_lun(self, path, force=True):
"""Destroys the LUN at the path."""
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split("/")
- LOG.debug("Destroyed LUN %s" % seg[-1])
+ LOG.debug("Destroyed LUN %s", seg[-1])
def map_lun(self, path, igroup_name, lun_id=None):
"""Maps LUN to the initiator and returns LUN id assigned."""
except netapp_api.NaApiError as e:
code = e.code
message = e.message
- msg = _LW('Error mapping LUN. Code :%(code)s, Message:%(message)s')
- msg_fmt = {'code': code, 'message': message}
- LOG.warning(msg % msg_fmt)
+ LOG.warning(_LW('Error mapping LUN. Code :%(code)s, Message: '
+ '%(message)s'), {'code': code, 'message': message})
raise
def unmap_lun(self, path, igroup_name):
try:
self.connection.invoke_successfully(lun_unmap, True)
except netapp_api.NaApiError as e:
- msg = _LW("Error unmapping LUN. Code :%(code)s,"
- " Message:%(message)s")
- msg_fmt = {'code': e.code, 'message': e.message}
exc_info = sys.exc_info()
- LOG.warning(msg % msg_fmt)
+ LOG.warning(_LW("Error unmapping LUN. Code :%(code)s, Message: "
+ "%(message)s"), {'code': e.code,
+ 'message': e.message})
# if the LUN is already unmapped
if e.code == '13115' or e.code == '9016':
pass
geometry['max_resize'] =\
result.get_child_content("max-resize-size")
except Exception as e:
- LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s")
- % {'path': path, 'msg': e.message})
+ LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s"),
+ {'path': path, 'msg': e.message})
return geometry
def get_volume_options(self, volume_name):
"""Moves the LUN at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
- LOG.debug("Moving LUN %(name)s to %(new_name)s."
- % {'name': seg[-1], 'new_name': new_seg[-1]})
+ LOG.debug("Moving LUN %(name)s to %(new_name)s.",
+ {'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = netapp_api.NaElement("lun-move")
lun_move.add_new_child("path", path)
lun_move.add_new_child("new-path", new_path)
na_server.invoke_successfully(ems, True)
LOG.debug("ems executed successfully.")
except netapp_api.NaApiError as e:
- LOG.warning(_LW("Failed to invoke ems. Message : %s") % e)
+ LOG.warning(_LW("Failed to invoke ems. Message : %s"), e)
finally:
requester.last_ems = timeutils.utcnow()
attr_list = result.get_child_by_name('attributes-list')
iscsi_service = attr_list.get_child_by_name('iscsi-service-info')
return iscsi_service.get_child_content('node-name')
- LOG.debug('No iSCSI service found for vserver %s' % (self.vserver))
+ LOG.debug('No iSCSI service found for vserver %s', self.vserver)
return None
def get_lun_list(self):
def clone_file(self, flex_vol, src_path, dest_path, vserver,
dest_exists=False):
"""Clones file on vserver."""
- msg = ("Cloning with params volume %(volume)s, src %(src_path)s,"
- "dest %(dest_path)s, vserver %(vserver)s")
- msg_fmt = {'volume': flex_vol, 'src_path': src_path,
- 'dest_path': dest_path, 'vserver': vserver}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, "
+ "dest %(dest_path)s, vserver %(vserver)s",
+ {'volume': flex_vol, 'src_path': src_path,
+ 'dest_path': dest_path, 'vserver': vserver})
clone_create = netapp_api.NaElement.create_node_with_children(
'clone-create',
**{'volume': flex_vol, 'source-path': src_path,
'file-usage-get', **{'path': path})
res = self._invoke_vserver_api(file_use, vserver)
unique_bytes = res.get_child_content('unique-bytes')
- LOG.debug('file-usage for path %(path)s is %(bytes)s'
- % {'path': path, 'bytes': unique_bytes})
+ LOG.debug('file-usage for path %(path)s is %(bytes)s',
+ {'path': path, 'bytes': unique_bytes})
return unique_bytes
def get_vserver_ips(self, vserver):
from oslo_log import log as logging
from oslo_utils import units
-import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
:param volume: volume reference
"""
- LOG.debug('create_volume on %s' % volume['host'])
+ LOG.debug('create_volume on %s', volume['host'])
self._ensure_shares_mounted()
# get share as pool name
raise exception.InvalidHost(reason=msg)
volume['provider_location'] = share
- LOG.info(_LI('Creating volume at location %s')
- % volume['provider_location'])
+ LOG.info(_LI('Creating volume at location %s'),
+ volume['provider_location'])
try:
self._do_create_volume(volume)
except Exception as ex:
LOG.error(_LE("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s")
- % {'name': volume['name'],
- 'share': volume['provider_location'],
- 'ex': six.text_type(ex)})
+ "share %(share)s. Details: %(ex)s"),
+ {'name': volume['name'],
+ 'share': volume['provider_location'],
+ 'ex': ex})
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(
data=msg % (volume['name']))
volume['provider_location'], file_name)
except Exception as e:
LOG.warning(_LW('Exception while registering image %(image_id)s'
- ' in cache. Exception: %(exc)s')
- % {'image_id': image_id, 'exc': e.__str__()})
+ ' in cache. Exception: %(exc)s'),
+ {'image_id': image_id, 'exc': e})
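# A short sketch (plain `logging`; the image id is made up) of why the hunks
# above drop explicit e.__str__() and six.text_type(e) calls: %s applies
# str() to the exception when the record is formatted, so passing the
# exception object directly is enough.
import logging

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger(__name__)

try:
    int("not a number")
except ValueError as exc:
    LOG.warning('Exception while registering image %(image_id)s in cache. '
                'Exception: %(exc)s',
                {'image_id': 'demo-image-id', 'exc': exc})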
def _find_image_in_cache(self, image_id):
"""Finds image in cache and returns list of shares with file name."""
file_path = '%s/%s' % (dir, file_name)
if os.path.exists(file_path):
LOG.debug('Found cache file for image %(image_id)s'
- ' on share %(share)s'
- % {'image_id': image_id, 'share': share})
+ ' on share %(share)s',
+ {'image_id': image_id, 'share': share})
result.append((share, file_name))
return result
continue
except Exception as e:
LOG.warning(_LW('Exception during cache cleaning'
- ' %(share)s. Message - %(ex)s')
- % {'share': share, 'ex': e.__str__()})
+ ' %(share)s. Message - %(ex)s'),
+ {'share': share, 'ex': e})
continue
finally:
LOG.debug('Image cache cleaning done.')
self._execute(*cmd, run_as_root=self._execute_as_root)
return True
except Exception as ex:
- LOG.warning(_LW('Exception during deleting %s'), ex.__str__())
+ LOG.warning(_LW('Exception during deleting %s'), ex)
return False
def clone_image(self, context, volume,
if cloned:
post_clone = self._post_clone_image(volume)
except Exception as e:
- msg = e.msg if getattr(e, 'msg', None) else e.__str__()
+ msg = e.msg if getattr(e, 'msg', None) else e
LOG.info(_LI('Image cloning unsuccessful for image'
- ' %(image_id)s. Message: %(msg)s')
- % {'image_id': image_id, 'msg': msg})
+ ' %(image_id)s. Message: %(msg)s'),
+ {'image_id': image_id, 'msg': msg})
vol_path = self.local_path(volume)
volume['provider_location'] = None
if os.path.exists(vol_path):
try:
return _move_file(source_path, dest_path)
except Exception as e:
- LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s')
- % {'src': source_path, 'e': e})
+ LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s'),
+ {'src': source_path, 'e': e})
return False
def _get_export_ip_path(self, volume_id=None, share=None):
:param volume: volume reference
"""
- LOG.debug('create_volume on %s' % volume['host'])
+ LOG.debug('create_volume on %s', volume['host'])
self._ensure_shares_mounted()
# get share as pool name
try:
volume['provider_location'] = share
- LOG.info(_LI('casted to %s') % volume['provider_location'])
+ LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
if qos_policy_group:
self._set_qos_policy_group_on_volume(volume, share,
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
- LOG.error(_LW("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s")
- % {'name': volume['name'],
- 'share': volume['provider_location'],
- 'ex': ex})
+ LOG.error(_LE("Exception creating vol %(name)s on "
+ "share %(share)s. Details: %(ex)s"),
+ {'name': volume['name'],
+ 'share': volume['provider_location'],
+ 'ex': ex})
volume['provider_location'] = None
finally:
if self.ssc_enabled:
def _is_share_vol_type_match(self, volume, share):
"""Checks if share matches volume type."""
netapp_vol = self._get_vol_for_share(share)
- LOG.debug("Found volume %(vol)s for share %(share)s."
- % {'vol': netapp_vol, 'share': share})
+ LOG.debug("Found volume %(vol)s for share %(share)s.",
+ {'vol': netapp_vol, 'share': share})
extra_specs = na_utils.get_volume_extra_specs(volume)
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols
self._try_copyoffload(context, volume, image_service, image_id)
copy_success = True
LOG.info(_LI('Copied image %(img)s to volume %(vol)s using '
- 'copy offload workflow.')
- % {'img': image_id, 'vol': volume['id']})
+ 'copy offload workflow.'),
+ {'img': image_id, 'vol': volume['id']})
else:
LOG.debug("Copy offload either not configured or"
" unsupported.")
else:
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
self._discover_file_till_timeout(dst_img_local, timeout=120)
- LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
- % {'img': image_id, 'tmp': tmp_img_file})
+ LOG.debug('Copied image %(img)s to tmp file %(tmp)s.',
+ {'img': image_id, 'tmp': tmp_img_file})
dst_img_cache_local = os.path.join(dst_dir,
'img-cache-%s' % image_id)
if img_info['disk_format'] == 'raw':
self._clone_file_dst_exists(dst_share, tmp_img_file,
volume['name'], dest_exists=True)
self._move_nfs_file(dst_img_local, dst_img_cache_local)
- LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
- % {'img': image_id, 'vol': volume['id']})
+ LOG.debug('Copied raw image %(img)s to volume %(vol)s.',
+ {'img': image_id, 'vol': volume['id']})
else:
LOG.debug('Image will be converted to raw %s.', image_id)
img_conv = six.text_type(uuid.uuid4())
self._move_nfs_file(dst_img_conv_local,
dst_img_cache_local)
LOG.debug('Copied locally converted raw image'
- ' %(img)s to volume %(vol)s.'
- % {'img': image_id, 'vol': volume['id']})
+ ' %(img)s to volume %(vol)s.',
+ {'img': image_id, 'vol': volume['id']})
finally:
if os.path.exists(dst_img_conv_local):
self._delete_file(dst_img_conv_local)
vols.add(vol)
except KeyError as e:
LOG.debug('Unexpected error while creating'
- ' ssc vol list. Message - %s' % six.text_type(e))
+ ' ssc vol list. Message - %s', e)
continue
return vols
def refresh_stale_ssc():
stale_vols = backend._update_stale_vols(reset=True)
LOG.info(_LI('Running stale ssc refresh job for %(server)s'
- ' and vserver %(vs)s')
- % {'server': na_server, 'vs': vserver})
+ ' and vserver %(vs)s'),
+ {'server': na_server, 'vs': vserver})
# refreshing single volumes can create inconsistency
# hence doing manipulations on copy
ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
vol_set.discard(vol)
backend.refresh_ssc_vols(ssc_vols_copy)
LOG.info(_LI('Successfully completed stale refresh job for'
- ' %(server)s and vserver %(vs)s')
- % {'server': na_server, 'vs': vserver})
+ ' %(server)s and vserver %(vs)s'),
+ {'server': na_server, 'vs': vserver})
refresh_stale_ssc()
finally:
@utils.synchronized(lock_pr)
def get_latest_ssc():
LOG.info(_LI('Running cluster latest ssc job for %(server)s'
- ' and vserver %(vs)s')
- % {'server': na_server, 'vs': vserver})
+ ' and vserver %(vs)s'),
+ {'server': na_server, 'vs': vserver})
ssc_vols = get_cluster_ssc(na_server, vserver)
backend.refresh_ssc_vols(ssc_vols)
backend.ssc_run_time = timeutils.utcnow()
LOG.info(_LI('Successfully completed ssc job for %(server)s'
- ' and vserver %(vs)s')
- % {'server': na_server, 'vs': vserver})
+ ' and vserver %(vs)s'),
+ {'server': na_server, 'vs': vserver})
get_latest_ssc()
finally:
if 'storedPassword' in scrubbed_data:
scrubbed_data['storedPassword'] = "****"
- params = {'m': method, 'p': path, 'd': scrubbed_data,
- 'sys': use_system, 't': timeout, 'v': verify, 'k': kwargs}
LOG.debug("Invoking rest with method: %(m)s, path: %(p)s,"
" data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
- " verify: %(v)s, kwargs: %(k)s." % (params))
+ " verify: %(v)s, kwargs: %(k)s.",
+ {'m': method, 'p': path, 'd': scrubbed_data,
+ 'sys': use_system, 't': timeout, 'v': verify, 'k': kwargs})
url = self._get_resource_url(path, use_system, **kwargs)
if self._content_type == 'json':
headers = {'Accept': 'application/json',
def map_volume_to_single_host(client, volume, eseries_vol, host,
vol_map):
"""Maps the e-series volume to host with initiator."""
- msg = "Attempting to map volume %s to single host."
- LOG.debug(msg % volume['id'])
+ LOG.debug("Attempting to map volume %s to single host." % volume['id'])
# If volume is not mapped on the backend, map directly to host
if not vol_map:
# If volume is not currently attached according to Cinder, it is
# safe to delete the mapping
if not (volume['attach_status'] == 'attached'):
- msg = (_("Volume %(vol)s is not currently attached, "
- "moving existing mapping to host %(host)s.")
- % {'vol': volume['id'], 'host': host['label']})
- LOG.debug(msg)
+ LOG.debug("Volume %(vol)s is not currently attached, moving "
+ "existing mapping to host %(host)s.",
+ {'vol': volume['id'], 'host': host['label']})
mappings = _get_vol_mapping_for_host_frm_array(
client, host['hostRef'])
lun = _get_free_lun(client, host, mappings)
mapping):
"""Maps the e-series volume to multiattach host group."""
- msg = "Attempting to map volume %s to multiple hosts."
- LOG.debug(msg % volume['id'])
+ LOG.debug("Attempting to map volume %s to multiple hosts." % volume['id'])
# If volume is already mapped to desired host, return the mapping
if mapping['mapRef'] == target_host['hostRef']:
# Once both existing and target hosts are in the multiattach host group,
# move the volume mapping to said group.
if not mapped_host_group:
- msg = "Moving mapping for volume %s to multiattach host group."
- LOG.debug(msg % volume['id'])
+ LOG.debug("Moving mapping for volume %s to multiattach host group.",
+ volume['id'])
return client.move_volume_mapping_via_symbol(
mapping.get('lunMappingRef'),
multiattach_host_group['clusterRef'],
def unmap_volume_from_host(client, volume, host, mapping):
# Volume is mapped directly to host, so delete the mapping
if mapping.get('mapRef') == host['hostRef']:
- msg = ("Volume %(vol)s is mapped directly to host %(host)s; removing "
- "mapping.")
- LOG.debug(msg % {'vol': volume['id'], 'host': host['label']})
+ LOG.debug("Volume %(vol)s is mapped directly to host %(host)s; "
+ "removing mapping.", {'vol': volume['id'],
+ 'host': host['label']})
client.delete_volume_mapping(mapping['lunMappingRef'])
return
# Remove mapping if volume should no longer be attached after this
# operation.
if volume['status'] == 'detaching':
- msg = ("Volume %s is mapped directly to multiattach host group "
- "but is not currently attached; removing mapping.")
- LOG.debug(msg % volume['id'])
+ LOG.debug("Volume %s is mapped directly to multiattach host group but "
+ "is not currently attached; removing mapping.", volume['id'])
client.delete_volume_mapping(mapping['lunMappingRef'])
def _check_multipath(self):
if not self.configuration.use_multipath_for_image_xfer:
- msg = _LW('Production use of "%(backend)s" backend requires the '
- 'Cinder controller to have multipathing properly set up '
- 'and the configuration option "%(mpflag)s" to be set to '
- '"True".') % {'backend': self._backend_name,
- 'mpflag': 'use_multipath_for_image_xfer'}
- LOG.warning(msg)
+ LOG.warning(_LW('Production use of "%(backend)s" backend requires '
+ 'the Cinder controller to have multipathing '
+ 'properly set up and the configuration option '
+ '"%(mpflag)s" to be set to "True".'),
+ {'backend': self._backend_name,
+ 'mpflag': 'use_multipath_for_image_xfer'})
def _ensure_multi_attach_host_group_exists(self):
try:
host_group = self._client.get_host_group_by_name(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
- msg = _LI("The multi-attach E-Series host group '%(label)s' "
- "already exists with clusterRef %(clusterRef)s")
- LOG.info(msg % host_group)
+ LOG.info(_LI("The multi-attach E-Series host group '%(label)s' "
+ "already exists with clusterRef %(clusterRef)s"),
+ host_group)
except exception.NotFound:
host_group = self._client.create_host_group(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
- msg = _LI("Created multi-attach E-Series host group '%(label)s' "
- "with clusterRef %(clusterRef)s")
- LOG.info(msg % host_group)
+ LOG.info(_LI("Created multi-attach E-Series host group %(label)s "
+ "with clusterRef %(clusterRef)s"), host_group)
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
ip = na_utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
- LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
- % {'host': host, 'e': e})
+ LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'),
+ {'host': host, 'e': e})
raise exception.NoValidHost(
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
- % {'host': host, 'e': e})
+ % {'host': host, 'e': six.text_type(e)})
ips = self.configuration.netapp_controller_ips
ips = [i.strip() for i in ips.split(",")]
system = self._client.list_storage_system()
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
- msg = _LI("System with controller addresses [%s] is not"
- " registered with web service.")
- LOG.info(msg % self.configuration.netapp_controller_ips)
+ LOG.info(_LI("System with controller addresses [%s] is not "
+ "registered with web service."),
+ self.configuration.netapp_controller_ips)
password_not_in_sync = False
if system.get('status', '').lower() == 'passwordoutofsync':
password_not_in_sync = True
msg_dict = {'id': system.get('id'), 'status': status}
if (status == 'passwordoutofsync' or status == 'notsupported' or
status == 'offline'):
- msg = _("System %(id)s found with bad status - %(status)s.")
- raise exception.NetAppDriverException(msg % msg_dict)
- LOG.info(_LI("System %(id)s has %(status)s status.") % msg_dict)
+ raise exception.NetAppDriverException(
+ _("System %(id)s found with bad status - "
+ "%(status)s.") % msg_dict)
+ LOG.info(_LI("System %(id)s has %(status)s status."), msg_dict)
return True
def _populate_system_objects(self):
def create_volume(self, volume):
"""Creates a volume."""
- LOG.debug('create_volume on %s' % volume['host'])
+ LOG.debug('create_volume on %s', volume['host'])
# get E-series pool label as pool name
eseries_pool_label = volume_utils.extract_host(volume['host'],
"label %s."), eseries_volume_label)
except exception.NetAppDriverException as e:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error creating volume. Msg - %s."),
- six.text_type(e))
+ LOG.error(_LE("Error creating volume. Msg - %s."), e)
return vol
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
- LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
- % {'src': src_vol['label'], 'dst': dst_vol['label']})
+ LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s."),
+ {'src': src_vol['label'], 'dst': dst_vol['label']})
try:
job = None
job = self._client.create_volume_copy_job(src_vol['id'],
'pending' or j_st['status'] == 'unknown'):
time.sleep(self.SLEEP_SECS)
continue
- if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
+ if j_st['status'] == 'failed' or j_st['status'] == 'halted':
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
- msg = _("Vol copy job for dest %s failed.")\
- % dst_vol['label']
- raise exception.NetAppDriverException(msg)
- LOG.info(_LI("Vol copy job completed for dest %s.")
- % dst_vol['label'])
+ raise exception.NetAppDriverException(
+ _("Vol copy job for dest %s failed.") %
+ dst_vol['label'])
+ LOG.info(_LI("Vol copy job completed for dest %s."),
+ dst_vol['label'])
break
finally:
if job:
try:
snap_grp = self._get_cached_snapshot_grp(snapshot['id'])
except KeyError:
- LOG.warning(_LW("Snapshot %s already deleted.") % snapshot['id'])
+ LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id'])
return
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
snapshot_name = snap_grp['label']
current_map)
lun_id = mapping['lun']
- msg = _("Mapped volume %(id)s to the initiator %(initiator_name)s.")
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Mapped volume %(id)s to the initiator %(initiator_name)s.",
+ msg_fmt)
iscsi_details = self._get_iscsi_service_details()
iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
iscsi_details)
- msg = _("Successfully fetched target details for volume %(id)s and "
- "initiator %(initiator_name)s.")
- LOG.debug(msg % msg_fmt)
+ LOG.debug("Successfully fetched target details for volume %(id)s and "
+ "initiator %(initiator_name)s.", msg_fmt)
iqn = iscsi_portal['iqn']
address = iscsi_portal['ip']
port = iscsi_portal['tcp_port']
host = self._client.update_host_type(
host['hostRef'], ht_def)
except exception.NetAppDriverException as e:
- msg = _LW("Unable to update host type for host with "
- "label %(l)s. %(e)s")
- LOG.warning(msg % {'l': host['label'], 'e': e.msg})
+ LOG.warning(_LW("Unable to update host type for host with "
+ "label %(l)s. %(e)s"),
+ {'l': host['label'], 'e': e.msg})
return host
except exception.NotFound as e:
LOG.warning(_LW("Message - %s."), e.msg)
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
"""
LOG.info(_LI("Updating storage service catalog information for "
- "backend '%s'") % self._backend_name)
+ "backend '%s'"), self._backend_name)
self._ssc_stats = \
self._update_ssc_disk_encryption(self._objects["disk_pool_refs"])
self._ssc_stats = \
(int(x.get('totalRaidedSpace', 0)) -
int(x.get('usedSpace', 0) >= size))]
if not avl_pools:
- msg = _LW("No storage pool found with available capacity %s.")
- LOG.warning(msg % size_gb)
+ LOG.warning(_LW("No storage pool found with available capacity "
+ "%s."), size_gb)
return avl_pools
def extend_volume(self, volume, new_size):
def log_extra_spec_warnings(extra_specs):
for spec in (set(extra_specs.keys() if extra_specs else []) &
set(OBSOLETE_SSC_SPECS.keys())):
- msg = _LW('Extra spec %(old)s is obsolete. Use %(new)s instead.')
- args = {'old': spec, 'new': OBSOLETE_SSC_SPECS[spec]}
- LOG.warning(msg % args)
+ LOG.warning(_LW('Extra spec %(old)s is obsolete. Use %(new)s '
+ 'instead.'), {'old': spec,
+ 'new': OBSOLETE_SSC_SPECS[spec]})
for spec in (set(extra_specs.keys() if extra_specs else []) &
set(DEPRECATED_SSC_SPECS.keys())):
- msg = _LW('Extra spec %(old)s is deprecated. Use %(new)s '
- 'instead.')
- args = {'old': spec, 'new': DEPRECATED_SSC_SPECS[spec]}
- LOG.warning(msg % args)
+ LOG.warning(_LW('Extra spec %(old)s is deprecated. Use %(new)s '
+ 'instead.'), {'old': spec,
+ 'new': DEPRECATED_SSC_SPECS[spec]})
def get_iscsi_connection_properties(lun_id, volume, iqn,
"'%{version}\t%{release}\t%{vendor}'",
self.PACKAGE_NAME)
if not out:
- LOG.info(_LI('No rpm info found for %(pkg)s package.') % {
+ LOG.info(_LI('No rpm info found for %(pkg)s package.'), {
'pkg': self.PACKAGE_NAME})
return False
parts = out.split()
self._vendor = ' '.join(parts[2::])
return True
except Exception as e:
- LOG.info(_LI('Could not run rpm command: %(msg)s.') % {'msg': e})
+ LOG.info(_LI('Could not run rpm command: %(msg)s.'), {'msg': e})
return False
# ubuntu, mirantis on ubuntu
out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
self.PACKAGE_NAME)
if not out:
- LOG.info(_LI('No dpkg-query info found for %(pkg)s package.')
- % {'pkg': self.PACKAGE_NAME})
+ LOG.info(_LI('No dpkg-query info found for %(pkg)s package.'),
+ {'pkg': self.PACKAGE_NAME})
return False
# debian format: [epoch:]upstream_version[-debian_revision]
deb_version = out
self._vendor = _vendor
return True
except Exception as e:
- LOG.info(_LI('Could not run dpkg-query command: %(msg)s.') % {
+ LOG.info(_LI('Could not run dpkg-query command: %(msg)s.'), {
'msg': e})
return False
if not config:
msg = (_("There's no NFS config file configured (%s)") %
'nfs_shares_config')
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.NfsException(msg)
if not os.path.exists(config):
msg = (_("NFS config file at %(config)s doesn't exist") %
{'config': config})
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.NfsException(msg)
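# A brief note in code form, assuming only the standard library: Logger.warn()
# is just a deprecated alias for Logger.warning(), which is why these hunks
# (and the LOG.warn -> LOG.warning changes throughout) standardize on the
# supported spelling.
import logging

LOG = logging.getLogger(__name__)

LOG.warn("deprecated alias, kept for backward compatibility")
LOG.warning("preferred spelling; identical output")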
if not self.configuration.nfs_oversub_ratio > 0:
msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
except Exception as e:
if attempt == (num_attempts - 1):
LOG.error(_LE('Mount failure for %(share)s after '
- '%(count)d attempts.') % {
+ '%(count)d attempts.'), {
'share': nfs_share,
'count': num_attempts})
- raise exception.NfsException(e)
- LOG.debug('Mount attempt %d failed: %s.\nRetrying mount ...' %
- (attempt, six.text_type(e)))
+ raise exception.NfsException(six.text_type(e))
+ LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n'
+ 'Retrying mount ...',
+ {'attempt': attempt, 'exc': e})
time.sleep(1)
def _find_share(self, volume_size_in_gib):
self.configuration.nas_secure_file_permissions,
nfs_mount, is_new_cinder_install)
- LOG.debug('NAS variable secure_file_permissions setting is: %s' %
+ LOG.debug('NAS variable secure_file_permissions setting is: %s',
self.configuration.nas_secure_file_permissions)
if self.configuration.nas_secure_file_permissions == 'false':
- LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
- "other/world read & write access). "
- "This is considered an insecure NAS environment. "
- "Please see %s for information on a secure "
- "NFS configuration.") %
- doc_html)
+ LOG.warning(_LW("The NAS file permissions mode will be 666 "
+ "(allowing other/world read & write access). "
+ "This is considered an insecure NAS environment. "
+ "Please see %s for information on a secure "
+ "NFS configuration."),
+ doc_html)
self.configuration.nas_secure_file_operations = \
self._determine_nas_security_option_setting(
if self.configuration.nas_secure_file_operations == 'true':
self._execute_as_root = False
- LOG.debug('NAS variable secure_file_operations setting is: %s' %
+ LOG.debug('NAS variable secure_file_operations setting is: %s',
self.configuration.nas_secure_file_operations)
if self.configuration.nas_secure_file_operations == 'false':
- LOG.warn(_LW("The NAS file operations will be run as "
- "root: allowing root level access at the storage "
- "backend. This is considered an insecure NAS "
- "environment. Please see %s "
- "for information on a secure NAS configuration.") %
- doc_html)
+ LOG.warning(_LW("The NAS file operations will be run as "
+ "root: allowing root level access at the storage "
+ "backend. This is considered an insecure NAS "
+ "environment. Please see %s "
+ "for information on a secure NAS configuration."),
+ doc_html)
def _get_discovery_ip(self, netconfig):
"""Get discovery ip."""
subnet_label = self.configuration.nimble_subnet_label
- LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s'
- % {'netlabel': subnet_label, 'netconf': netconfig})
+ LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
+ {'netlabel': subnet_label, 'netconf': netconfig})
ret_discovery_ip = ''
for subnet in netconfig['subnet-list']:
- LOG.info(_LI('Exploring array subnet label %s') % subnet['label'])
+ LOG.info(_LI('Exploring array subnet label %s'), subnet['label'])
if subnet_label == '*':
# Use the first data subnet, save mgmt+data for later
- if (subnet['subnet-id']['type'] == SM_SUBNET_DATA):
+ if subnet['subnet-id']['type'] == SM_SUBNET_DATA:
LOG.info(_LI('Discovery ip %(disc_ip)s is used '
- 'on data subnet %(net_label)s')
- % {'disc_ip': subnet['discovery-ip'],
- 'net_label': subnet['label']})
+ 'on data subnet %(net_label)s'),
+ {'disc_ip': subnet['discovery-ip'],
+ 'net_label': subnet['label']})
return subnet['discovery-ip']
elif (subnet['subnet-id']['type'] ==
SM_SUBNET_MGMT_PLUS_DATA):
LOG.info(_LI('Discovery ip %(disc_ip)s is found'
- ' on mgmt+data subnet %(net_label)s')
- % {'disc_ip': subnet['discovery-ip'],
- 'net_label': subnet['label']})
+ ' on mgmt+data subnet %(net_label)s'),
+ {'disc_ip': subnet['discovery-ip'],
+ 'net_label': subnet['label']})
ret_discovery_ip = subnet['discovery-ip']
# If subnet is specified and found, use the subnet
elif subnet_label == subnet['label']:
LOG.info(_LI('Discovery ip %(disc_ip)s is used'
- ' on subnet %(net_label)s')
- % {'disc_ip': subnet['discovery-ip'],
- 'net_label': subnet['label']})
+ ' on subnet %(net_label)s'),
+ {'disc_ip': subnet['discovery-ip'],
+ 'net_label': subnet['label']})
return subnet['discovery-ip']
if ret_discovery_ip:
- LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet')
- % ret_discovery_ip)
+ LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet'),
+ ret_discovery_ip)
return ret_discovery_ip
else:
raise NimbleDriverException(_('No suitable discovery ip found'))
target_ipaddr = self._get_discovery_ip(netconfig)
iscsi_portal = target_ipaddr + ':3260'
provider_location = '%s %s %s' % (iscsi_portal, iqn, LUN_ID)
- LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s')
- % {'name': volume_name, 'loc': provider_location})
+ LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s'),
+ {'name': volume_name, 'loc': provider_location})
return provider_location
def _get_model_info(self, volume_name):
float(units.Gi))
free_space = total_capacity - used_space
LOG.debug('total_capacity=%(capacity)f '
- 'used_space=%(used)f free_space=%(free)f'
- % {'capacity': total_capacity,
- 'used': used_space,
- 'free': free_space})
+ 'used_space=%(used)f free_space=%(free)f',
+ {'capacity': total_capacity,
+ 'used': used_space,
+ 'free': free_space})
backend_name = self.configuration.safe_get(
'volume_backend_name') or self.__class__.__name__
self.group_stats = {'volume_backend_name': backend_name,
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
volume_name = volume['name']
- LOG.info(_LI('Entering extend_volume volume=%(vol)s new_size=%(size)s')
- % {'vol': volume_name, 'size': new_size})
+ LOG.info(_LI('Entering extend_volume volume=%(vol)s '
+ 'new_size=%(size)s'),
+ {'vol': volume_name, 'size': new_size})
vol_size = int(new_size) * units.Gi
reserve = not self.configuration.san_thin_provision
reserve_size = vol_size if reserve else 0
"""Creates igroup for an initiator and returns the igroup name."""
igrp_name = 'openstack-' + self._generate_random_string(12)
LOG.info(_LI('Creating initiator group %(grp)s '
- 'with initiator %(iname)s')
- % {'grp': igrp_name, 'iname': initiator_name})
+ 'with initiator %(iname)s'),
+ {'grp': igrp_name, 'iname': initiator_name})
self.APIExecutor.create_initiator_group(igrp_name, initiator_name)
return igrp_name
initiator_group['initiator-list'][0]['name'] ==
initiator_name):
LOG.info(_LI('igroup %(grp)s found for '
- 'initiator %(iname)s')
- % {'grp': initiator_group['name'],
- 'iname': initiator_name})
+ 'initiator %(iname)s'),
+ {'grp': initiator_group['name'],
+ 'iname': initiator_name})
return initiator_group['name']
- LOG.info(_LI('No igroup found for initiator %s') % initiator_name)
+ LOG.info(_LI('No igroup found for initiator %s'), initiator_name)
return ''
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance."""
LOG.info(_LI('Entering initialize_connection volume=%(vol)s'
- ' connector=%(conn)s location=%(loc)s')
- % {'vol': volume,
- 'conn': connector,
- 'loc': volume['provider_location']})
+ ' connector=%(conn)s location=%(loc)s'),
+ {'vol': volume,
+ 'conn': connector,
+ 'loc': volume['provider_location']})
initiator_name = connector['initiator']
initiator_group_name = self._get_igroupname_for_initiator(
initiator_name)
if not initiator_group_name:
initiator_group_name = self._create_igroup_for_initiator(
initiator_name)
- LOG.info(_LI('Initiator group name is %(grp)s for initiator %(iname)s')
- % {'grp': initiator_group_name, 'iname': initiator_name})
+ LOG.info(_LI('Initiator group name is %(grp)s for initiator '
+ '%(iname)s'),
+ {'grp': initiator_group_name, 'iname': initiator_name})
self.APIExecutor.add_acl(volume, initiator_group_name)
(iscsi_portal, iqn, lun_num) = volume['provider_location'].split()
properties = {}
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
LOG.info(_LI('Entering terminate_connection volume=%(vol)s'
- ' connector=%(conn)s location=%(loc)s.')
- % {'vol': volume,
- 'conn': connector,
- 'loc': volume['provider_location']})
+ ' connector=%(conn)s location=%(loc)s.'),
+ {'vol': volume,
+ 'conn': connector,
+ 'loc': volume['provider_location']})
initiator_name = connector['initiator']
initiator_group_name = self._get_igroupname_for_initiator(
initiator_name)
self.login()
continue
else:
- LOG.error(_LE('Re-throwing Exception %s') % e)
+ LOG.error(_LE('Re-throwing Exception %s'), e)
raise
return inner_connection_checker
self.username = kwargs['username']
self.password = kwargs['password']
wsdl_url = 'https://%s/wsdl/NsGroupManagement.wsdl' % (kwargs['ip'])
- LOG.debug('Using Nimble wsdl_url: %s' % wsdl_url)
+ LOG.debug('Using Nimble wsdl_url: %s', wsdl_url)
self.err_string_dict = self._create_err_code_to_str_mapper(wsdl_url)
self.client = client.Client(wsdl_url,
username=self.username,
password=self.password)
soap_url = ('https://%(ip)s:%(port)s/soap' % {'ip': kwargs['ip'],
'port': SOAP_PORT})
- LOG.debug('Using Nimble soap_url: %s' % soap_url)
+ LOG.debug('Using Nimble soap_url: %s', soap_url)
self.client.set_options(location=soap_url)
self.login()
def login(self):
"""Execute Https Login API."""
response = self._execute_login()
- LOG.info(_LI('Successful login by user %s') % self.username)
+ LOG.info(_LI('Successful login by user %s'), self.username)
self.sid = response['authInfo']['sid']
@_connection_checker
LOG.info(_LI('Creating a new volume=%(vol)s size=%(size)s'
' reserve=%(reserve)s in pool=%(pool)s'
- ' description=%(description)s')
- % {'vol': volume['name'],
- 'size': volume_size,
- 'reserve': reserve,
- 'pool': pool_name,
- 'description': description})
+ ' description=%(description)s'),
+ {'vol': volume['name'],
+ 'size': volume_size,
+ 'reserve': reserve,
+ 'pool': pool_name,
+ 'description': description})
return self.client.service.createVol(
request={'sid': self.sid,
'attr': {'name': volume['name'],
def create_vol(self, volume, pool_name, reserve):
"""Execute createVol API."""
response = self._execute_create_vol(volume, pool_name, reserve)
- LOG.info(_LI('Successfully create volume %s') % response['name'])
+ LOG.info(_LI('Successfully create volume %s'), response['name'])
return response['name']
@_connection_checker
def add_acl(self, volume, initiator_group_name):
"""Execute addAcl API."""
LOG.info(_LI('Adding ACL to volume=%(vol)s with'
- ' initiator group name %(igrp)s')
- % {'vol': volume['name'],
- 'igrp': initiator_group_name})
+ ' initiator group name %(igrp)s'),
+ {'vol': volume['name'],
+ 'igrp': initiator_group_name})
return self.client.service.addVolAcl(
request={'sid': self.sid,
'volname': volume['name'],
def remove_acl(self, volume, initiator_group_name):
"""Execute removeVolAcl API."""
LOG.info(_LI('Removing ACL from volume=%(vol)s'
- ' for initiator group %(igrp)s')
- % {'vol': volume['name'],
- 'igrp': initiator_group_name})
+ ' for initiator group %(igrp)s'),
+ {'vol': volume['name'],
+ 'igrp': initiator_group_name})
return self.client.service.removeVolAcl(
request={'sid': self.sid,
'volname': volume['name'],
@_response_checker
def _execute_get_vol_info(self, vol_name):
LOG.info(_LI('Getting volume information '
- 'for vol_name=%s') % (vol_name))
+ 'for vol_name=%s'), vol_name)
return self.client.service.getVolInfo(request={'sid': self.sid,
'name': vol_name})
def get_vol_info(self, vol_name):
"""Execute getVolInfo API."""
response = self._execute_get_vol_info(vol_name)
- LOG.info(_LI('Successfully got volume information for volume %s')
- % vol_name)
+ LOG.info(_LI('Successfully got volume information for volume %s'),
+ vol_name)
return response['vol']
@_connection_checker
@_response_checker
def online_vol(self, vol_name, online_flag, *args, **kwargs):
"""Execute onlineVol API."""
- LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s')
- % {'vol': vol_name, 'flag': online_flag})
+ LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s'),
+ {'vol': vol_name, 'flag': online_flag})
return self.client.service.onlineVol(request={'sid': self.sid,
'name': vol_name,
'online': online_flag})
@_response_checker
def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs):
"""Execute onlineSnap API."""
- LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s')
- % {'snap': snap_name, 'flag': online_flag})
+ LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s'),
+ {'snap': snap_name, 'flag': online_flag})
return self.client.service.onlineSnap(request={'sid': self.sid,
'vol': vol_name,
'name': snap_name,
@_response_checker
def dissociate_volcoll(self, vol_name, *args, **kwargs):
"""Execute dissocProtPol API."""
- LOG.info(_LI('Dissociating volume %s ') % vol_name)
+ LOG.info(_LI('Dissociating volume %s '), vol_name)
return self.client.service.dissocProtPol(
request={'sid': self.sid,
'vol-name': vol_name})
@_response_checker
def delete_vol(self, vol_name, *args, **kwargs):
"""Execute deleteVol API."""
- LOG.info(_LI('Deleting volume %s ') % vol_name)
+ LOG.info(_LI('Deleting volume %s '), vol_name)
return self.client.service.deleteVol(request={'sid': self.sid,
'name': vol_name})
# Limit to 254 characters
snap_description = snap_description[:254]
LOG.info(_LI('Creating snapshot for volume_name=%(vol)s'
- ' snap_name=%(name)s snap_description=%(desc)s')
- % {'vol': volume_name,
- 'name': snap_name,
- 'desc': snap_description})
+ ' snap_name=%(name)s snap_description=%(desc)s'),
+ {'vol': volume_name,
+ 'name': snap_name,
+ 'desc': snap_description})
return self.client.service.snapVol(
request={'sid': self.sid,
'vol': volume_name,
@_response_checker
def delete_snap(self, vol_name, snap_name, *args, **kwargs):
"""Execute deleteSnap API."""
- LOG.info(_LI('Deleting snapshot %s ') % snap_name)
+ LOG.info(_LI('Deleting snapshot %s '), snap_name)
return self.client.service.deleteSnap(request={'sid': self.sid,
'vol': vol_name,
'name': snap_name})
reserve_size = snap_size * units.Gi if reserve else 0
LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s '
'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s'
- 'reserve=%(reserve)s')
- % {'vol': volume_name,
- 'snap': snap_name,
- 'clone': clone_name,
- 'size': snap_size,
- 'reserve': reserve})
+ 'reserve=%(reserve)s'),
+ {'vol': volume_name,
+ 'snap': snap_name,
+ 'clone': clone_name,
+ 'size': snap_size,
+ 'reserve': reserve})
clone_size = snap_size * units.Gi
return self.client.service.cloneVol(
request={'sid': self.sid,
@_response_checker
def edit_vol(self, vol_name, mask, attr):
"""Execute editVol API."""
- LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s')
- % {'vol': vol_name, 'mask': str(mask)})
+ LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s'),
+ {'vol': vol_name, 'mask': str(mask)})
return self.client.service.editVol(request={'sid': self.sid,
'name': vol_name,
'mask': mask,
def create_initiator_group(self, initiator_group_name, initiator_name):
"""Execute createInitiatorGrp API."""
LOG.info(_LI('Creating initiator group %(igrp)s'
- ' with one initiator %(iname)s')
- % {'igrp': initiator_group_name, 'iname': initiator_name})
+ ' with one initiator %(iname)s'),
+ {'igrp': initiator_group_name, 'iname': initiator_name})
return self.client.service.createInitiatorGrp(
request={'sid': self.sid,
'attr': {'name': initiator_group_name,
@_response_checker
def delete_initiator_group(self, initiator_group_name, *args, **kwargs):
"""Execute deleteInitiatorGrp API."""
- LOG.info(_LI('Deleting deleteInitiatorGrp %s ') % initiator_group_name)
+ LOG.info(_LI('Deleting deleteInitiatorGrp %s '), initiator_group_name)
return self.client.service.deleteInitiatorGrp(
request={'sid': self.sid,
'name': initiator_group_name})
Options come from CONF
"""
super(OVSVolumeDriver, self).__init__(*args, **kwargs)
- LOG.debug('INIT %s %s %s ', CONF.vpool_name, str(args),
- str(kwargs))
+ LOG.debug('INIT %(pool_name)s %(arg)s %(kwarg)s ',
+ {'pool_name': CONF.vpool_name, 'arg': args,
+ 'kwarg': kwargs})
self.configuration.append_config_values(OPTS)
self._vpool_name = self.configuration.vpool_name
if vpoollist is not None:
location = '{}/{}.raw'.format(mountpoint, name)
size = volume.size
- LOG.debug('DO_CREATE_VOLUME %s %s', location, size)
+ LOG.debug('DO_CREATE_VOLUME %(location)s %(size)s',
+ {'location': location, 'size': size})
vdisklib.VDiskController.create_volume(location = location,
size = size)
volume['provider_location'] = location
Downloads image from glance server into local .raw
:param volume: volume reference (sqlalchemy Model)
"""
- LOG.debug("CP_IMG_TO_VOL %s %s", image_service, image_id)
+ LOG.debug("CP_IMG_TO_VOL %(image_service)s %(image_id)s",
+ {'image_service': image_service, 'image_id': image_id})
name = volume.display_name
if not name:
Called on "cinder upload-to-image ...volume... ...image-name..."
:param volume: volume reference (sqlalchemy Model)
"""
- LOG.debug("CP_VOL_TO_IMG %s %s", image_service, image_meta)
+ LOG.debug("CP_VOL_TO_IMG %(image_service)s %(image_meta)s",
+ {'image_service': image_service, 'image_meta': image_meta})
super(OVSVolumeDriver, self).copy_volume_to_image(
context, volume, image_service, image_meta)
vdisk = vdiskhybrid.VDisk(disk_meta['diskguid'])
vdisk.cinder_id = volume.id
vdisk.name = name
- LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s',
- volume.id, name)
+ LOG.debug('[CREATE FROM TEMPLATE] Updating meta %(volume_id)s '
+ '%(name)s', {'volume_id': volume.id, 'name': name})
vdisk.save()
else:
LOG.debug('[THIN CLONE] VDisk %s is not a template',
'machineguid': source_ovs_disk.vmachine_guid,
'is_automatic': False}
- LOG.debug('CREATE_SNAP %s %s', name, str(metadata))
+ LOG.debug('CREATE_SNAP %(name)s %(metadata)s',
+ {'name': name, 'metadata': metadata})
snapshotid = vdisklib.VDiskController.create_snapshot(
diskguid = source_ovs_disk.guid,
metadata = metadata,
'machineguid': ovs_disk.vmachine_guid,
'is_automatic': False}
- LOG.debug('CREATE_SNAP %s %s', snapshot.display_name,
- str(metadata))
+ LOG.debug('CREATE_SNAP %(name)s %(metadata)s',
+ {'name': snapshot.display_name, 'metadata': metadata})
vdisklib.VDiskController.create_snapshot(diskguid = ovs_disk.guid,
metadata = metadata,
snapshotid =
pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
six.text_type(volume.host))
- LOG.debug('[CLONE FROM SNAP] %s %s %s %s',
- ovs_snap_disk.guid, snapshot.id, devicename,
- pmachineguid)
+ LOG.debug('[CLONE FROM SNAP] %(disk)s %(snapshot)s %(device)s '
+ '%(machine)s',
+ {'disk': ovs_snap_disk.guid, 'snapshot': snapshot.id,
+ 'device': devicename, 'machine': pmachineguid})
disk_meta = vdisklib.VDiskController.clone(
diskguid = ovs_snap_disk.guid,
snapshotid = snapshot.id,
The volume is a .raw file on a virtual filesystem.
Connection is always allowed based on POSIX permissions.
"""
- LOG.debug('TERM_CONN %s %s ', six.text_type(connector), force)
+ LOG.debug('TERM_CONN %(connector)s %(force)s ',
+ {'connector': six.text_type(connector), 'force': force})
def check_for_setup_error(self):
"""Validate driver setup"""
_location = "{0}/{1}".format(vsr.mountpoint,
vd.devicename)
if _location == location:
- LOG.debug('Location %s Disk found %s',
- (location, vd.guid))
+ LOG.debug('Location %(location)s Disk '
+ 'found %(id)s',
+ {'location': location,
+ 'id': vd.guid})
disk = vdiskhybrid.VDisk(vd.guid)
return disk
- msg = 'NO RESULT Attempt %s timeout %s max attempts %s'
- LOG.debug(msg, attempt, timeout, retry)
+ LOG.debug('NO RESULT Attempt %(attempt)s timeout %(timeout)s max '
+ 'attempts %(retry)s',
+ {'attempt': attempt, 'timeout': timeout, 'retry': retry})
if timeout:
time.sleep(timeout)
attempt += 1
:return guid: GUID
"""
hostname = self._get_real_hostname(hostname)
- LOG.debug('[_FIND OVS PMACHINE] Hostname %s' % (hostname))
+ LOG.debug('[_FIND OVS PMACHINE] Hostname %s', hostname)
mapping = [(pm.guid, six.text_type(sr.name))
for pm in pmachinelist.PMachineList.get_pmachines()
for sr in pm.storagerouters]
for item in mapping:
if item[1] == str(hostname):
- msg = 'Found pmachineguid %s for Hostname %s'
- LOG.debug(msg, item[0], hostname)
+ LOG.debug('Found pmachineguid %(item)s for Hostname %(host)s',
+ {'item': item[0], 'host': hostname})
return item[0]
- msg = (_('No PMachine guid found for Hostname %s'), hostname)
+ msg = (_('No PMachine guid found for Hostname %s') % hostname)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
"""Find OVS disk object based on snapshot id
:return VDisk: OVS DAL model object
"""
- LOG.debug('[_FIND OVS DISK] Snapshotid %s' % snapshotid)
+ LOG.debug('[_FIND OVS DISK] Snapshotid %s', snapshotid)
for disk in vdisklist.VDiskList.get_vdisks():
snaps_guid = [s['guid'] for s in disk.snapshots]
if str(snapshotid) in snaps_guid:
- LOG.debug('[_FIND OVS DISK] Snapshot id %s Disk found %s',
- (snapshotid, disk))
+ LOG.debug('[_FIND OVS DISK] Snapshot id %(snapshot)s Disk '
+ 'found %(disk)s',
+ {'snapshot': snapshotid, 'disk': disk})
return disk
- msg = (_('No disk found for snapshotid %s'), snapshotid)
+ msg = (_('No disk found for snapshotid %s') % snapshotid)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename):
ret = 0
output = ''
- msg = _('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s') \
- % {'volume': volumeid, 'wwpns': targetwwpns,
- 'iqn': initiatorwwpns, 'volumename': volumename}
- LOG.debug(msg)
+ LOG.debug('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s',
+ {'volume': volumeid, 'wwpns': targetwwpns,
+ 'iqn': initiatorwwpns, 'volumename': volumename})
try:
ret, output = self.dpl.assign_vdev_fc(
self._conver_uuid2hex(volumeid), targetwwpns,
ERR_MSG_NOT_EXIST in err.text:
# Happens if the volume does not exist.
ctxt.reraise = False
- LOG.warn(_LW("Volume deletion failed with message: %s"),
- err.text)
+ LOG.warning(_LW("Volume deletion failed with message: %s"),
+ err.text)
LOG.debug("Leave PureISCSIDriver.delete_volume.")
def create_snapshot(self, snapshot):
self._run_iscsiadm_bare(["-m", "discovery", "-t", "sendtargets",
"-p", self._iscsi_port["portal"]])
except processutils.ProcessExecutionError as err:
- LOG.warn(_LW("iSCSI discovery of port %(port_name)s at "
- "%(port_portal)s failed with error: %(err_msg)s"),
- {"port_name": self._iscsi_port["name"],
- "port_portal": self._iscsi_port["portal"],
- "err_msg": err.stderr})
+ LOG.warning(_LW("iSCSI discovery of port %(port_name)s at "
+ "%(port_portal)s failed with error: %(err_msg)s"),
+ {"port_name": self._iscsi_port["name"],
+ "port_portal": self._iscsi_port["portal"],
+ "err_msg": err.stderr})
self._iscsi_port = self._choose_target_iscsi_port()
return self._iscsi_port
"Connection already exists" in err.text):
# Happens if the volume is already connected to the host.
ctxt.reraise = False
- LOG.warn(_LW("Volume connection already exists with "
- "message: %s"), err.text)
+ LOG.warning(_LW("Volume connection already exists with "
+ "message: %s"), err.text)
# Get the info for the existing connection
connected_hosts = \
self._array.list_volume_private_connections(vol_name)
if (err.code == 400 and
ERR_MSG_NOT_EXIST in err.text):
ctxt.reraise = False
- LOG.warn(_LW("Volume unmanage was unable to rename "
- "the volume, error message: %s"), err.text)
+ LOG.warning(_LW("Volume unmanage was unable to rename "
+ "the volume, error message: %s"), err.text)
def check_for_setup_error(self):
if not self.configuration.quobyte_volume_url:
- msg = (_LW("There's no Quobyte volume configured (%s). Example:"
- " quobyte://<DIR host>/<volume name>") %
+ msg = (_("There's no Quobyte volume configured (%s). Example:"
+ " quobyte://<DIR host>/<volume name>") %
'quobyte_volume_url')
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.VolumeDriverException(msg)
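# A hedged sketch of the i18n convention the hunk above applies: a message
# that is raised to the caller (and not only logged) uses the user-facing
# _() marker rather than the log-only _LW() marker. The stub _() below is
# illustrative; the real marker comes from cinder.i18n.
import logging

LOG = logging.getLogger(__name__)

def _(msg):
    """Stand-in for cinder.i18n._; returns the message unchanged here."""
    return msg

def check_volume_url(volume_url):
    if not volume_url:
        msg = (_("There's no Quobyte volume configured (%s). Example:"
                 " quobyte://<DIR host>/<volume name>") %
               'quobyte_volume_url')
        LOG.warning(msg)
        raise ValueError(msg)  # VolumeDriverException in the real driver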
# Check if mount.quobyte is installed
qcow2.
"""
- LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
- "volume_size: %(size)s"
- % {'snap': snapshot['id'],
- 'vol': volume['id'],
- 'size': volume_size})
+ LOG.debug("snapshot: %(snap)s, volume: %(vol)s, ",
+ {'snap': snapshot['id'],
+ 'vol': volume['id'],
+ 'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
path_to_new_vol = self._local_path_volume(volume)
- LOG.debug("will copy from snapshot at %s" % path_to_snap_img)
+ LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.quobyte_qcow2_volumes:
out_format = 'qcow2'
"""Deletes a logical volume."""
if not volume['provider_location']:
- LOG.warn(_LW('Volume %s does not have provider_location '
- 'specified, skipping'), volume['name'])
+ LOG.warning(_LW('Volume %s does not have provider_location '
+ 'specified, skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
' one Quobyte volume.'
target_volume = self._mounted_shares[0]
- LOG.debug('Selected %s as target Quobyte volume.' % target_volume)
+ LOG.debug('Selected %s as target Quobyte volume.', target_volume)
return target_volume
mounted = False
try:
LOG.info(_LI('Fixing previous mount %s which was not'
- ' unmounted correctly.') % mount_path)
+ ' unmounted correctly.'), mount_path)
self._execute('umount.quobyte', mount_path,
run_as_root=False)
except processutils.ProcessExecutionError as exc:
- LOG.warn(_LW("Failed to unmount previous mount: %s"),
- exc)
+ LOG.warning(_LW("Failed to unmount previous mount: "
+ "%s"), exc)
else:
# TODO(quobyte): Extend exc analysis in here?
- LOG.warn(_LW("Unknown error occurred while checking mount"
- " point: %s Trying to continue."), exc)
+ LOG.warning(_LW("Unknown error occurred while checking "
+ "mount point: %s Trying to continue."),
+ exc)
if not mounted:
if not os.path.isdir(mount_path):
command.extend(['-c', self.configuration.quobyte_client_cfg])
try:
- LOG.info(_LI('Mounting volume: %s ...') % quobyte_volume)
+ LOG.info(_LI('Mounting volume: %s ...'), quobyte_volume)
self._execute(*command, run_as_root=False)
- LOG.info(_LI('Mounting volume: %s succeeded') % quobyte_volume)
+ LOG.info(_LI('Mounting volume: %s succeeded'), quobyte_volume)
mounted = True
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.stderr:
- LOG.warn(_LW("%s is already mounted"), quobyte_volume)
+ LOG.warning(_LW("%s is already mounted"), quobyte_volume)
else:
raise
raise exception.VolumeDriverException(msg)
if not os.access(mount_path, os.W_OK | os.X_OK):
- LOG.warn(_LW("Volume is not writable. Please broaden the file"
- " permissions. Mount: %s"), mount_path)
+ LOG.warning(_LW("Volume is not writable. Please broaden the file"
+ " permissions. Mount: %s"), mount_path)
return args
def _connect_to_rados(self, pool=None):
- LOG.debug("opening connection to ceph cluster (timeout=%s)." %
- (self.configuration.rados_connect_timeout))
+ LOG.debug("opening connection to ceph cluster (timeout=%s).",
+ self.configuration.rados_connect_timeout)
# NOTE (e0ne): rados is binding to C lbirary librados.
# It blocks eventlet loop so we need to run it in a native
# infinite is allowed.
if depth == CONF.rbd_max_clone_depth:
LOG.debug("maximum clone depth (%d) has been reached - "
- "flattening source volume" %
- (CONF.rbd_max_clone_depth))
+ "flattening source volume",
+ CONF.rbd_max_clone_depth)
flatten_parent = True
src_volume = self.rbd.Image(client.ioctx, src_name)
_pool, parent, snap = self._get_clone_info(src_volume,
src_name)
# Flatten source volume
- LOG.debug("flattening source volume %s" % (src_name))
+ LOG.debug("flattening source volume %s", src_name)
src_volume.flatten()
# Delete parent clone snap
parent_volume = self.rbd.Image(client.ioctx, parent)
# Create new snapshot of source volume
clone_snap = "%s.clone_snap" % dest_name
- LOG.debug("creating snapshot='%s'" % (clone_snap))
+ LOG.debug("creating snapshot='%s'", clone_snap)
src_volume.create_snap(clone_snap)
src_volume.protect_snap(clone_snap)
except Exception as exc:
# Now clone source volume snapshot
try:
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
- "'%(dest)s'" %
+ "'%(dest)s'",
{'src_vol': src_name, 'src_snap': clone_snap,
'dest': dest_name})
self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
if volume['size'] != src_vref['size']:
LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
- "%(dst_size)d" %
+ "%(dst_size)d",
{'dst_vol': volume['name'], 'src_size': src_vref['size'],
'dst_size': volume['size']})
self._resize(volume)
"""Creates a logical volume."""
size = int(volume['size']) * units.Gi
- LOG.debug("creating volume '%s'" % (volume['name']))
+ LOG.debug("creating volume '%s'", volume['name'])
chunk_size = CONF.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
features=client.features)
def _flatten(self, pool, volume_name):
- LOG.debug('flattening %(pool)s/%(img)s' %
+ LOG.debug('flattening %(pool)s/%(img)s',
dict(pool=pool, img=volume_name))
with RBDVolumeProxy(self, volume_name, pool) as vol:
vol.flatten()
def _clone(self, volume, src_pool, src_image, src_snap):
- LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s' %
+ LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s',
dict(pool=src_pool, img=src_image, snap=src_snap,
dst=volume['name']))
with RADOSClient(self, src_pool) as src_client:
if parent_snap == "%s.clone_snap" % volume_name:
return pool, parent, parent_snap
except self.rbd.ImageNotFound:
- LOG.debug("volume %s is not a clone" % volume_name)
+ LOG.debug("volume %s is not a clone", volume_name)
volume.set_snap(None)
return (None, None, None)
parent_name,
parent_snap)
- LOG.debug("deleting parent snapshot %s" % (parent_snap))
+ LOG.debug("deleting parent snapshot %s", parent_snap)
parent_rbd.unprotect_snap(parent_snap)
parent_rbd.remove_snap(parent_snap)
# If parent has been deleted in Cinder, delete the silent reference and
# keep walking up the chain if it is itself a clone.
if (not parent_has_snaps) and parent_name.endswith('.deleted'):
- LOG.debug("deleting parent %s" % (parent_name))
+ LOG.debug("deleting parent %s", parent_name)
self.RBDProxy().remove(client.ioctx, parent_name)
# Now move up to grandparent if there is one
try:
rbd_image = self.rbd.Image(client.ioctx, volume_name)
except self.rbd.ImageNotFound:
- LOG.info(_LI("volume %s no longer exists in backend")
- % (volume_name))
+ LOG.info(_LI("volume %s no longer exists in backend"),
+ volume_name)
return
clone_snap = None
rbd_image.close()
if clone_snap is None:
- LOG.debug("deleting rbd volume %s" % (volume_name))
+ LOG.debug("deleting rbd volume %s", volume_name)
try:
self.RBDProxy().remove(client.ioctx, volume_name)
except self.rbd.ImageBusy:
"connection from a client that has crashed and, "
"if so, may be resolved by retrying the delete "
"after 30 seconds has elapsed."))
- LOG.warn(msg)
+ LOG.warning(msg)
# Now raise this so that volume stays available so that we
# delete can be retried.
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
except self.rbd.ImageNotFound:
- msg = (_LI("RBD volume %s not found, allowing delete "
- "operation to proceed.") % volume_name)
- LOG.info(msg)
+ LOG.info(_LI("RBD volume %s not found, allowing delete "
+ "operation to proceed."), volume_name)
return
# If it is a clone, walk back up the parent chain deleting
return False
if self._get_fsid() != fsid:
- reason = ('%s is in a different ceph cluster') % image_location
- LOG.debug(reason)
+ LOG.debug('%s is in a different ceph cluster', image_location)
return False
if image_meta['disk_format'] != 'raw':
- reason = ("rbd image clone requires image format to be "
- "'raw' but image {0} is '{1}'").format(
- image_location, image_meta['disk_format'])
- LOG.debug(reason)
+ LOG.debug(("rbd image clone requires image format to be "
+ "'raw' but image {0} is '{1}'").format(
+ image_location, image_meta['disk_format']))
return False
# check that we can read the image
read_only=True):
return True
except self.rbd.Error as e:
- LOG.debug('Unable to open image %(loc)s: %(err)s' %
+ LOG.debug('Unable to open image %(loc)s: %(err)s',
dict(loc=image_location, err=e))
return False
CONF.image_conversion_dir or
tempfile.gettempdir())
- if (tmpdir == self.configuration.volume_tmp_dir):
- LOG.warn(_LW('volume_tmp_dir is now deprecated, please use '
- 'image_conversion_dir'))
+ if tmpdir == self.configuration.volume_tmp_dir:
+ LOG.warning(_LW('volume_tmp_dir is now deprecated, please use '
+ 'image_conversion_dir'))
# ensure temporary directory exists
if not os.path.exists(tmpdir):
volume['provider_location'] = self._find_share(volume['size'])
- LOG.info(_LI('casted to %s') % volume['provider_location'])
+ LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
- LOG.error(_LE('Exception during mounting %s') % (exc,))
+ LOG.error(_LE('Exception during mounting %s'), exc)
self._mounted_shares = mounted_shares
- LOG.debug('Available shares %s' % self._mounted_shares)
+ LOG.debug('Available shares %s', self._mounted_shares)
def create_cloned_volume(self, volume, src_vref):
raise NotImplementedError()
:param volume: volume reference
"""
if not volume['provider_location']:
- LOG.warn(_LW('Volume %s does not have '
- 'provider_location specified, '
- 'skipping'), volume['name'])
+ LOG.warning(_LW('Volume %s does not have '
+ 'provider_location specified, '
+ 'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
"""
if self.configuration.nas_secure_file_permissions == 'true':
permissions = '660'
- LOG.debug('File path %s is being set with permissions: %s' %
- (path, permissions))
+ LOG.debug('File path %(path)s is being set with permissions: '
+ '%(permissions)s',
+ {'path': path, 'permissions': permissions})
else:
permissions = 'ugo+rw'
- parms = {'path': path, 'perm': permissions}
- LOG.warn(_LW('%(path)s is being set with open permissions: '
- '%(perm)s') % parms)
+ LOG.warning(_LW('%(path)s is being set with open permissions: '
+ '%(perm)s'), {'path': path, 'perm': permissions})
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
- LOG.debug('Loading shares from %s.' % share_file)
+ LOG.debug('Loading shares from %s.', share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
"/nfs_backend.html"
self.configuration.nas_secure_file_operations = 'false'
- LOG.warn(_LW("The NAS file operations will be run as root: allowing "
- "root level access at the storage backend. This is "
- "considered an insecure NAS environment. "
- "Please see %s for information on a secure NAS "
- "configuration.") %
- doc_html)
+ LOG.warning(_LW("The NAS file operations will be run as root: "
+ "allowing root level access at the storage backend. "
+ "This is considered an insecure NAS environment. "
+ "Please see %s for information on a secure NAS "
+ "configuration."),
+ doc_html)
self.configuration.nas_secure_file_permissions = 'false'
- LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
- "other/world read & write access). This is considered an "
- "insecure NAS environment. Please see %s for information "
- "on a secure NFS configuration.") %
- doc_html)
+ LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing "
+ "other/world read & write access). This is considered "
+ "an insecure NAS environment. Please see %s for "
+ "information on a secure NFS configuration."),
+ doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
self._execute('chmod', '640', file_path,
run_as_root=False)
LOG.info(_LI('New Cinder secure environment indicator'
- ' file created at path %s.') % file_path)
+ ' file created at path %s.'), file_path)
except IOError as err:
LOG.error(_LE('Failed to create Cinder secure '
- 'environment indicator file: %s') %
- format(err))
+ 'environment indicator file: %s'),
+ err)
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
return snap_info['active']
def _create_cloned_volume(self, volume, src_vref):
- LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s') %
+ LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'),
{'src': src_vref['id'],
'dst': volume['id']})
if (snapshot_file == active_file):
return
- LOG.info(_LI('Deleting stale snapshot: %s') % snapshot['id'])
+ LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id'])
self._delete(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
"""
- LOG.debug('Deleting snapshot %s:' % snapshot['id'])
+ LOG.debug('Deleting snapshot %s:', snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_LI('Snapshot record for %s is not present, allowing '
- 'snapshot_delete to proceed.') % snapshot['id'])
+ 'snapshot_delete to proceed.'), snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
- LOG.debug('snapshot_file for this snap is: %s' % snapshot_file)
+ LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
snapshot_file)
if base_file is None:
# There should always be at least the original volume
# file as base.
- msg = _('No backing file found for %s, allowing snapshot '
- 'to be deleted.') % snapshot_path
- LOG.warn(msg)
+ LOG.warning(_LW('No backing file found for %s, allowing '
+ 'snapshot to be deleted.'), snapshot_path)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
break
if base_id is None:
# This means we are deleting the oldest snapshot
- msg = 'No %(base_id)s found for %(file)s' % {
- 'base_id': 'base_id',
- 'file': snapshot_file}
- LOG.debug(msg)
+ LOG.debug('No %(base_id)s found for %(file)s',
+ {'base_id': 'base_id', 'file': snapshot_file})
online_delete_info = {
'active_file': active_file,
higher_file),
None)
if highest_file is None:
- msg = 'No file depends on %s.' % higher_file
- LOG.debug(msg)
+ LOG.debug('No file depends on %s.', higher_file)
# Committing higher_file into snapshot_file
# And update pointer in highest_file
context,
snapshot['volume_id'],
connection_info)
- LOG.debug('nova call result: %s' % result)
+ LOG.debug('nova call result: %s', result)
except Exception as e:
- LOG.error(_LE('Call to Nova to create snapshot failed'))
- LOG.exception(e)
- raise e
+ LOG.error(_LE('Call to Nova to create snapshot failed: %s'), e)
+ raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
'while creating snapshot.')
raise exception.RemoteFSException(msg)
- LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
- 'id': snapshot['id'],
- 'status': s['status']
- })
+ LOG.debug('Status of snapshot %(id)s is now %(status)s',
+ {'id': snapshot['id'],
+ 'status': s['status']})
if 10 < seconds_elapsed <= 20:
increment = 2
snapshot['id'],
delete_info)
except Exception as e:
- LOG.error(_LE('Call to Nova delete snapshot failed'))
- LOG.exception(e)
- raise e
+ LOG.error(_LE('Call to Nova delete snapshot failed: %s'), e)
+ raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# Nova tasks completed successfully
break
else:
- msg = ('status of snapshot %s is '
- 'still "deleting"... waiting') % snapshot['id']
- LOG.debug(msg)
+ LOG.debug('status of snapshot %s is still "deleting"... '
+ 'waiting', snapshot['id'])
time.sleep(increment)
seconds_elapsed += increment
else:
break
if found_vlun is None:
- msg = (_("3PAR vlun %(name)s not found on host %(host)s") %
- {'name': volume_name, 'host': hostname})
- LOG.info(msg)
+ LOG.info(_LI("3PAR vlun %(name)s not found on host %(host)s"),
+ {'name': volume_name, 'host': hostname})
return found_vlun
def create_vlun(self, volume, host, nsp=None):
if volume_name in vlun['volumeName']:
break
else:
- msg = (
- _("3PAR vlun for volume %(name)s not found on host %(host)s") %
- {'name': volume_name, 'host': hostname})
- LOG.info(msg)
+ LOG.info(_LI("3PAR vlun for volume %(name)s not found on host "
+ "%(host)s"), {'name': volume_name, 'host': hostname})
return
# VLUN Type of MATCHED_SET 4 requires the port to be provided
# for future needs (e.g. export volume to host set).
# The log info explains why the host was left alone.
- msg = (_("3PAR vlun for volume '%(name)s' was deleted, "
- "but the host '%(host)s' was not deleted because: "
- "%(reason)s") %
- {'name': volume_name,
- 'host': hostname,
- 'reason': ex.get_description()})
- LOG.info(msg)
+ LOG.info(_LI("3PAR vlun for volume '%(name)s' was deleted, "
+ "but the host '%(host)s' was not deleted "
+ "because: %(reason)s"),
+ {'name': volume_name, 'host': hostname,
+ 'reason': ex.get_description()})
def _get_volume_type(self, type_id):
ctxt = context.get_admin_context()
except exception.InvalidInput as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
except hpexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpexceptions.HTTPBadRequest as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
- LOG.error(ex)
- raise ex
+ LOG.error(_LE("Exception: %s"), ex)
+ raise
except exception.CinderException as ex:
- LOG.error(ex)
- raise ex
+ LOG.error(_LE("Exception: %s"), ex)
+ raise
except Exception as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return self._get_model_update(volume['host'], cpg)
except hpexceptions.HTTPNotFound:
raise exception.NotFound()
except Exception as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
def delete_volume(self, volume):
# the volume once it stops the copy.
self.client.stopOnlinePhysicalCopy(volume_name)
else:
- LOG.error(ex)
- raise ex
+ LOG.error(_LE("Exception: %s"), ex)
+ raise
else:
- LOG.error(ex)
- raise ex
+ LOG.error(_LE("Exception: %s"), ex)
+ raise
except hpexceptions.HTTPConflict as ex:
if ex.get_code() == 34:
# This is a special case which means the
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
else:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except hpexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
- msg = _("Delete volume id not found. Removing from cinder: "
- "%(id)s Ex: %(msg)s") % {'id': volume['id'], 'msg': ex}
- LOG.warning(msg)
+ LOG.warning(_LW("Delete volume id not found. Removing from "
+ "cinder: %(id)s Ex: %(msg)s"),
+ {'id': volume['id'], 'msg': ex})
except hpexceptions.HTTPForbidden as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized(ex.get_description())
except hpexceptions.HTTPConflict as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except Exception as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
def create_volume_from_snapshot(self, volume, snapshot):
except Exception as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
except hpexceptions.HTTPForbidden as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.NotFound()
except Exception as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return model_update
self.client.createSnapshot(snap_name, vol_name, optional)
except hpexceptions.HTTPForbidden as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.NotFound()
def update_volume_key_value_pair(self, volume, key, value):
volume_name = self._get_3par_vol_name(volume['id'])
self.client.removeVolumeMetaData(volume_name, key)
except Exception as ex:
- msg = _('Failure in clear_volume_key_value_pair:%s') % ex
+ msg = _('Failure in clear_volume_key_value_pair: '
+ '%s') % six.text_type(ex)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.error(msg)
raise exception.Duplicate(msg)
except hpexceptions.HTTPBadRequest as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
- LOG.error(ex)
- raise ex
+ LOG.error(_LE("Exception: %s"), ex)
+ raise
except exception.CinderException as ex:
- LOG.error(ex)
- raise ex
+ LOG.error(_LE("Exception: %s"), ex)
+ raise
except Exception as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return self._get_model_update(volume['host'], cpg)
snap_name = self._get_3par_snap_name(snapshot['id'])
self.client.deleteVolume(snap_name)
except hpexceptions.HTTPForbidden as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
- msg = _("Delete Snapshot id not found. Removing from cinder: "
- "%(id)s Ex: %(msg)s") % {'id': snapshot['id'], 'msg': ex}
- LOG.warning(msg)
+ LOG.warning(_LW("Delete Snapshot id not found. Removing from "
+ "cinder: %(id)s Ex: %(msg)s"),
+ {'id': snapshot['id'], 'msg': ex})
except hpexceptions.HTTPConflict as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
# use the wwn to see if we can find the hostname
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
# no 3par host, re-throw
- if (hostname is None):
- LOG.error(e)
+ if hostname is None:
+ LOG.error(_LE("Exception: %s"), e)
raise
else:
# not a 'host does not exist' HTTPNotFound exception, re-throw
- LOG.error(e)
+ LOG.error(_LE("Exception: %s"), e)
raise
# try again with name retrieved from 3par
if new_tpvv:
cop = self.CONVERT_TO_THIN
LOG.info(_LI("Converting %(volume_name)s to thin provisioning "
- "with userCPG=%(new_cpg)s") %
+ "with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
elif new_tdvv:
cop = self.CONVERT_TO_DEDUP
LOG.info(_LI("Converting %(volume_name)s to thin dedup "
- "provisioning with userCPG=%(new_cpg)s") %
+ "provisioning with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
else:
cop = self.CONVERT_TO_FULL
LOG.info(_LI("Converting %(volume_name)s to full provisioning "
- "with userCPG=%(new_cpg)s") %
+ "with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
try:
# info and then raise.
LOG.info(_LI("tunevv failed because the volume '%s' "
"has snapshots."), volume_name)
- raise ex
+ raise
task_id = body['taskid']
status = self.TaskWaiter(self.client, task_id).wait_for_task()
if ex.get_code() != 102:
LOG.error(_LE("Unexpected error when retype() tried to "
"deleteVolumeSet(%s)"), vvs_name)
- raise ex
+ raise
if new_vvs or new_qos or new_flash_cache:
common._add_volume_to_volume_set(
protocol = host['capabilities']['storage_protocol']
if protocol != 'FC':
LOG.debug("3PAR FC driver cannot migrate in-use volume "
- "to a host with storage_protocol=%s." % protocol)
+ "to a host with storage_protocol=%s.", protocol)
return False, None
common = self._login()
elif len(ip) == 2:
temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]}
else:
- msg = _("Invalid IP address format '%s'") % ip_addr
- LOG.warn(msg)
+ LOG.warning(_LW("Invalid IP address format '%s'"), ip_addr)
# add the single value iscsi_ip_address option to the IP dictionary.
# This way we can see if it's a valid iSCSI IP. If it's not valid,
# lets see if there are invalid iSCSI IPs left in the temp dict
if len(temp_iscsi_ip) > 0:
- msg = (_("Found invalid iSCSI IP address(s) in configuration "
- "option(s) hp3par_iscsi_ips or iscsi_ip_address '%s.'") %
- (", ".join(temp_iscsi_ip)))
- LOG.warn(msg)
+ LOG.warning(_LW("Found invalid iSCSI IP address(s) in "
+ "configuration option(s) hp3par_iscsi_ips or "
+ "iscsi_ip_address '%s.'"),
+ (", ".join(temp_iscsi_ip)))
if not len(self.iscsi_ips) > 0:
msg = _('At least one valid iSCSI IP address must be set.')
LOG.error(msg)
- raise exception.InvalidInput(reason=(msg))
+ raise exception.InvalidInput(reason=msg)
def check_for_setup_error(self):
"""Setup errors are already checked for in do_setup so return pass."""
vlun = common.create_vlun(volume, host, least_used_nsp)
if least_used_nsp is None:
- msg = _("Least busy iSCSI port not found, "
- "using first iSCSI port in list.")
- LOG.warn(msg)
+ LOG.warning(_LW("Least busy iSCSI port not found, "
+ "using first iSCSI port in list."))
iscsi_ip = self.iscsi_ips.keys()[0]
else:
iscsi_ip = self._get_ip_using_nsp(least_used_nsp)
host = common._get_3par_host(hostname)
elif (not host['initiatorChapEnabled'] and
self.configuration.hp3par_iscsi_chap_enabled):
- LOG.warn(_LW("Host exists without CHAP credentials set "
- "and has iSCSI attachments but CHAP is "
- "enabled. Updating host with new CHAP "
- "credentials."))
+ LOG.warning(_LW("Host exists without CHAP credentials set and "
+ "has iSCSI attachments but CHAP is enabled. "
+ "Updating host with new CHAP credentials."))
self._set_3par_chaps(
common,
hostname,
host_info = common.client.getHost(chap_username)
if not host_info['initiatorChapEnabled']:
- LOG.warn(_LW("Host has no CHAP key, but CHAP is enabled."))
+ LOG.warning(_LW("Host has no CHAP key, but CHAP is enabled."))
except hpexceptions.HTTPNotFound:
chap_password = volume_utils.generate_password(16)
- LOG.warn(_LW("No host or VLUNs exist. Generating new CHAP key."))
+ LOG.warning(_LW("No host or VLUNs exist. Generating new "
+ "CHAP key."))
else:
# Get a list of all iSCSI VLUNs and see if there is already a CHAP
# key assigned to one of them. Use that CHAP key if present,
break
except hpexceptions.HTTPNotFound:
LOG.debug("The VLUN %s is missing CHAP credentials "
- "but CHAP is enabled. Skipping." %
+ "but CHAP is enabled. Skipping.",
vlun['remoteName'])
else:
- LOG.warn(_LW("Non-iSCSI VLUN detected."))
+ LOG.warning(_LW("Non-iSCSI VLUN detected."))
if not chap_exists:
chap_password = volume_utils.generate_password(16)
- LOG.warn(_LW("No VLUN contained CHAP credentials. "
- "Generating new CHAP key."))
+ LOG.warning(_LW("No VLUN contained CHAP credentials. "
+ "Generating new CHAP key."))
# Add CHAP credentials to the volume metadata
vol_name = common._get_3par_vol_name(volume['id'])
protocol = host['capabilities']['storage_protocol']
if protocol != 'iSCSI':
LOG.debug("3PAR ISCSI driver cannot migrate in-use volume "
- "to a host with storage_protocol=%s." % protocol)
+ "to a host with storage_protocol=%s.", protocol)
return False, None
common = self._login()
for k, v in status_node.attrib.items():
volume_attributes["permission." + k] = v
- LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s" %
+ LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s",
{'volume_name': volume_name,
'volume_attributes': volume_attributes})
return volume_attributes
for k, v in status_node.attrib.items():
snapshot_attributes["permission." + k] = v
- LOG.debug("Snapshot info: %(name)s => %(attributes)s" %
+ LOG.debug("Snapshot info: %(name)s => %(attributes)s",
{'name': snapshot_name, 'attributes': snapshot_attributes})
return snapshot_attributes
from oslo_log import log as logging
from cinder import exception
-from cinder.i18n import _LE, _LI
+from cinder.i18n import _, _LI
from cinder.volume import driver
from cinder.volume.drivers.san.hp import hp_lefthand_cliq_proxy as cliq_proxy
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy as rest_proxy
self.proxy = self._create_proxy(*self.args, **self.kwargs)
LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
- "proxy %(proxy_ver)s") % {
+ "proxy %(proxy_ver)s"), {
"driver_ver": self.VERSION,
"proxy_ver": self.proxy.get_version_string()})
# Check minimum client version for REST proxy
client_version = rest_proxy.hplefthandclient.version
- if (client_version < MIN_CLIENT_VERSION):
- ex_msg = (_LE("Invalid hplefthandclient version found ("
- "%(found)s). Version %(minimum)s or greater "
- "required.")
+ if client_version < MIN_CLIENT_VERSION:
+ ex_msg = (_("Invalid hplefthandclient version found ("
+ "%(found)s). Version %(minimum)s or greater "
+ "required.")
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)
client_options[client_key] = client_value
except KeyError:
LOG.error(_LE("'%(value)s' is an invalid value "
- "for extra spec '%(key)s'") %
+ "for extra spec '%(key)s'"),
{'value': value, 'key': key})
return client_options
chap_secret = server_info['chapTargetSecret']
if not chap_enabled and chap_secret:
LOG.warning(_LW('CHAP secret exists for host %s but CHAP is '
- 'disabled') % connector['host'])
+ 'disabled'), connector['host'])
if chap_enabled and chap_secret is None:
LOG.warning(_LW('CHAP is enabled, but server secret not '
- 'configured on server %s') % connector['host'])
+ 'configured on server %s'), connector['host'])
return server_info
except hpexceptions.HTTPNotFound:
# server does not exist, so create one
dictionary of its reported capabilities.
"""
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
- 'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
- 'new_type': new_type,
- 'diff': diff,
- 'host': host})
+ 'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
+ 'new_type': new_type,
+ 'diff': diff,
+ 'host': host})
client = self._login()
try:
volume_info = client.getVolumeByName(volume['name'])
new_extra_specs,
extra_specs_key_map.keys())
- LOG.debug('LH specs=%(specs)s' % {'specs': lh_extra_specs})
+ LOG.debug('LH specs=%(specs)s', {'specs': lh_extra_specs})
# only set the ones that have changed
changed_extra_specs = {}
except hpexceptions.HTTPNotFound:
raise exception.VolumeNotFound(volume_id=volume['id'])
except Exception as ex:
- LOG.warning("%s" % ex)
+ LOG.warning(_LW("%s"), ex)
finally:
self._logout(client)
dictionary of its reported capabilities.
"""
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
- 'cluster=%(cluster)s' % {
+ 'cluster=%(cluster)s', {
'id': volume['id'],
'host': host,
'cluster': self.configuration.hplefthand_clustername})
try:
# get the cluster info, if it exists and compare
cluster_info = client.getClusterByName(cluster)
- LOG.debug('Cluster info: %s' % cluster_info)
+ LOG.debug('Cluster info: %s', cluster_info)
virtual_ips = cluster_info['virtualIPAddresses']
if driver != self.__class__.__name__:
LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because volume is from a different "
- "backend.") % volume['name'])
+ "backend."), volume['name'])
return false_ret
if vip != virtual_ips[0]['ipV4Address']:
LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because cluster exists in different "
- "management group.") % volume['name'])
+ "management group."), volume['name'])
return false_ret
except hpexceptions.HTTPNotFound:
LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because cluster exists in different "
- "management group.") % volume['name'])
+ "management group."), volume['name'])
return false_ret
finally:
self._logout(client)
client = self._login()
try:
volume_info = client.getVolumeByName(volume['name'])
- LOG.debug('Volume info: %s' % volume_info)
+ LOG.debug('Volume info: %s', volume_info)
# can't migrate if server is attached
if volume_info['iscsiSessions'] is not None:
LOG.info(_LI("Cannot provide backend assisted migration "
"for volume: %s because the volume has been "
- "exported.") % volume['name'])
+ "exported."), volume['name'])
return false_ret
# can't migrate if volume has snapshots
snap_info = client.getVolume(
volume_info['id'],
'fields=snapshots,snapshots[resource[members[name]]]')
- LOG.debug('Snapshot info: %s' % snap_info)
+ LOG.debug('Snapshot info: %s', snap_info)
if snap_info['snapshots']['resource'] is not None:
LOG.info(_LI("Cannot provide backend assisted migration "
"for volume: %s because the volume has "
- "snapshots.") % volume['name'])
+ "snapshots."), volume['name'])
return false_ret
options = {'clusterName': cluster}
except hpexceptions.HTTPNotFound:
LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because volume does not exist in this "
- "management group.") % volume['name'])
+ "management group."), volume['name'])
return false_ret
except hpexceptions.HTTPServerError as ex:
- LOG.error(ex)
+ LOG.error(_LE("Exception: %s"), ex)
return false_ret
finally:
self._logout(client)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error running SSH command: %s") % command)
+ LOG.error(_LE("Error running SSH command: %s"), command)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
config = self.configuration.scality_sofs_config
if not config:
msg = _("Value required for 'scality_sofs_config'")
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
# config can be a file path or a URL, check it
urllib2.urlopen(config, timeout=5).close()
except urllib2.URLError as e:
msg = _("Cannot access 'scality_sofs_config': %s") % e
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
# mount.sofs must be installed
if not os.access('/sbin/mount.sofs', os.X_OK):
msg = _("Cannot execute /sbin/mount.sofs")
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
@lockutils.synchronized('mount-sofs', 'cinder-sofs', external=True)
run_as_root=True)
if not os.path.isdir(sysdir):
msg = _("Cannot mount Scality SOFS, check syslog for errors")
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _size_bytes(self, size_in_g):
self.configuration.scality_sofs_volume_dir)
if not os.path.isdir(voldir):
msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir
- LOG.warn(msg)
+ LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
def create_volume(self, volume):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_local_path = self.local_path(volume)
- LOG.info(_LI('Begin backup of volume %s.') % volume['name'])
+ LOG.info(_LI('Begin backup of volume %s.'), volume['name'])
qemu_img_info = image_utils.qemu_img_info(volume_local_path)
if qemu_img_info.file_format != 'raw':
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
- LOG.info(_LI('Restoring backup %(backup)s to volume %(volume)s.') %
+ LOG.info(_LI('Restoring backup %(backup)s to volume %(volume)s.'),
{'backup': backup['id'], 'volume': volume['name']})
volume_local_path = self.local_path(volume)
with utils.temporary_chown(volume_local_path):
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
- LOG.warn(_LW('Volume %s does not have provider_location '
- 'specified, skipping.'), volume['name'])
+ LOG.warning(_LW('Volume %s does not have provider_location '
+ 'specified, skipping.'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
if os.path.exists(mounted_path):
self._delete(mounted_path)
else:
- LOG.debug("Skipping deletion of volume %s as it does not exist." %
+ LOG.debug("Skipping deletion of volume %s as it does not exist.",
mounted_path)
info_path = self._local_path_volume_info(volume)
volume_path = self.local_path(volume)
volume_size = volume['size']
- LOG.debug("Creating new volume at %s." % volume_path)
+ LOG.debug("Creating new volume at %s.", volume_path)
if os.path.exists(volume_path):
msg = _('File already exists at %s.') % volume_path
raise exception.SmbfsNoSuitableShareFound(
volume_size=volume_size_in_gib)
- LOG.debug('Selected %s as target smbfs share.' % target_share)
+ LOG.debug('Selected %s as target smbfs share.', target_share)
return target_share
used = (total_size - total_available) / total_size
if used > used_ratio:
- LOG.debug('%s is above smbfs_used_ratio.' % smbfs_share)
+ LOG.debug('%s is above smbfs_used_ratio.', smbfs_share)
return False
if apparent_available <= requested_volume_size:
- LOG.debug('%s is above smbfs_oversub_ratio.' % smbfs_share)
+ LOG.debug('%s is above smbfs_oversub_ratio.', smbfs_share)
return False
if total_allocated / total_size >= oversub_ratio:
- LOG.debug('%s reserved space is above smbfs_oversub_ratio.' %
+ LOG.debug('%s reserved space is above smbfs_oversub_ratio.',
smbfs_share)
return False
return True
volume_path = self.local_path(volume)
self._check_extend_volume_support(volume, size_gb)
- LOG.info(_LI('Resizing file to %sG...') % size_gb)
+ LOG.info(_LI('Resizing file to %sG...'), size_gb)
self._do_extend_volume(volume_path, size_gb, volume['name'])
"""
LOG.debug("Snapshot: %(snap)s, volume: %(vol)s, "
- "volume_size: %(size)s" %
+ "volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
snapshot['volume']['name'])
path_to_snap_img = os.path.join(vol_dir, img_info.backing_file)
- LOG.debug("Will copy from snapshot at %s" % path_to_snap_img)
+ LOG.debug("Will copy from snapshot at %s", path_to_snap_img)
image_utils.convert_image(path_to_snap_img,
self.local_path(volume),
time.sleep(_delay)
_tries -= 1
_delay *= backoff
- LOG.debug('Retrying %s, (%s attempts remaining)...' %
- (args, _tries))
+ LOG.debug('Retrying %(args)s, %(tries)s attempts '
+ 'remaining...',
+ {'args': args, 'tries': _tries})
# NOTE(jdg): Don't log the params passed here
# some cmds like createAccount will have sensitive
# info in the params, grab only the second tuple
if not found_volume:
LOG.error(_LE('Failed to retrieve volume SolidFire-'
- 'ID: %s in get_by_account!') % sf_volume_id)
+ 'ID: %s in get_by_account!'), sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
model_update = {}
if len(presets) > 0:
if len(presets) > 1:
LOG.warning(_LW('More than one valid preset was '
- 'detected, using %s') % presets[0])
+ 'detected, using %s'), presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
if uuid in v['name'] or uuid in alt_id:
found_count += 1
sf_volref = v
- LOG.debug("Mapped SolidFire volumeID %s "
- "to cinder ID %s.",
- v['volumeID'], uuid)
+ LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
+ "to cinder ID %(uuid)s.",
+ {'volume_id': v['volumeID'], 'uuid': uuid})
if found_count == 0:
# NOTE(jdg): Previously we would raise here, but there are cases
LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
if found_count > 1:
- LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s.") %
+ LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
{'count': found_count,
'uuid': uuid})
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
- "delete_volume operation!") % volume['id'])
+ "delete_volume operation!"), volume['id'])
LOG.error(_LE("This usually means the volume was never "
"successfully created."))
return
sfid = external_ref.get('source-id', None)
sfname = external_ref.get('name', None)
if sfid is None:
- raise exception.SolidFireAPIException("Manage existing volume "
- "requires 'source-id'.")
+ raise exception.SolidFireAPIException(_("Manage existing volume "
+ "requires 'source-id'."))
# First get the volume on the SF cluster (MUST be active)
params = {'startVolumeID': sfid,
sfid = external_ref.get('source-id', None)
if sfid is None:
- raise exception.SolidFireAPIException("Manage existing get size "
- "requires 'id'.")
+ raise exception.SolidFireAPIException(_("Manage existing get size "
+ "requires 'id'."))
params = {'startVolumeID': int(sfid),
'limit': 1}
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
- "unmanage operation!") % volume['id'])
- raise exception.SolidFireAPIException("Failed to find account "
- "for volume.")
+ "unmanage operation!"), volume['id'])
+ raise exception.SolidFireAPIException(_("Failed to find account "
+ "for volume."))
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
for attempt in xrange(self._count):
if attempt != 0:
LOG.warning(_LW('Retrying failed call to %(func)s, '
- 'attempt %(attempt)i.')
- % {'func': func_name,
- 'attempt': attempt})
+ 'attempt %(attempt)i.'),
+ {'func': func_name,
+ 'attempt': attempt})
try:
return fun(*args, **kwargs)
except self._exceptions:
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error activating Volume Group'))
- LOG.error(_LE('Cmd :%s') % err.cmd)
- LOG.error(_LE('StdOut :%s') % err.stdout)
- LOG.error(_LE('StdErr :%s') % err.stderr)
+ LOG.error(_LE('Cmd :%s'), err.cmd)
+ LOG.error(_LE('StdOut :%s'), err.stdout)
+ LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def deactivate_vg(self):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error deactivating Volume Group'))
- LOG.error(_LE('Cmd :%s') % err.cmd)
- LOG.error(_LE('StdOut :%s') % err.stdout)
- LOG.error(_LE('StdErr :%s') % err.stderr)
+ LOG.error(_LE('Cmd :%s'), err.cmd)
+ LOG.error(_LE('StdOut :%s'), err.stdout)
+ LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def destroy_vg(self):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error destroying Volume Group'))
- LOG.error(_LE('Cmd :%s') % err.cmd)
- LOG.error(_LE('StdOut :%s') % err.stdout)
- LOG.error(_LE('StdErr :%s') % err.stderr)
+ LOG.error(_LE('Cmd :%s'), err.cmd)
+ LOG.error(_LE('StdOut :%s'), err.stdout)
+ LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def pv_resize(self, pv_name, new_size_str):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error resizing Physical Volume'))
- LOG.error(_LE('Cmd :%s') % err.cmd)
- LOG.error(_LE('StdOut :%s') % err.stdout)
- LOG.error(_LE('StdErr :%s') % err.stderr)
+ LOG.error(_LE('Cmd :%s'), err.cmd)
+ LOG.error(_LE('StdOut :%s'), err.stdout)
+ LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def extend_thin_pool(self):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error extending thin provisioning pool'))
- LOG.error(_LE('Cmd :%s') % err.cmd)
- LOG.error(_LE('StdOut :%s') % err.stdout)
- LOG.error(_LE('StdErr :%s') % err.stderr)
+ LOG.error(_LE('Cmd :%s'), err.cmd)
+ LOG.error(_LE('StdOut :%s'), err.stdout)
+ LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def _attach_file(self, volume):
name = self._get_volname(volume)
devname = self._device_name(volume)
- LOG.debug('Attaching volume %s as %s', name, devname)
+ LOG.debug('Attaching volume %(name)s as %(devname)s',
+ {'name': name, 'devname': devname})
count = self._get_attached_count(volume)
if count == 0:
self._do_deactivate(volume, vg)
except putils.ProcessExecutionError:
LOG.warning(_LW('All attempts to recover failed detach '
- 'of %(volume)s failed.')
- % {'volume': volname})
+ 'of %(volume)s failed.'),
+ {'volume': volname})
@lockutils.synchronized('devices', 'cinder-srb-')
def _detach_file(self, volume):
count = self._get_attached_count(volume)
if count > 1:
LOG.info(_LI('Reference count of %(volume)s is %(count)d, '
- 'not detaching.')
- % {'volume': volume['name'],
- 'count': count})
+ 'not detaching.'),
+ {'volume': volume['name'], 'count': count})
return
message = (_('Could not detach volume %(vol)s from device %(dev)s.')
if vg is not None:
self._do_deactivate(volume, vg)
except putils.ProcessExecutionError:
- msg = _LE('Could not deactivate volume groupe %s')\
- % (self._get_volname(volume))
- LOG.error(msg)
+ LOG.error(_LE('Could not deactivate volume group %s'),
+ self._get_volname(volume))
raise
try:
self._do_detach(volume, vg=vg)
except putils.ProcessExecutionError:
- msg = _LE('Could not detach volume '
- '%(vol)s from device %(dev)s.') \
- % {'vol': name, 'dev': devname}
- LOG.error(msg)
+ LOG.error(_LE('Could not detach volume %(vol)s from device '
+ '%(dev)s.'), {'vol': name, 'dev': devname})
raise
self._decrement_attached_count(volume)
self._destroy_lvm(volume)
self._detach_file(volume)
- LOG.debug('Deleting volume %s, attached=%s',
- volume['name'], attached)
+ LOG.debug('Deleting volume %(volume_name)s, attached=%(attached)s',
+ {'volume_name': volume['name'], 'attached': attached})
self._destroy_file(volume)
"""
lun_type = '0'
- LOG.debug("Creating LUN %(name)s, %(size)s GB." %
+ LOG.debug("Creating LUN %(name)s, %(size)s GB.",
{'name': volume['name'], 'size': volume['size']})
if self.config.san_thin_provision:
LOG.debug("Lun %s already exists, continuing.", volume['id'])
except Exception:
- LOG.warn(_LW("Lun create for %s failed!"), volume['id'])
+ LOG.warning(_LW("Lun create for %s failed!"), volume['id'])
raise
@utils.synchronized('vmem-lun')
LOG.debug("Lun %s already deleted, continuing.", volume['id'])
except exception.ViolinBackendErrExists:
- LOG.warn(_LW("Lun %s has dependent snapshots, skipping."),
- volume['id'])
+ LOG.warning(_LW("Lun %s has dependent snapshots, skipping."),
+ volume['id'])
raise exception.VolumeIsBusy(volume_name=volume['id'])
except Exception:
volume -- volume object provided by the Manager
new_size -- new (increased) size in GB to be applied
"""
- LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s GB." %
+ LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s GB.",
{'id': volume['id'], 'size': volume['size'],
'new_size': new_size})
self.common = v6000_common.V6000Common(self.configuration)
self.lookup_service = fczm_utils.create_lookup_service()
- LOG.info(_LI("Initialized driver %(name)s version: %(vers)s.") %
+ LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
{'name': self.__class__.__name__, 'vers': self.VERSION})
def do_setup(self, context):
properties['target_lun'] = lun_id
properties['initiator_target_map'] = init_targ_map
- LOG.debug("Return FC data for zone addition: %(properties)s."
- % {'properties': properties})
+ LOG.debug("Return FC data for zone addition: %(properties)s.",
+ {'properties': properties})
return {'driver_volume_type': 'fibre_channel', 'data': properties}
properties['target_wwn'] = target_wwns
properties['initiator_target_map'] = init_targ_map
- LOG.debug("Return FC data for zone deletion: %(properties)s."
- % {'properties': properties})
+ LOG.debug("Return FC data for zone deletion: %(properties)s.",
+ {'properties': properties})
return {'driver_volume_type': 'fibre_channel', 'data': properties}
else:
raise exception.Error(_("No initiators found, cannot proceed"))
- LOG.debug("Exporting lun %s." % volume['id'])
+ LOG.debug("Exporting lun %s.", volume['id'])
try:
self.common._send_cmd_and_verify(
v = self.common.vip
wwpns = self._convert_wwns_openstack_to_vmem(connector['wwpns'])
- LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s." %
+ LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s.",
{'wwpns': wwpns, 'igroup': igroup})
resp = v.igroup.add_initiators(igroup, wwpns)
if bn1 in resp:
total_gb = resp[bn1] / units.Gi
else:
- LOG.warn(_LW("Failed to receive update for total_gb stat!"))
+ LOG.warning(_LW("Failed to receive update for total_gb stat!"))
if 'total_capacity_gb' in self.stats:
total_gb = self.stats['total_capacity_gb']
if bn2 in resp:
free_gb = resp[bn2] / units.Gi
else:
- LOG.warn(_LW("Failed to receive update for free_gb stat!"))
+ LOG.warning(_LW("Failed to receive update for free_gb stat!"))
if 'free_capacity_gb' in self.stats:
free_gb = self.stats['free_capacity_gb']
data['free_capacity_gb'] = free_gb
for i in data:
- LOG.debug("stat update: %(name)s=%(data)s." %
+ LOG.debug("stat update: %(name)s=%(data)s.",
{'name': i, 'data': data[i]})
self.stats = data
self.configuration.append_config_values(san.san_opts)
self.common = v6000_common.V6000Common(self.configuration)
- LOG.info(_LI("Initialized driver %(name)s version: %(vers)s.") %
+ LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
{'name': self.__class__.__name__, 'vers': self.VERSION})
def do_setup(self, context):
target_name = self._get_short_name(volume['id'])
- LOG.debug("Exporting lun %s." % volume['id'])
+ LOG.debug("Exporting lun %s.", volume['id'])
try:
self.common._send_cmd_and_verify(
if bn1 in resp:
total_gb = resp[bn1] / units.Gi
else:
- LOG.warn(_LW("Failed to receive update for total_gb stat!"))
+ LOG.warning(_LW("Failed to receive update for total_gb stat!"))
if 'total_capacity_gb' in self.stats:
total_gb = self.stats['total_capacity_gb']
if bn2 in resp:
free_gb = resp[bn2] / units.Gi
else:
- LOG.warn(_LW("Failed to receive update for free_gb stat!"))
+ LOG.warning(_LW("Failed to receive update for free_gb stat!"))
if 'free_capacity_gb' in self.stats:
free_gb = self.stats['free_capacity_gb']
data['free_capacity_gb'] = free_gb
for i in data:
- LOG.debug("stat update: %(name)s=%(data)s." %
+ LOG.debug("stat update: %(name)s=%(data)s.",
{'name': i, 'data': data[i]})
self.stats = data
if ret_dict:
hostname = ret_dict.items()[0][1]
else:
- LOG.debug("Unable to fetch gateway hostname for %s." % mg_to_query)
+ LOG.debug("Unable to fetch gateway hostname for %s.", mg_to_query)
return hostname
except exceptions.VimException:
# TODO(vbala) volumeops.get_dss_rp shouldn't throw VimException
# for empty datastore list.
- LOG.warn(_LW("Unable to fetch datastores connected "
- "to host %s."), host_ref, exc_info=True)
+ LOG.warning(_LW("Unable to fetch datastores connected "
+ "to host %s."), host_ref, exc_info=True)
continue
if not datastores:
VERSION = '1.4.0'
def _do_deprecation_warning(self):
- LOG.warn(_LW('The VMware ESX VMDK driver is now deprecated '
- 'and will be removed in the Juno release. The VMware '
- 'vCenter VMDK driver will remain and continue to be '
- 'supported.'))
+ LOG.warning(_LW('The VMware ESX VMDK driver is now deprecated '
+ 'and will be removed in the Juno release. The VMware '
+ 'vCenter VMDK driver will remain and continue to be '
+ 'supported.'))
def __init__(self, *args, **kwargs):
super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
LOG.error(msg, storage_profile)
raise exceptions.VimException(msg % storage_profile)
elif storage_profile:
- LOG.warn(_LW("Ignoring storage profile %s requirement for this "
- "volume since policy based placement is "
- "disabled."), storage_profile)
+ LOG.warning(_LW("Ignoring storage profile %s requirement for this "
+ "volume since policy based placement is "
+ "disabled."), storage_profile)
size_bytes = volume['size'] * units.Gi
datastore_summary = self._select_datastore_summary(size_bytes,
if not backing:
# Create a backing in case it does not exist. It is a bad use
# case to boot from an empty volume.
- LOG.warn(_LW("Trying to boot from an empty volume: %s."),
- volume['name'])
+ LOG.warning(_LW("Trying to boot from an empty volume: %s."),
+ volume['name'])
# Create backing
backing = self._create_backing(volume)
self.volumeops.delete_vmdk_file(
descriptor_ds_file_path, dc_ref)
except exceptions.VimException:
- LOG.warn(_LW("Error occurred while deleting temporary "
- "disk: %s."),
- descriptor_ds_file_path,
- exc_info=True)
+ LOG.warning(_LW("Error occurred while deleting temporary "
+ "disk: %s."),
+ descriptor_ds_file_path, exc_info=True)
def _copy_temp_virtual_disk(self, src_dc_ref, src_path, dest_dc_ref,
dest_path):
self.volumeops.delete_file(
path.get_descriptor_ds_file_path(), dc_ref)
except exceptions.VimException:
- LOG.warn(_LW("Error occurred while deleting "
- "descriptor: %s."),
- path.get_descriptor_ds_file_path(),
- exc_info=True)
+ LOG.warning(_LW("Error occurred while deleting "
+ "descriptor: %s."),
+ path.get_descriptor_ds_file_path(),
+ exc_info=True)
if dest_path != path:
# Copy temporary disk to given destination.
try:
self.volumeops.delete_backing(backing)
except exceptions.VimException:
- LOG.warn(_LW("Error occurred while deleting backing: %s."),
- backing,
- exc_info=True)
+ LOG.warning(_LW("Error occurred while deleting backing: %s."),
+ backing, exc_info=True)
def _create_volume_from_non_stream_optimized_image(
self, context, volume, image_service, image_id,
"""
# Can't attempt retype if the volume is in use.
if self._in_use(volume):
- LOG.warn(_LW("Volume: %s is in use, can't retype."),
- volume['name'])
+ LOG.warning(_LW("Volume: %s is in use, can't retype."),
+ volume['name'])
return False
# If the backing doesn't exist, retype is NOP.
best_candidate = self.ds_sel.select_datastore(req)
if not best_candidate:
# No candidate datastores; can't retype.
- LOG.warn(_LW("There are no datastores matching new "
- "requirements; can't retype volume: %s."),
- volume['name'])
+ LOG.warning(_LW("There are no datastores matching new "
+ "requirements; can't retype volume: %s."),
+ volume['name'])
return False
(host, rp, summary) = best_candidate
self.volumeops.rename_backing(backing,
volume['name'])
except exceptions.VimException:
- LOG.warn(_LW("Changing backing: %(backing)s "
- "name from %(new_name)s to "
- "%(old_name)s failed."),
- {'backing': backing,
- 'new_name': tmp_name,
- 'old_name': volume['name']})
+ LOG.warning(_LW("Changing backing: "
+ "%(backing)s name from "
+ "%(new_name)s to %(old_name)s "
+ "failed."),
+ {'backing': backing,
+ 'new_name': tmp_name,
+ 'old_name': volume['name']})
# Update the backing's storage profile if needed.
if need_profile_change:
self.volumeops.rename_backing(backing,
volume['name'])
except exceptions.VimException:
- LOG.warn(_LW("Cannot undo volume rename; old name "
- "was %(old_name)s and new name is "
- "%(new_name)s."),
- {'old_name': volume['name'],
- 'new_name': tmp_backing_name},
- exc_info=True)
+ LOG.warning(_LW("Cannot undo volume rename; old "
+ "name was %(old_name)s and new "
+ "name is %(new_name)s."),
+ {'old_name': volume['name'],
+ 'new_name': tmp_backing_name},
+ exc_info=True)
finally:
# Delete the temporary backing.
self._delete_temp_backing(src)
if len(mappings) > 0:
if os.path.exists(smbfs_share):
- LOG.debug('Share already mounted: %s' % smbfs_share)
+ LOG.debug('Share already mounted: %s', smbfs_share)
return True
else:
- LOG.debug('Share exists but is unavailable: %s '
- % smbfs_share)
+ LOG.debug('Share exists but is unavailable: %s', smbfs_share)
for mapping in mappings:
# Due to a bug in the WMI module, getting the output of
# methods returning None will raise an AttributeError
options.get('pass'))
try:
- LOG.info(_LI('Mounting share: %s') % smbfs_share)
+ LOG.info(_LI('Mounting share: %s'), smbfs_share)
self.smb_conn.Msft_SmbMapping.Create(**smb_opts)
except wmi.x_wmi as exc:
err_msg = (_(
'WMI exception: %(wmi_exc)s'
'Options: %(options)s') % {'smbfs_share': smbfs_share,
'options': smb_opts,
- 'wmi_exc': exc})
+ 'wmi_exc': six.text_type(exc)})
raise exception.VolumeBackendAPIException(data=err_msg)
def get_capacity_info(self, smbfs_share):
ctypes.pointer(total_bytes),
ctypes.pointer(free_bytes))
if retcode == 0:
- LOG.error(_LE("Could not get share %s capacity info.") %
+ LOG.error(_LE("Could not get share %s capacity info."),
smbfs_share)
return 0, 0
return total_bytes.value, free_bytes.value
from oslo_utils import units
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder import utils
smbfs_share)
total_allocated = self._get_total_allocated(smbfs_share)
return_value = [total_size, total_available, total_allocated]
- LOG.info('Smb share %s Total size %s Total allocated %s'
- % (smbfs_share, total_size, total_allocated))
+ LOG.info(_LI('Smb share %(share)s Total size %(size)s '
+ 'Total allocated %(allocated)s'),
+ {'share': smbfs_share, 'size': total_size,
+ 'allocated': total_allocated})
return [float(x) for x in return_value]
def _get_total_allocated(self, smbfs_share):
"""Copy data from snapshot to destination volume."""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
- "volume_size: %(size)s" %
+ "volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': snapshot['volume_size']})
from oslo_config import cfg
from oslo_log import log as logging
+import six
from cinder import exception
from cinder.i18n import _, _LI
listen = wt_portal.Listen
except wmi.x_wmi as exc:
err_msg = (_('check_for_setup_error: the state of the WT Portal '
- 'could not be verified. WMI exception: %s'))
- LOG.error(err_msg % exc)
- raise exception.VolumeBackendAPIException(data=err_msg % exc)
+ 'could not be verified. WMI exception: %s')
+ % six.text_type(exc))
+ LOG.error(err_msg)
+ raise exception.VolumeBackendAPIException(data=err_msg)
if not listen:
err_msg = (_('check_for_setup_error: there is no ISCSI traffic '
wt_portal = self._conn_wmi.WT_Portal()[0]
except wmi.x_wmi as exc:
err_msg = (_('get_host_information: the state of the WT Portal '
- 'could not be verified. WMI exception: %s'))
- LOG.error(err_msg % exc)
- raise exception.VolumeBackendAPIException(data=err_msg % exc)
+ 'could not be verified. WMI exception: %s')
+ % six.text_type(exc))
+ LOG.error(err_msg)
+ raise exception.VolumeBackendAPIException(data=err_msg)
(address, port) = (wt_portal.Address, wt_portal.Port)
# Getting the host information
try:
host = hosts[0]
except wmi.x_wmi as exc:
err_msg = (_('get_host_information: the ISCSI target information '
- 'could not be retrieved. WMI exception: %s'))
- LOG.error(err_msg % exc)
+ 'could not be retrieved. WMI exception: %s')
+ % six.text_type(exc))
+ LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
properties = {}
'target name: %(target)s could not be established. '
'WMI exception: %(wmi_exc)s') %
{'init': initiator_name, 'target': target_name,
- 'wmi_exc': exc})
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
except wmi.x_wmi as exc:
err_msg = (_(
'delete_iscsi_target: error when deleting the iscsi target '
- 'associated with target name: %(target)s . '
- 'WMI exception: %(wmi_exc)s') % {'target': target_name,
- 'wmi_exc': exc})
+ 'associated with target name: %(target)s . WMI '
+ 'exception: %(wmi_exc)s') % {'target': target_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
err_msg = (_(
'create_volume: error when creating the volume name: '
'%(vol_name)s . WMI exception: '
- '%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
+ '%(wmi_exc)s') % {'vol_name': vol_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
err_msg = (_("Failed to import disk: %(vhd_path)s. "
"WMI exception: %(exc)s") %
{'vhd_path': vhd_path,
- 'exc': exc})
+ 'exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
err_msg = (_(
'Error changing disk status: '
'%(vol_name)s . WMI exception: '
- '%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
+ '%(wmi_exc)s') % {'vol_name': vol_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
disk = self._conn_wmi.WT_Disk(Description=vol_name)
if not disk:
LOG.debug('Skipping deleting disk %s as it does not '
- 'exist.' % vol_name)
+ 'exist.', vol_name)
return
wt_disk = disk[0]
wt_disk.Delete_()
err_msg = (_(
'delete_volume: error when deleting the volume name: '
'%(vol_name)s . WMI exception: '
- '%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
+ '%(wmi_exc)s') % {'vol_name': vol_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
err_msg = (_(
'create_snapshot: error when creating the snapshot name: '
'%(vol_name)s . WMI exception: '
- '%(wmi_exc)s') % {'vol_name': snapshot_name, 'wmi_exc': exc})
+ '%(wmi_exc)s') % {'vol_name': snapshot_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
except wmi.x_wmi as exc:
err_msg = (_(
'create_volume_from_snapshot: error when creating the volume '
- 'name: %(vol_name)s from snapshot name: %(snap_name)s. '
- 'WMI exception: %(wmi_exc)s') % {'vol_name': vol_name,
- 'snap_name': snap_name,
- 'wmi_exc': exc})
+ 'name: %(vol_name)s from snapshot name: %(snap_name)s. WMI '
+ 'exception: %(wmi_exc)s') % {'vol_name': vol_name,
+ 'snap_name': snap_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
err_msg = (_(
'delete_snapshot: error when deleting the snapshot name: '
'%(snap_name)s . WMI exception: '
- '%(wmi_exc)s') % {'snap_name': snap_name, 'wmi_exc': exc})
+ '%(wmi_exc)s') % {'snap_name': snap_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
err_msg = (_(
'create_iscsi_target: error when creating iscsi target: '
'%(tar_name)s . WMI exception: '
- '%(wmi_exc)s') % {'tar_name': target_name, 'wmi_exc': exc})
+ '%(wmi_exc)s') % {'tar_name': target_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
else:
host = self._conn_wmi.WT_Host(HostName=target_name)
if not host:
LOG.debug('Skipping removing target %s as it does not '
- 'exist.' % target_name)
+ 'exist.', target_name)
return
wt_host = host[0]
wt_host.RemoveAllWTDisks()
err_msg = (_(
'remove_iscsi_target: error when deleting iscsi target: '
'%(tar_name)s . WMI exception: '
- '%(wmi_exc)s') % {'tar_name': target_name, 'wmi_exc': exc})
+ '%(wmi_exc)s') % {'tar_name': target_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
except wmi.x_wmi as exc:
err_msg = (_(
'add_disk_to_target: error adding disk associated to volume : '
- '%(vol_name)s to the target name: %(tar_name)s '
- '. WMI exception: %(wmi_exc)s') % {'tar_name': target_name,
- 'vol_name': vol_name,
- 'wmi_exc': exc})
+ '%(vol_name)s to the target name: %(tar_name)s . WMI '
+ 'exception: %(wmi_exc)s') % {'tar_name': target_name,
+ 'vol_name': vol_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
wt_disk.Extend(additional_size)
except wmi.x_wmi as exc:
err_msg = (_(
- 'extend: error when extending the volume: %(vol_name)s '
- '.WMI exception: %(wmi_exc)s') % {'vol_name': vol_name,
- 'wmi_exc': exc})
+ 'extend: error when extending the volume: %(vol_name)s .WMI '
+ 'exception: %(wmi_exc)s') % {'vol_name': vol_name,
+ 'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
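The WMI hunks above all apply the same fix: the caught exception is stringified with six.text_type() before it is %-interpolated into the translated message, so a non-ASCII WMI payload cannot break the formatting. A minimal, self-contained sketch of that pattern, where the volume name, message text, and logger are illustrative only:

# Illustrative sketch only; names and message text are made up.
import logging

import six

LOG = logging.getLogger(__name__)


def log_wmi_failure(vol_name, exc):
    # six.text_type() coerces the exception to unicode on both Python 2
    # and Python 3, so the eager % interpolation below stays safe even
    # when the underlying error text contains non-ASCII characters.
    err_msg = ('Error extending volume %(vol_name)s. '
               'WMI exception: %(wmi_exc)s'
               % {'vol_name': vol_name, 'wmi_exc': six.text_type(exc)})
    LOG.error(err_msg)
    return err_msg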
def local_path(self, volume, format=None):
base_vhd_folder = CONF.windows_iscsi_lun_path
if not os.path.exists(base_vhd_folder):
- LOG.debug('Creating folder: %s' % base_vhd_folder)
+ LOG.debug('Creating folder: %s', base_vhd_folder)
os.makedirs(base_vhd_folder)
if not format:
format = self.get_supported_format()
LOG.debug("XIOISEDriver check_for_setup_error called.")
# The san_ip must always be set
if self.configuration.san_ip == "":
- msg = _LE("san ip must be configured!")
- LOG.error(msg)
+ LOG.error(_LE("san ip must be configured!"))
RaiseXIODriverException()
# The san_login must always be set
if self.configuration.san_login == "":
- msg = _LE("san_login must be configured!")
- LOG.error(msg)
+ LOG.error(_LE("san_login must be configured!"))
RaiseXIODriverException()
# The san_password must always be set
if self.configuration.san_password == "":
- msg = _LE("san_password must be configured!")
- LOG.error(msg)
+ LOG.error(_LE("san_password must be configured!"))
RaiseXIODriverException()
return
if status != 200:
# unsuccessful - this is fatal as we need the global id
# to build REST requests.
- msg = _LE("Array query failed - No response (%d)!") % status
- LOG.error(msg)
+ LOG.error(_LE("Array query failed - No response (%d)!"), status)
RaiseXIODriverException()
# Successfully fetched QUERY info. Parse out globalid along with
# ipaddress for Controller 1 and Controller 2. We assign primary
self.configuration.ise_qos = False
capabilities = xml_tree.find('capabilities')
if capabilities is None:
- msg = _LE("Array query failed. No capabilities in response!")
- LOG.error(msg)
+ LOG.error(_LE("Array query failed. No capabilities in response!"))
RaiseXIODriverException()
for node in capabilities:
if node.tag != 'capability':
support['thin-clones'] = True
# Make sure ISE support necessary features
if not support['clones']:
- msg = _LE("ISE FW version is not compatible with Openstack!")
- LOG.error(msg)
+ LOG.error(_LE("ISE FW version is not compatible with Openstack!"))
RaiseXIODriverException()
# set up thin provisioning support
self.configuration.san_thin_provision = support['thin-clones']
# Fill in global id, primary and secondary ip addresses
globalid = xml_tree.find('globalid')
if globalid is None:
- msg = _LE("Array query failed. No global id in XML response!")
- LOG.error(msg)
+ LOG.error(_LE("Array query failed. No global id in XML response!"))
RaiseXIODriverException()
self.ise_globalid = globalid.text
controllers = xml_tree.find('controllers')
if controllers is None:
- msg = _LE("Array query failed. No controllers in response!")
- LOG.error(msg)
+ LOG.error(_LE("Array query failed. No controllers in response!"))
RaiseXIODriverException()
for node in controllers:
if node.tag != 'controller':
# this call will populate globalid
self._send_query()
if self.ise_globalid is None:
- msg = _LE("ISE globalid not set!")
- LOG.error(msg)
+ LOG.error(_LE("ISE globalid not set!"))
RaiseXIODriverException()
return self.ise_globalid
self.ise_primary_ip = self.configuration.san_ip
if self.ise_primary_ip == '':
# No IP - fatal.
- msg = _LE("Primary IP must be set!")
- LOG.error(msg)
+ LOG.error(_LE("Primary IP must be set!"))
RaiseXIODriverException()
return self.ise_primary_ip
def _call_loop(loop_args):
remaining = loop_args['retries']
args = loop_args['args']
- LOG.debug("In call loop (%d) %s", remaining, args)
+ LOG.debug("In call loop (%(remaining)d) %(args)s",
+ {'remaining': remaining, 'args': args})
(remaining, response) = loop_args['func'](args, remaining)
if remaining == 0:
# We are done - let our caller handle response
# successful, the request flag for a new QUERY will be set. The QUERY
# will be sent on next connection attempt to figure out which
# controller is primary in case it has changed.
- LOG.debug("Connect: %s %s %s", method, url, body)
+ LOG.debug("Connect: %(method)s %(url)s %(body)s",
+ {'method': method, 'url': url, 'body': body})
using_secondary = 0
response = {}
response['status'] = 0
if secondary_ip is '':
# if secondary is not setup yet, then assert
# connection on primary and secondary ip failed
- msg = (_LE("Connection to %s failed and no secondary!") %
- primary_ip)
- LOG.error(msg)
+ LOG.error(_LE("Connection to %s failed and no secondary!"),
+ primary_ip)
RaiseXIODriverException()
# swap primary for secondary ip in URL
url = string.replace(url, primary_ip, secondary_ip)
# connection failed on both IPs - break out of the loop
break
# connection on primary and secondary ip failed
- msg = (_LE("Could not connect to %(primary)s or %(secondary)s!") %
- {'primary': primary_ip, 'secondary': secondary_ip})
- LOG.error(msg)
+ LOG.error(_LE("Could not connect to %(primary)s or %(secondary)s!"),
+ {'primary': primary_ip, 'secondary': secondary_ip})
RaiseXIODriverException()
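Throughout the XIO hunks the eager '%' interpolation is moved into the logging call itself; with a single dict of named values the logger defers the substitution until the record is actually emitted, and the untouched format string remains visible to translation tooling. A small sketch of the call form, using made-up addresses and logger:

# Sketch of deferred, named-placeholder interpolation (values made up).
import logging

logging.basicConfig()
LOG = logging.getLogger(__name__)

primary_ip, secondary_ip = '192.0.2.10', '192.0.2.11'

# The %-substitution happens inside the logging machinery, and only if
# the record passes the level filter, so suppressed messages cost no
# string-formatting work.
LOG.error("Could not connect to %(primary)s or %(secondary)s!",
          {'primary': primary_ip, 'secondary': secondary_ip})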
def _param_string(self, params):
resp = self._send_cmd('GET', url, {})
status = resp['status']
if status != 200:
- msg = _LW("IOnetworks GET failed (%d)") % status
- LOG.warning(msg)
+ LOG.warning(_LW("IOnetworks GET failed (%d)"), status)
return chap
# Got a good response. Parse out CHAP info. First check if CHAP is
# enabled and if so parse out username and password.
status = resp['status']
if status != 200:
# Not good. Throw an exception.
- msg = _LE("Controller GET failed (%d)") % status
- LOG.error(msg)
+ LOG.error(_LE("Controller GET failed (%d)"), status)
RaiseXIODriverException()
# Good response. Parse out IQN that matches iscsi_ip_address
# passed in from cinder.conf. IQN is 'hidden' in globalid field.
if target_iqn != '':
return target_iqn
# Did not find a matching IQN. Upsetting.
- msg = _LE("Failed to get IQN!")
- LOG.error(msg)
+ LOG.error(_LE("Failed to get IQN!"))
RaiseXIODriverException()
def find_target_wwns(self):
status = resp['status']
if status != 200:
# Not good. Throw an exception.
- msg = _LE("Controller GET failed (%d)") % status
- LOG.error(msg)
+ LOG.error(_LE("Controller GET failed (%d)"), status)
RaiseXIODriverException()
# Good response. Parse out globalid (WWN) of endpoint that matches
# protocol and type (array).
status = resp['status']
if status != 200:
# Not good. Throw an exception.
- msg = _LE("Failed to get allocation information (%d)!") % status
- LOG.error(msg)
+ LOG.error(_LE("Failed to get allocation information (%d)!"),
+ status)
RaiseXIODriverException()
# Good response. Parse out LUN.
xml_tree = etree.fromstring(resp['content'])
if luntag is not None:
return luntag.text
# Did not find LUN. Throw an exception.
- msg = _LE("Failed to get LUN information!")
- LOG.error(msg)
+ LOG.error(_LE("Failed to get LUN information!"))
RaiseXIODriverException()
def _get_volume_info(self, vol_name):
url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
resp = self._send_cmd('GET', url, {'name': vol_name})
if resp['status'] != 200:
- msg = (_LW("Could not get status for %(name)s (%(status)d).") %
- {'name': vol_name, 'status': resp['status']})
- LOG.warning(msg)
+ LOG.warning(_LW("Could not get status for %(name)s (%(status)d)."),
+ {'name': vol_name, 'status': resp['status']})
return vol_info
# Good response. Parse down to Volume tag in list of one.
root = etree.fromstring(resp['content'])
volume_node = root.find('volume')
if volume_node is None:
- msg = _LW("No volume node in XML content.")
- LOG.warning(msg)
+ LOG.warning(_LW("No volume node in XML content."))
return vol_info
# Location can be found as an attribute in the volume node tag.
vol_info['location'] = volume_node.attrib['self']
# Find status tag
status = volume_node.find('status')
if status is None:
- msg = _LW("No status payload for volume %s.") % vol_name
- LOG.warning(msg)
+ LOG.warning(_LW("No status payload for volume %s."), vol_name)
return vol_info
# Fill in value and string from status tag attributes.
vol_info['value'] = status.attrib['value']
resp = self._send_cmd('GET', url, {'name': volume['name'],
'hostname': hostname})
if resp['status'] != 200:
- msg = (_LE("Could not GET allocation information (%d)!") %
- resp['status'])
- LOG.error(msg)
+ LOG.error(_LE("Could not GET allocation information (%d)!"),
+ resp['status'])
RaiseXIODriverException()
# Good response. Find the allocation based on volume name.
allocation_tree = etree.fromstring(resp['content'])
if status == 201:
LOG.info(_LI("Volume %s presented."), volume['name'])
elif status == 409:
- msg = (_LW("Volume %(name)s already presented (%(status)d)!") %
- {'name': volume['name'], 'status': status})
- LOG.warning(msg)
+ LOG.warning(_LW("Volume %(name)s already presented (%(status)d)!"),
+ {'name': volume['name'], 'status': status})
else:
- msg = (_LE("Failed to present volume %(name)s (%(status)d)!") %
- {'name': volume['name'], 'status': status})
- LOG.error(msg)
+ LOG.error(_LE("Failed to present volume %(name)s (%(status)d)!"),
+ {'name': volume['name'], 'status': status})
RaiseXIODriverException()
# Fetch LUN. In theory the LUN should be what caller requested.
# We try to use shortcut as location comes back in Location header.
if location != '':
target_lun = self._find_target_lun(location)
# Success. Return target LUN.
- LOG.debug("Volume %s presented: %s %s",
- volume['name'], hostname, target_lun)
+ LOG.debug("Volume %(volume)s presented: %(host)s %(lun)s",
+ {'volume': volume['name'], 'host': hostname,
+ 'lun': target_lun})
return target_lun
def find_allocations(self, hostname):
resp = self._send_cmd('GET', url, {'hostname': hostname})
status = resp['status']
if status != 200:
- msg = (_LE("Failed to get allocation information: "
- "%(host)s (%(status)d)!") %
- {'host': hostname, 'status': status})
- LOG.error(msg)
+ LOG.error(_LE("Failed to get allocation information: "
+ "%(host)s (%(status)d)!"),
+ {'host': hostname, 'status': status})
RaiseXIODriverException()
# Good response. Count the number of allocations.
allocation_tree = etree.fromstring(resp['content'])
resp = self._send_cmd('GET', url, params)
status = resp['status']
if resp['status'] != 200:
- msg = _LE("Could not find any hosts (%s)") % status
- LOG.error(msg)
+ LOG.error(_LE("Could not find any hosts (%s)"), status)
RaiseXIODriverException()
# Good response. Try to match up a host based on end point string.
host_tree = etree.fromstring(resp['content'])
else:
endpoint_str = endpoints
# Log host creation.
- LOG.debug("Create host %s; %s", hostname, endpoint_str)
+ LOG.debug("Create host %(host)s; %(endpoint)s",
+ {'host': hostname, 'endpoint': endpoint_str})
# Issue REST call to create host entry of Openstack type.
params = {}
params = {'name': hostname, 'endpoint': endpoint_str,
resp = self._send_cmd('POST', url, params)
status = resp['status']
if status != 201 and status != 409:
- msg = _LE("POST for host create failed (%s)!") % status
- LOG.error(msg)
+ LOG.error(_LE("POST for host create failed (%s)!"), status)
RaiseXIODriverException()
# Successfully created host entry. Return host name.
return hostname
if vol_info['value'] == '0':
LOG.debug('Source volume %s ready.', volume_name)
else:
- msg = _LE("Source volume %s not ready!") % volume_name
- LOG.error(msg)
+ LOG.error(_LE("Source volume %s not ready!"), volume_name)
RaiseXIODriverException()
# Prepare snapshot
# get extra_specs and qos specs from source volume
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
extra_specs = self._get_extra_specs(ctxt, type_id)
- LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
+ LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s",
+ {'volume_name': volume['name'], 'extra_specs': extra_specs})
qos = self._get_qos_specs(ctxt, type_id)
# Wait until snapshot/clone is prepared.
args['method'] = 'POST'
args, retries)
if resp['status'] != 202:
# clone prepare failed - bummer
- msg = _LE("Prepare clone failed for %s.") % clone['name']
- LOG.error(msg)
+ LOG.error(_LE("Prepare clone failed for %s."), clone['name'])
RaiseXIODriverException()
# clone prepare request accepted
# make sure not to continue until clone prepared
if PREPARED_STATUS in clone_info['details']:
LOG.debug('Clone %s prepared.', clone['name'])
else:
- msg = (_LE("Clone %s not in prepared state!") % clone['name'])
- LOG.error(msg)
+ LOG.error(_LE("Clone %s not in prepared state!"), clone['name'])
RaiseXIODriverException()
# Clone prepared, now commit the create
resp = self._send_cmd('PUT', clone_info['location'],
{clone_type: 'true'})
if resp['status'] != 201:
- msg = (_LE("Commit clone failed: %(name)s (%(status)d)!") %
- {'name': clone['name'], 'status': resp['status']})
- LOG.error(msg)
+ LOG.error(_LE("Commit clone failed: %(name)s (%(status)d)!"),
+ {'name': clone['name'], 'status': resp['status']})
RaiseXIODriverException()
# Clone create request accepted. Make sure not to return until clone
# operational.
clone_info = self._wait_for_completion(self._help_wait_for_status,
args, retries)
if OPERATIONAL_STATUS in clone_info['string']:
- msg = _LI("Clone %s created."), clone['name']
- LOG.info(msg)
+ LOG.info(_LI("Clone %s created."), clone['name'])
else:
- msg = _LE("Commit failed for %s!") % clone['name']
- LOG.error(msg)
+ LOG.error(_LE("Commit failed for %s!"), clone['name'])
RaiseXIODriverException()
return
status = resp['status']
if status != 200:
# Request failed. Return what we have, which isn't much.
- msg = _LW("Could not get pool information (%s)!") % status
- LOG.warning(msg)
+ LOG.warning(_LW("Could not get pool information (%s)!"), status)
return (pools, vol_cnt)
# Parse out available (free) and used. Add them up to get total.
xml_tree = etree.fromstring(resp['content'])
"""Get volume stats."""
if refresh:
self._vol_stats = self._update_volume_stats()
- LOG.debug("ISE get_volume_stats (total, free): %s, %s",
- self._vol_stats['total_capacity_gb'],
- self._vol_stats['free_capacity_gb'])
+ LOG.debug("ISE get_volume_stats (total, free): %(total)s, %(free)s",
+ {'total': self._vol_stats['total_capacity_gb'],
+ 'free': self._vol_stats['free_capacity_gb']})
return self._vol_stats
def _get_extra_specs(self, ctxt, type_id):
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
extra_specs = self._get_extra_specs(ctxt, type_id)
- LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
+ LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s",
+ {'volume_name': volume['name'], 'extra_specs': extra_specs})
qos = self._get_qos_specs(ctxt, type_id)
# Make create call
url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
'IOPSmax': qos['maxIOPS'],
'IOPSburst': qos['burstIOPS']})
if resp['status'] != 201:
- msg = (_LE("Failed to create volume: %(name)s (%(status)s)") %
- {'name': volume['name'], 'status': resp['status']})
- LOG.error(msg)
+ LOG.error(_LE("Failed to create volume: %(name)s (%(status)s)"),
+ {'name': volume['name'], 'status': resp['status']})
RaiseXIODriverException()
# Good response. Make sure volume is in operational state before
# returning. Volume creation completes asynchronously.
args, retries)
if OPERATIONAL_STATUS in vol_info['string']:
# Ready.
- msg = _LI("Volume %s created"), volume['name']
- LOG.info(msg)
+ LOG.info(_LI("Volume %s created"), volume['name'])
else:
- msg = _LE("Failed to create volume %s.") % volume['name']
- LOG.error(msg)
+ LOG.error(_LE("Failed to create volume %s."), volume['name'])
RaiseXIODriverException()
return
# in response. Used for DELETE call below.
vol_info = self._get_volume_info(volume['name'])
if vol_info['location'] == '':
- msg = _LW("Delete volume: %s not found!") % volume['name']
- LOG.warning(msg)
+ LOG.warning(_LW("Delete volume: %s not found!"), volume['name'])
return
# Make DELETE call.
args = {}
retries = self.configuration.ise_completion_retries
resp = self._wait_for_completion(self._help_call_method, args, retries)
if resp['status'] == 204:
- msg = (_LI("Volume %s deleted."), volume['name'])
- LOG.info(msg)
+ LOG.info(_LI("Volume %s deleted."), volume['name'])
return
def delete_volume(self, volume):
# in response. Used for PUT call below.
vol_info = self._get_volume_info(volume['name'])
if vol_info['location'] == '':
- msg = _LE("modify volume: %s does not exist!") % volume['name']
- LOG.error(msg)
+ LOG.error(_LE("modify volume: %s does not exist!"), volume['name'])
RaiseXIODriverException()
# Make modify volume REST call using PUT.
# Location from above is used as identifier.
if status == 201:
LOG.debug("Volume %s modified.", volume['name'])
return True
- msg = (_LE("Modify volume PUT failed: %(name)s (%(status)d).") %
- {'name': volume['name'], 'status': status})
- LOG.error(msg)
+ LOG.error(_LE("Modify volume PUT failed: %(name)s (%(status)d)."),
+ {'name': volume['name'], 'status': status})
RaiseXIODriverException()
def extend_volume(self, volume, new_size):
LOG.debug("extend_volume called")
ret = self._modify_volume(volume, {'size': new_size})
if ret is True:
- msg = (_LI("volume %(name)s extended to %(size)d."),
- {'name': volume['name'], 'size': new_size})
- LOG.info(msg)
+ LOG.info(_LI("volume %(name)s extended to %(size)d."),
+ {'name': volume['name'], 'size': new_size})
return
def retype(self, ctxt, volume, new_type, diff, host):
'IOPSmax': qos['maxIOPS'],
'IOPSburst': qos['burstIOPS']})
if ret is True:
- msg = _LI("Volume %s retyped."), volume['name']
- LOG.info(msg)
+ LOG.info(_LI("Volume %s retyped."), volume['name'])
return True
def manage_existing(self, volume, ise_volume_ref):
"""Convert an existing ISE volume to a Cinder volume."""
LOG.debug("X-IO manage_existing called")
if 'source-name' not in ise_volume_ref:
- msg = _LE("manage_existing: No source-name in ref!")
- LOG.error(msg)
+ LOG.error(_LE("manage_existing: No source-name in ref!"))
RaiseXIODriverException()
# copy the source-name to 'name' for modify volume use
ise_volume_ref['name'] = ise_volume_ref['source-name']
'IOPSmax': qos['maxIOPS'],
'IOPSburst': qos['burstIOPS']})
if ret is True:
- msg = _LI("Volume %s converted."), ise_volume_ref['name']
- LOG.info(msg)
+ LOG.info(_LI("Volume %s converted."), ise_volume_ref['name'])
return ret
def manage_existing_get_size(self, volume, ise_volume_ref):
"""Get size of an existing ISE volume."""
LOG.debug("X-IO manage_existing_get_size called")
if 'source-name' not in ise_volume_ref:
- msg = _LE("manage_existing_get_size: No source-name in ref!")
- LOG.error(msg)
+ LOG.error(_LE("manage_existing_get_size: No source-name in ref!"))
RaiseXIODriverException()
ref_name = ise_volume_ref['source-name']
# get volume status including size
vol_info = self._get_volume_info(ref_name)
if vol_info['location'] == '':
- msg = (_LE("manage_existing_get_size: %s does not exist!") %
- ref_name)
- LOG.error(msg)
+ LOG.error(_LE("manage_existing_get_size: %s does not exist!"),
+ ref_name)
RaiseXIODriverException()
return int(vol_info['size'])
LOG.debug("X-IO unmanage called")
vol_info = self._get_volume_info(volume['name'])
if vol_info['location'] == '':
- msg = _LE("unmanage: Volume %s does not exist!") % volume['name']
- LOG.error(msg)
+ LOG.error(_LE("unmanage: Volume %s does not exist!"),
+ volume['name'])
RaiseXIODriverException()
# This is a noop. ISE does not store any Cinder specific information.
host = self._find_host(endpoints)
if host['name'] == '':
# host still not found, this is fatal.
- msg = _LE("Host could not be found!")
- LOG.error(msg)
+ LOG.error(_LE("Host could not be found!"))
RaiseXIODriverException()
elif string.upper(host['type']) != 'OPENSTACK':
# Make sure host type is marked as Openstack host
resp = self._send_cmd('PUT', host['locator'], params)
status = resp['status']
if status != 201 and status != 409:
- msg = _LE("Host PUT failed (%s).") % status
- LOG.error(msg)
+ LOG.error(_LE("Host PUT failed (%s)."), status)
RaiseXIODriverException()
# We have a host object.
target_lun = ''
# The iscsi_ip_address must always be set.
if self.configuration.iscsi_ip_address == '':
- err_msg = _LE("iscsi_ip_address must be set!")
- LOG.error(err_msg)
+ LOG.error(_LE("iscsi_ip_address must be set!"))
RaiseXIODriverException()
# Setup common driver
self.driver = XIOISEDriver(configuration=self.configuration)
self.status = self.error.code
self.data = httplib.responses[self.status]
- LOG.debug('Response code: %s' % self.status)
- LOG.debug('Response data: %s' % self.data)
+ LOG.debug('Response code: %s', self.status)
+ LOG.debug('Response data: %s', self.data)
def get_header(self, name):
"""Get an HTTP header with the given name from the results
self.headers['x-auth-session'] = \
result.get_header('x-auth-session')
self.do_logout = True
- LOG.info(_LI('ZFSSA version: %s') %
+ LOG.info(_LI('ZFSSA version: %s'),
result.get_header('x-zfssa-version'))
elif result.status == httplib.NOT_FOUND:
retry = 0
response = None
- LOG.debug('Request: %s %s' % (request, zfssaurl))
- LOG.debug('Out headers: %s' % out_hdrs)
+ LOG.debug('Request: %(request)s %(url)s',
+ {'request': request, 'url': zfssaurl})
+ LOG.debug('Out headers: %s', out_hdrs)
if body and body != '':
- LOG.debug('Body: %s' % body)
+ LOG.debug('Body: %s', body)
while retry < maxreqretries:
try:
response = urllib2.urlopen(req, timeout=self.timeout)
except urllib2.HTTPError as err:
if err.code == httplib.NOT_FOUND:
- LOG.debug('REST Not Found: %s' % err.code)
+ LOG.debug('REST Not Found: %s', err.code)
else:
- LOG.error(_LE('REST Not Available: %s') % err.code)
+ LOG.error(_LE('REST Not Available: %s'), err.code)
if err.code == httplib.SERVICE_UNAVAILABLE and \
retry < maxreqretries:
retry += 1
time.sleep(1)
- LOG.error(_LE('Server Busy retry request: %s') % retry)
+ LOG.error(_LE('Server Busy retry request: %s'), retry)
continue
if (err.code == httplib.UNAUTHORIZED or
err.code == httplib.INTERNAL_SERVER_ERROR) and \
'/access/v1' not in zfssaurl:
try:
- LOG.error(_LE('Authorizing request: '
- '%(zfssaurl)s'
- 'retry: %(retry)d .')
- % {'zfssaurl': zfssaurl,
- 'retry': retry})
+ LOG.error(_LE('Authorizing request: %(zfssaurl)s '
+ 'retry: %(retry)d .'),
+ {'zfssaurl': zfssaurl, 'retry': retry})
self._authorize()
req.add_header('x-auth-session',
self.headers['x-auth-session'])
return RestResult(err=err)
except urllib2.URLError as err:
- LOG.error(_LE('URLError: %s') % err.reason)
+ LOG.error(_LE('URLError: %s'), err.reason)
raise RestClientError(-1, name="ERR_URLError",
message=err.reason)
request.get_method = lambda: method
- LOG.debug('Sending WebDAV request:%s %s %s' % (method, src_url,
- dst_url))
+ LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s',
+ {'method': method, 'src': src_url, 'des': dst_url})
while retry < maxretries:
try:
response = urllib2.urlopen(request, timeout=None)
except urllib2.HTTPError as err:
LOG.error(_LE('WebDAV returned with %(code)s error during '
- '%(method)s call.')
- % {'code': err.code,
- 'method': method})
+ '%(method)s call.'),
+ {'code': err.code, 'method': method})
if err.code == httplib.INTERNAL_SERVER_ERROR:
- exception_msg = (_('WebDAV operation failed with '
- 'error code: %(code)s '
- 'reason: %(reason)s '
- 'Retry attempt %(retry)s in progress.')
- % {'code': err.code,
- 'reason': err.reason,
- 'retry': retry})
- LOG.error(exception_msg)
+ LOG.error(_LE('WebDAV operation failed with error code: '
+ '%(code)s reason: %(reason)s Retry attempt '
+ '%(retry)s in progress.'),
+ {'code': err.code,
+ 'reason': err.reason,
+ 'retry': retry})
if retry < maxretries:
retry += 1
time.sleep(1)
https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
'/shares' + mountpoint
- LOG.debug('NFS mount path: %s' % self.mount_path)
- LOG.debug('WebDAV path to the share: %s' % https_path)
+ LOG.debug('NFS mount path: %s', self.mount_path)
+ LOG.debug('WebDAV path to the share: %s', https_path)
self.shares = {}
mnt_opts = self.configuration.zfssa_nfs_mount_options
try:
self._ensure_share_mounted(self.mount_path)
except Exception as exc:
- LOG.error(_LE('Exception during mounting %s.') % exc)
+ LOG.error(_LE('Exception during mounting %s.'), exc)
self._mounted_shares = [self.mount_path]
- LOG.debug('Available shares %s' % self._mounted_shares)
+ LOG.debug('Available shares %s', self._mounted_shares)
def check_for_setup_error(self):
"""Check that driver can login.
snapshot['name'])
except Exception:
with excutils.save_and_reraise_exception():
- LOG.debug('Error thrown during snapshot: %s creation' %
+ LOG.debug('Error thrown during snapshot: %s creation',
snapshot['name'])
finally:
self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool,
self.extend_volume(volume, volume['size'])
except Exception:
vol_path = self.local_path(volume)
- exception_msg = (_('Error in extending volume size: '
- 'Volume: %(volume)s '
- 'Vol_Size: %(vol_size)d with '
- 'Snapshot: %(snapshot)s '
- 'Snap_Size: %(snap_size)d')
- % {'volume': volume['name'],
- 'vol_size': volume['size'],
- 'snapshot': snapshot['name'],
- 'snap_size': snapshot['volume_size']})
with excutils.save_and_reraise_exception():
- LOG.error(exception_msg)
+ LOG.error(_LE('Error in extending volume size: Volume: '
+ '%(volume)s Vol_Size: %(vol_size)d with '
+ 'Snapshot: %(snapshot)s Snap_Size: '
+ '%(snap_size)d'),
+ {'volume': volume['name'],
+ 'vol_size': volume['size'],
+ 'snapshot': snapshot['name'],
+ 'snap_size': snapshot['volume_size']})
self._execute('rm', '-f', vol_path, run_as_root=True)
return {'provider_location': volume['provider_location']}
val = json.loads(ret.data)
if not self._is_pool_owned(val):
- exception_msg = (_('Error Pool ownership: '
- 'Pool %(pool)s is not owned '
- 'by %(host)s.')
- % {'pool': pool,
- 'host': self.host})
- LOG.error(exception_msg)
+ LOG.error(_LE('Error Pool ownership: Pool %(pool)s is not owned '
+ 'by %(host)s.'),
+ {'pool': pool, 'host': self.host})
raise exception.InvalidInput(reason=pool)
avail = val['pool']['usage']['available']
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
- exception_msg = (_('Error Setting '
- 'Volume: %(lun)s to '
- 'InitiatorGroup: %(initiatorgroup)s '
- 'Pool: %(pool)s '
- 'Project: %(project)s '
- 'Return code: %(ret.status)d '
- 'Message: %(ret.data)s.')
- % {'lun': lun,
- 'initiatorgroup': initiatorgroup,
- 'pool': pool,
- 'project': project,
- 'ret.status': ret.status,
- 'ret.data': ret.data})
- LOG.error(exception_msg)
+ LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: '
+ '%(initiatorgroup)s Pool: %(pool)s Project: '
+ '%(project)s Return code: %(ret.status)d Message: '
+ '%(ret.data)s.'),
+ {'lun': lun,
+ 'initiatorgroup': initiatorgroup,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
def delete_lun(self, pool, project, lun):
"""delete iscsi lun."""
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
- exception_msg = (_('Error Deleting '
- 'Volume: %(lun)s to '
- 'Pool: %(pool)s '
- 'Project: %(project)s '
- 'Return code: %(ret.status)d '
- 'Message: %(ret.data)s.')
- % {'lun': lun,
- 'pool': pool,
- 'project': project,
- 'ret.status': ret.status,
- 'ret.data': ret.data})
- LOG.error(exception_msg)
+ LOG.error(_LE('Error Deleting Volume: %(lun)s to Pool: %(pool)s '
+ 'Project: %(project)s Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.'),
+ {'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
def create_snapshot(self, pool, project, lun, snapshot):
"""create snapshot."""
svc = "/api/san/v1/iscsi/initiator-groups"
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
- LOG.error(_LE('Error getting initiator groups.'))
- exception_msg = (_('Error getting initiator groups.'))
- raise exception.VolumeBackendAPIException(data=exception_msg)
+ msg = _('Error getting initiator groups.')
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
val = json.loads(ret.data)
for initiator_group in val['groups']:
if initiator in initiator_group['initiators']:
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
- LOG.debug('%s service state: %s' % (service, data))
+ LOG.debug('%(service)s service state: %(data)s',
+ {'service': service, 'data': data})
status = 'online' if state == 'enable' else 'disabled'
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('Modify %(service)s service '
- 'return data: %(data)s'
- % {'service': service,
- 'data': data})
+ 'return data: %(data)s',
+ {'service': service,
+ 'data': data})
def create_share(self, pool, project, share, args):
"""Create a share in the specified pool and project"""
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
- LOG.debug("Cinder Volume DB check: vol_db_empty=%s" % vol_db_empty)
+ LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
self.driver = importutils.import_object(
volume_driver,
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error("Invalid JSON: %s" %
+ LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
def _add_to_threadpool(self, func, *args, **kwargs):
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
- except Exception as err:
- LOG.error(_LE('Fetch volume pool name failed.'),
- resource=volume)
- LOG.exception(err)
+ except Exception:
+ LOG.exception(_LE('Fetch volume pool name failed.'),
+ resource=volume)
return
if pool:
ctxt = context.get_admin_context()
- LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
+ LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
- except Exception as ex:
- LOG.error(_LE("Failed to initialize driver."),
- resource={'type': 'driver',
- 'id': self.__class__.__name__})
- LOG.exception(ex)
+ except Exception:
+ LOG.exception(_LE("Failed to initialize driver."),
+ resource={'type': 'driver',
+ 'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
- except Exception as export_ex:
- LOG.error(_LE("Failed to re-export volume, "
- "setting to ERROR."),
- resource=volume)
- LOG.exception(export_ex)
+ except Exception:
+ LOG.exception(_LE("Failed to re-export volume, "
+ "setting to ERROR."),
+ resource=volume)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
self.db.snapshot_update(ctxt,
snapshot['id'],
{'status': 'error'})
- except Exception as ex:
- LOG.error(_LE("Error during re-export on driver init."),
- resource=volume)
- LOG.exception(ex)
+ except Exception:
+ LOG.exception(_LE("Error during re-export on driver init."),
+ resource=volume)
return
self.driver.set_throttle()
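The manager hunks above collapse each LOG.error(...) plus LOG.exception(ex) pair into a single LOG.exception(...) call, which logs at ERROR level and attaches the active traceback itself, so the exception object no longer needs to be bound to a name. A minimal sketch, with a made-up failing call:

# Sketch of the LOG.exception pattern (the failing call is made up).
import logging

logging.basicConfig()
LOG = logging.getLogger(__name__)


def reexport_volume(volume_id):
    raise RuntimeError('backend unreachable')


try:
    reexport_volume('vol-0001')
except Exception:
    # LOG.exception() records the message at ERROR level and appends the
    # current traceback, replacing the old error-then-exception pair.
    LOG.exception("Failed to re-export volume %s, setting to ERROR.",
                  'vol-0001')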
cgsnapshot_id=cgsnapshot_id)
except Exception:
msg = _("Create manager volume flow failed.")
- LOG.exception((msg),
- resource={'type': 'volume',
- 'id': volume_id})
+ LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
if snapshot_id is not None:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
- if (vol_utils.extract_host(volume_ref['host']) != self.host):
+ if vol_utils.extract_host(volume_ref['host']) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = 'error'
snapshot.save(context)
- raise exception.MetadataCopyFailure(reason=ex)
+ raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = 'available'
snapshot.progress = '100%'
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
- msg = _("being attached by different mode")
- raise exception.InvalidVolume(reason=msg)
+ raise exception.InvalidVolume(
+ reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
- msg = _("volume is already attached")
- raise exception.InvalidVolume(reason=msg)
+ raise exception.InvalidVolume(
+ reason=_("volume is already attached"))
attachment = None
host_name_sanitized = utils.sanitize_hostname(
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
- raise exception.RemoveExportException(volume=volume_id, reason=ex)
+ raise exception.RemoveExportException(volume=volume_id,
+ reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
- raise exception.InvalidInput(reason=err)
+ raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
- "(error: %(err))."), {'err': six.text_type(err)})
+ "(error: %(err)).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
- raise exception.ExportFailure(reason=ex)
+ raise exception.ExportFailure(reason=six.text_type(ex))
initiator_data = self._get_driver_initiator_data(context, connector)
try:
connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
- "(error: %(err)s)."), {'err': six.text_type(err)})
+ "(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
- msg = _LE("Failed to copy volume %(vol1)s to %(vol2)s")
- LOG.error(msg, {'vol1': volume['id'],
- 'vol2': new_volume['id']})
+ LOG.error(_LE("Failed to copy volume %(vol1)s to %(vol2)s"),
+ {'vol1': volume['id'], 'vol2': new_volume['id']})
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'])
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
- msg = _("migrate_volume_completion: completing migration for "
- "volume %(vol1)s (temporary volume %(vol2)s")
- LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
+ LOG.debug("migrate_volume_completion: completing migration for "
+ "volume %(vol1)s (temporary volume %(vol2)s",
+ {'vol1': volume_id, 'vol2': new_volume_id})
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = self._get_original_status(volume)
if error:
- msg = _("migrate_volume_completion is cleaning up an error "
- "for volume %(vol1)s (temporary volume %(vol2)s")
- LOG.info(msg % {'vol1': volume['id'],
- 'vol2': new_volume['id']})
+ LOG.info(_LI("migrate_volume_completion is cleaning up an error "
+ "for volume %(vol1)s (temporary volume %(vol2)s"),
+ {'vol1': volume['id'], 'vol2': new_volume['id']})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': None, 'status': orig_volume_status}
self.db.volume_update(ctxt, volume_id, updates)
self.detach_volume(ctxt, volume_id, attachment['id'])
self.delete_volume(ctxt, volume_id)
except Exception as ex:
- msg = _LE("Delete migration source volume failed: %(err)s")
- LOG.error(msg, {'err': six.text_type(ex)}, resource=volume)
+ LOG.error(_LE("Delete migration source volume failed: %(err)s"),
+ {'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# Note this needs to go through rpc to the host of the new volume
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
- except Exception as ex:
+ except Exception:
retyped = False
- LOG.error(_LE("Volume %s: driver error when trying to retype, "
- "falling back to generic mechanism."),
- volume_ref['id'])
- LOG.exception(ex)
+ LOG.exception(_LE("Volume %s: driver error when trying to "
+ "retype, falling back to generic "
+ "mechanism."), volume_ref['id'])
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
volume_id,
ref)
except Exception:
- LOG.exception(_LE("Failed to create manage_existing flow."),
- resource={'type': 'volume',
- 'id': volume_id})
- raise exception.CinderException(
- _("Failed to create manage existing flow."))
+ msg = _("Failed to create manage_existing flow.")
+ LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
+ raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
- " %(snapshot_id)s metadata.") %
+ " %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
- raise exception.MetadataCopyFailure(reason=ex)
+ raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
- " %(volume_id)s metadata") %
+ " %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
self.db.snapshot_update(context,
snapshot['id'],
{'status': 'error'})
- raise exception.MetadataCopyFailure(reason=ex)
+ raise exception.MetadataCopyFailure(
+ reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'], {'status': 'available',