import requests
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume.drivers.san import san
results = self._issue_api_request('cluster')
if 'uuid' not in results:
- LOG.error(_('Failed to get updated stats from Datera Cluster.'))
+ LOG.error(_LE('Failed to get updated stats from Datera Cluster.'))
backend_name = self.configuration.safe_get('volume_backend_name')
stats = {
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.drivers.emc import emc_vmax_provision
from cinder.volume.drivers.emc import emc_vmax_utils
if len(storageTierInstanceNames) == 0:
storageTierInstanceNames = None
- LOG.warn(_("Unable to get storage tiers from tier policy rule "))
+ LOG.warn(_LW("Unable to get storage tiers "
+ "from tier policy rule "))
return storageTierInstanceNames
storageGroupInstanceName, tierPolicyRuleInstanceName,
storageGroupName, fastPolicyName)
except Exception as ex:
- LOG.error(_("Exception: %s") % six.text_type(ex))
+ LOG.error(_LE("Exception: %s") % six.text_type(ex))
errorMessage = (_(
"Failed to add storage group %(storageGroupInstanceName)s "
" to tier policy rule %(tierPolicyRuleInstanceName)s")
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
- LOG.error(_("Error disassociating storage group from "
+ LOG.error(_LE("Error disassociating storage group from "
"policy: %s") % errordesc)
else:
LOG.debug("Disassociated storage group from policy %s")
else:
LOG.debug("ModifyStorageTierPolicyRule completed")
except Exception as e:
- LOG.info(_("Storage group not associated with the policy %s")
+ LOG.info(_LI("Storage group not associated with the policy %s")
% six.text_type(e))
def get_pool_associated_to_policy(
isTieringPolicySupported = self.is_tiering_policy_enabled(
conn, tierPolicyServiceInstanceName)
except Exception as e:
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
return False
return isTieringPolicySupported
from cinder import context
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume import volume_types
def __init__(self, prtcl):
if not pywbemAvailable:
- LOG.info(_(
+ LOG.info(_LI(
'Module PyWBEM not installed. '
'Install PyWBEM using the python-pywbem package.'))
self.protocol = prtcl
if self._is_job_finished(conn, job):
raise loopingcall.LoopingCallDone()
if self.retries > JOB_RETRIES:
- LOG.error(_("_wait_for_job_complete failed after %(retries)d "
- "tries") % {'retries': self.retries})
+ LOG.error(_LE("_wait_for_job_complete "
+ "failed after %(retries)d "
+ "tries") % {'retries': self.retries})
raise loopingcall.LoopingCallDone()
try:
if self._is_job_finished(conn, job):
self.wait_for_job_called = True
except Exception as e:
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
exceptionMessage = (_("Issue encountered waiting for job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(exceptionMessage)
if self._is_sync_complete(conn, syncName):
raise loopingcall.LoopingCallDone()
if self.retries > JOB_RETRIES:
- LOG.error(_("_wait_for_sync failed after %(retries)d tries")
+ LOG.error(_LE("_wait_for_sync failed after %(retries)d tries")
% {'retries': self.retries})
raise loopingcall.LoopingCallDone()
try:
if self._is_sync_complete(conn, syncName):
self.wait_for_sync_called = True
except Exception as e:
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
exceptionMessage = (_("Issue encountered waiting for "
"synchronization."))
LOG.error(exceptionMessage)
% {'fileName': fileName,
'fastPolicyName': fastPolicyName})
else:
- LOG.info(_("Fast Policy not found."))
+ LOG.info(_LI("Fast Policy not found."))
return fastPolicyName
def parse_array_name_from_file(self, fileName):
from cinder import exception
from cinder.exception import EMCVnxCLICmdError
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import jsonutils as json
from cinder.openstack.common import lockutils
self.primary_storage_ip = self.active_storage_ip
self.secondary_storage_ip = configuration.san_secondary_ip
if self.secondary_storage_ip == self.primary_storage_ip:
- LOG.warn(_("san_secondary_ip is configured as "
- "the same value as san_ip."))
+ LOG.warn(_LE("san_secondary_ip is configured as "
+ "the same value as san_ip."))
self.secondary_storage_ip = None
if not configuration.san_ip:
err_msg = _('san_ip: Mandatory field configuration. '
# if there is security file path provided, use this security file
if storage_vnx_security_file:
self.credentials = ('-secfilepath', storage_vnx_security_file)
- LOG.info(_("Using security file in %s for authentication") %
+ LOG.info(_LI("Using security file in %s for authentication") %
storage_vnx_security_file)
# if there is a username/password provided, use those in the cmd line
elif storage_username is not None and len(storage_username) > 0 and\
self.credentials = ('-user', storage_username,
'-password', storage_password,
'-scope', storage_auth_type)
- LOG.info(_("Plain text credentials are being used for "
- "authentication"))
+ LOG.info(_LI("Plain text credentials are being used for "
+ "authentication"))
else:
- LOG.info(_("Neither security file nor plain "
- "text credentials are specified. Security file under "
- "home directory will be used for authentication "
- "if present."))
+ LOG.info(_LI("Neither security file nor plain "
+ "text credentials are specified. Security file under "
+ "home directory will be used for authentication "
+ "if present."))
self.iscsi_initiator_map = None
if configuration.iscsi_initiators:
self.iscsi_initiator_map = \
json.loads(configuration.iscsi_initiators)
- LOG.info(_("iscsi_initiators: %s"), self.iscsi_initiator_map)
+ LOG.info(_LI("iscsi_initiators: %s"), self.iscsi_initiator_map)
# extra spec constants
self.pool_spec = 'storagetype:pool'
except EMCVnxCLICmdError as ex:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
- LOG.error(_("Error on enable compression on lun %s.")
+ LOG.error(_LE("Error on enable compression on lun %s.")
% six.text_type(ex))
# handle consistency group
except EMCVnxCLICmdError as ex:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
- LOG.error(_("Error on adding lun to consistency"
- " group. %s") % six.text_type(ex))
+ LOG.error(_LE("Error on adding lun to consistency"
+ " group. %s") % six.text_type(ex))
return data
@log_enter_exit
if rc != 0:
# Ignore the error that due to retry
if rc == 4 and out.find('(0x712d8d04)') >= 0:
- LOG.warn(_('LUN already exists, LUN name %(name)s. '
- 'Message: %(msg)s') %
+ LOG.warn(_LW('LUN already exists, LUN name %(name)s. '
+ 'Message: %(msg)s') %
{'name': name, 'msg': out})
else:
raise EMCVnxCLICmdError(cmd, rc, out)
if rc != 0:
# Ignore the error that due to retry
if rc == 9 and out.find("not exist") >= 0:
- LOG.warn(_("LUN is already deleted, LUN name %(name)s. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("LUN is already deleted, LUN name %(name)s. "
+ "Message: %(msg)s") %
{'name': name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_delete_lun, rc, out)
if rc != 0:
# Ignore the error that due to retry
if rc == 4 and out.find("(0x712d8e04)") >= 0:
- LOG.warn(_("LUN %(name)s is already expanded. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("LUN %(name)s is already expanded. "
+ "Message: %(msg)s") %
{'name': name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_expand_lun, rc, out)
# Ignore the error if consistency group already exists
if (rc == 33 and
out.find("(0x716d8021)") >= 0):
- LOG.warn(_('Consistency group %(name)s already '
- 'exists. Message: %(msg)s') %
+ LOG.warn(_LW('Consistency group %(name)s already '
+ 'exists. Message: %(msg)s') %
{'name': cg_name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_create_cg, rc, out)
if rc != 0:
# Ignore the error if CG doesn't exist
if rc == 13 and out.find(self.CLI_RESP_PATTERN_CG_NOT_FOUND) >= 0:
- LOG.warn(_("CG %(cg_name)s does not exist. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("CG %(cg_name)s does not exist. "
+ "Message: %(msg)s") %
{'cg_name': cg_name, 'msg': out})
elif rc == 1 and out.find("0x712d8801") >= 0:
- LOG.warn(_("CG %(cg_name)s is deleting. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("CG %(cg_name)s is deleting. "
+ "Message: %(msg)s") %
{'cg_name': cg_name, 'msg': out})
else:
raise EMCVnxCLICmdError(delete_cg_cmd, rc, out)
else:
- LOG.info(_('Consistency group %s was deleted '
- 'successfully.') % cg_name)
+ LOG.info(_LI('Consistency group %s was deleted '
+ 'successfully.') % cg_name)
@log_enter_exit
def create_cgsnapshot(self, cgsnapshot):
# Ignore the error if cgsnapshot already exists
if (rc == 5 and
out.find("(0x716d8005)") >= 0):
- LOG.warn(_('Cgsnapshot name %(name)s already '
- 'exists. Message: %(msg)s') %
+ LOG.warn(_LW('Cgsnapshot name %(name)s already '
+ 'exists. Message: %(msg)s') %
{'name': snap_name, 'msg': out})
else:
raise EMCVnxCLICmdError(create_cg_snap_cmd, rc, out)
# Ignore the error if cgsnapshot does not exist.
if (rc == 5 and
out.find(self.CLI_RESP_PATTERN_SNAP_NOT_FOUND) >= 0):
- LOG.warn(_('Snapshot %(name)s for consistency group '
- 'does not exist. Message: %(msg)s') %
+ LOG.warn(_LW('Snapshot %(name)s for consistency group '
+ 'does not exist. Message: %(msg)s') %
{'name': snap_name, 'msg': out})
else:
raise EMCVnxCLICmdError(delete_cg_snap_cmd, rc, out)
# Ignore the error that due to retry
if (rc == 5 and
out.find("(0x716d8005)") >= 0):
- LOG.warn(_('Snapshot %(name)s already exists. '
- 'Message: %(msg)s') %
+ LOG.warn(_LW('Snapshot %(name)s already exists. '
+ 'Message: %(msg)s') %
{'name': name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_create_snapshot, rc, out)
if rc != 0:
# Ignore the error that due to retry
if rc == 5 and out.find("not exist") >= 0:
- LOG.warn(_("Snapshot %(name)s may deleted already. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("Snapshot %(name)s may deleted already. "
+ "Message: %(msg)s") %
{'name': name, 'msg': out})
return True
# The snapshot cannot be destroyed because it is
# attached to a snapshot mount point. Wait
elif rc == 3 and out.find("(0x716d8003)") >= 0:
- LOG.warn(_("Snapshot %(name)s is in use, retry. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("Snapshot %(name)s is in use, retry. "
+ "Message: %(msg)s") %
{'name': name, 'msg': out})
return False
else:
raise EMCVnxCLICmdError(command_delete_snapshot, rc, out)
else:
- LOG.info(_('Snapshot %s was deleted successfully.') %
+ LOG.info(_LI('Snapshot %s was deleted successfully.') %
name)
return True
if rc != 0:
# Ignore the error that due to retry
if rc == 4 and out.find("(0x712d8d04)") >= 0:
- LOG.warn(_("Mount point %(name)s already exists. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("Mount point %(name)s already exists. "
+ "Message: %(msg)s") %
{'name': name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_create_mount_point, rc, out)
if rc != 0:
# Ignore the error that due to retry
if rc == 85 and out.find('(0x716d8055)') >= 0:
- LOG.warn(_("Snapshot %(snapname)s is attached to snapshot "
- "mount point %(mpname)s already. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("Snapshot %(snapname)s is attached to snapshot "
+ "mount point %(mpname)s already. "
+ "Message: %(msg)s") %
{'snapname': snapshot_name,
'mpname': name,
'msg': out})
except EMCVnxCLICmdError as ex:
migration_succeed = False
if self._is_sp_unavailable_error(ex.out):
- LOG.warn(_("Migration command may get network timeout. "
- "Double check whether migration in fact "
- "started successfully. Message: %(msg)s") %
+ LOG.warn(_LW("Migration command may get network timeout. "
+ "Double check whether migration in fact "
+ "started successfully. Message: %(msg)s") %
{'msg': ex.out})
command_migrate_list = ('migrate', '-list',
'-source', src_id)
migration_succeed = True
if not migration_succeed:
- LOG.warn(_("Start migration failed. Message: %s") %
+ LOG.warn(_LW("Start migration failed. Message: %s") %
ex.out)
LOG.debug("Delete temp LUN after migration "
"start failed. LUN: %s" % dst_name)
if rc != 0:
# Ignore the error that due to retry
if rc == 66 and out.find("name already in use") >= 0:
- LOG.warn(_('Storage group %(name)s already exists. '
- 'Message: %(msg)s') %
+ LOG.warn(_LW('Storage group %(name)s already exists. '
+ 'Message: %(msg)s') %
{'name': name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_create_storage_group, rc, out)
# Ignore the error that due to retry
if rc == 83 and out.find("group name or UID does not "
"match any storage groups") >= 0:
- LOG.warn(_("Storage group %(name)s doesn't exist, "
- "may have already been deleted. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("Storage group %(name)s doesn't exist, "
+ "may have already been deleted. "
+ "Message: %(msg)s") %
{'name': name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_delete_storage_group, rc, out)
if rc == 116 and \
re.search("host is not.*connected to.*storage group",
out) is not None:
- LOG.warn(_("Host %(host)s has already disconnected from "
- "storage group %(sgname)s. Message: %(msg)s") %
+ LOG.warn(_LW("Host %(host)s has already disconnected from "
+ "storage group %(sgname)s. Message: %(msg)s") %
{'host': hostname, 'sgname': sg_name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_host_disconnect, rc, out)
if rc == 66 and \
re.search("LUN.*already.*added to.*Storage Group",
out) is not None:
- LOG.warn(_("LUN %(lun)s has already added to "
- "Storage Group %(sgname)s. "
- "Message: %(msg)s") %
+ LOG.warn(_LW("LUN %(lun)s has already added to "
+ "Storage Group %(sgname)s. "
+ "Message: %(msg)s") %
{'lun': alu, 'sgname': sg_name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_add_hlu, rc, out)
# Ignore the error that due to retry
if rc == 66 and\
out.find("No such Host LUN in this Storage Group") >= 0:
- LOG.warn(_("HLU %(hlu)s has already been removed from "
- "%(sgname)s. Message: %(msg)s") %
+ LOG.warn(_LW("HLU %(hlu)s has already been removed from "
+ "%(sgname)s. Message: %(msg)s") %
{'hlu': hlu, 'sgname': sg_name, 'msg': out})
else:
raise EMCVnxCLICmdError(command_remove_hlu, rc, out)
try:
return propertyDescriptor.converter(m.group(1))
except ValueError:
- LOG.error(_("Invalid value for %(key)s, "
- "value is %(value)s.") %
+ LOG.error(_LE("Invalid value for %(key)s, "
+ "value is %(value)s.") %
{'key': propertyDescriptor.key,
'value': m.group(1)})
return None
pool, self.POOL_FREE_CAPACITY)
temp_cache.append(obj)
except Exception as ex:
- LOG.error(_("Error happened during storage pool querying, %s.")
+ LOG.error(_LE("Error happened during storage pool querying, %s.")
% ex)
# NOTE: Do not want to continue raise the exception
# as the pools may temporarly unavailable
if m:
data['array_serial'] = m.group(1)
else:
- LOG.warn(_("No array serial number returned, "
- "set as unknown."))
+ LOG.warn(_LW("No array serial number returned, "
+ "set as unknown."))
else:
raise EMCVnxCLICmdError(command_get_array_serial, rc, out)
LOG.debug("See available iSCSI target: %s",
connection_pingnode)
return True
- LOG.warn(_("See unavailable iSCSI target: %s"), connection_pingnode)
+ LOG.warn(_LW("See unavailable iSCSI target: %s"), connection_pingnode)
return False
@log_enter_exit
self.active_storage_ip == self.primary_storage_ip else\
self.primary_storage_ip
- LOG.info(_('Toggle storage_vnx_ip_address from %(old)s to '
- '%(new)s.') %
+ LOG.info(_LI('Toggle storage_vnx_ip_address from %(old)s to '
+ '%(new)s.') %
{'old': old_ip,
'new': self.primary_storage_ip})
return True
FCSanLookupService(configuration=configuration)
self.max_retries = 5
if self.destroy_empty_sg:
- LOG.warn(_("destroy_empty_storage_group: True. "
- "Empty storage group will be deleted "
- "after volume is detached."))
+ LOG.warn(_LW("destroy_empty_storage_group: True. "
+ "Empty storage group will be deleted "
+ "after volume is detached."))
if not self.itor_auto_reg:
- LOG.info(_("initiator_auto_registration: False. "
- "Initiator auto registration is not enabled. "
- "Please register initiator manually."))
+ LOG.info(_LI("initiator_auto_registration: False. "
+ "Initiator auto registration is not enabled. "
+ "Please register initiator manually."))
self.hlu_set = set(xrange(1, self.max_luns_per_sg + 1))
self._client = CommandLineHelper(self.configuration)
self.array_serial = None
if not provisioning:
provisioning = 'thick'
- LOG.info(_('Create Volume: %(volume)s Size: %(size)s '
- 'pool: %(pool)s '
- 'provisioning: %(provisioning)s '
- 'tiering: %(tiering)s.')
+ LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
+ 'pool: %(pool)s '
+ 'provisioning: %(provisioning)s '
+ 'tiering: %(tiering)s.')
% {'volume': volumename,
'size': volumesize,
'pool': pool,
"""check whether an extra spec's value is valid."""
if not extra_spec or not valid_values:
- LOG.error(_('The given extra_spec or valid_values is None.'))
+ LOG.error(_LE('The given extra_spec or valid_values is None.'))
elif extra_spec not in valid_values:
msg = _("The extra_spec: %s is invalid.") % extra_spec
LOG.error(msg)
false_ret = (False, None)
if 'location_info' not in host['capabilities']:
- LOG.warn(_("Failed to get target_pool_name and "
- "target_array_serial. 'location_info' "
- "is not in host['capabilities']."))
+ LOG.warn(_LW("Failed to get target_pool_name and "
+ "target_array_serial. 'location_info' "
+ "is not in host['capabilities']."))
return false_ret
# mandatory info should be ok
target_pool_name = info_detail[0]
target_array_serial = info_detail[1]
except AttributeError:
- LOG.warn(_("Error on parsing target_pool_name/"
- "target_array_serial."))
+ LOG.warn(_LW("Error on parsing target_pool_name/"
+ "target_array_serial."))
return false_ret
if len(target_pool_name) == 0:
volume, target_pool_name, new_type)[0]:
return True
else:
- LOG.warn(_('Storage-assisted migration failed during '
- 'retype.'))
+ LOG.warn(_LW('Storage-assisted migration failed during '
+ 'retype.'))
return False
else:
# migration is invalid
snapshotname = snapshot['name']
volumename = snapshot['volume_name']
- LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s')
+ LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s')
% {'snapshot': snapshotname,
'volume': volumename})
snapshotname = snapshot['name']
- LOG.info(_('Delete Snapshot: %(snapshot)s')
+ LOG.info(_LI('Delete Snapshot: %(snapshot)s')
% {'snapshot': snapshotname})
self._client.delete_snapshot(snapshotname)
@log_enter_exit
def create_consistencygroup(self, context, group):
"""Create a consistency group."""
- LOG.info(_('Start to create consistency group: %(group_name)s '
- 'id: %(id)s') %
+ LOG.info(_LI('Start to create consistency group: %(group_name)s '
+ 'id: %(id)s') %
{'group_name': group['name'], 'id': group['id']})
model_update = {'status': 'available'}
model_update = {}
model_update['status'] = group['status']
- LOG.info(_('Start to delete consistency group: %(cg_name)s')
+ LOG.info(_LI('Start to delete consistency group: %(cg_name)s')
% {'cg_name': cg_name})
try:
self._client.delete_consistencygroup(cg_name)
context, cgsnapshot_id)
model_update = {}
- LOG.info(_('Start to create cgsnapshot for consistency group'
- ': %(group_name)s') %
+ LOG.info(_LI('Start to create cgsnapshot for consistency group'
+ ': %(group_name)s') %
{'group_name': cgsnapshot['consistencygroup_id']})
try:
model_update = {}
model_update['status'] = cgsnapshot['status']
- LOG.info(_('Delete cgsnapshot %(snap_name)s for consistency group: '
- '%(group_name)s') % {'snap_name': cgsnapshot['id'],
+ LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
+ '%(group_name)s') % {'snap_name': cgsnapshot['id'],
'group_name': cgsnapshot['consistencygroup_id']})
try:
# SG was not created or was destroyed by another concurrent
# operation before connected.
# Create SG and try to connect again
- LOG.warn(_('Storage Group %s is not found. Create it.'),
+ LOG.warn(_LW('Storage Group %s is not found. Create it.'),
storage_group)
self.assure_storage_group(storage_group)
self._client.connect_host_to_storage_group(
def _register_iscsi_initiator(self, ip, host, initiator_uids):
for initiator_uid in initiator_uids:
iscsi_targets = self._client.get_iscsi_targets()
- LOG.info(_('Get ISCSI targets %(tg)s to register '
- 'initiator %(in)s.')
+ LOG.info(_LI('Get ISCSI targets %(tg)s to register '
+ 'initiator %(in)s.')
% ({'tg': iscsi_targets,
'in': initiator_uid}))
def _register_fc_initiator(self, ip, host, initiator_uids):
for initiator_uid in initiator_uids:
fc_targets = self._client.get_fc_targets()
- LOG.info(_('Get FC targets %(tg)s to register initiator %(in)s.')
+ LOG.info(_LI('Get FC targets %(tg)s to register initiator %(in)s.')
% ({'tg': fc_targets,
'in': initiator_uid}))
lun_map = self.get_lun_map(hostname)
except EMCVnxCLICmdError as ex:
if ex.rc == 83:
- LOG.warn(_("Storage Group %s is not found. "
- "terminate_connection() is unnecessary."),
+ LOG.warn(_LW("Storage Group %s is not found. "
+ "terminate_connection() is unnecessary."),
hostname)
return True
try:
lun_id = self.get_lun_id(volume)
except EMCVnxCLICmdError as ex:
if ex.rc == 9:
- LOG.warn(_("Volume %s is not found. "
- "It has probably been removed in VNX.")
+ LOG.warn(_LW("Volume %s is not found. "
+ "It has probably been removed in VNX.")
% volume_name)
if lun_id in lun_map:
self._client.remove_hlu_from_storagegroup(
lun_map[lun_id], hostname)
else:
- LOG.warn(_("Volume %(vol)s was not in Storage Group %(sg)s.")
+ LOG.warn(_LW("Volume %(vol)s was not in Storage Group %(sg)s.")
% {'vol': volume_name, 'sg': hostname})
if self.destroy_empty_sg or self.zonemanager_lookup_service:
try:
if not lun_map:
LOG.debug("Storage Group %s was empty.", hostname)
if self.destroy_empty_sg:
- LOG.info(_("Storage Group %s was empty, "
- "destroy it."), hostname)
+ LOG.info(_LI("Storage Group %s was empty, "
+ "destroy it."), hostname)
self._client.disconnect_host_from_storage_group(
hostname, hostname)
self._client.delete_storage_group(hostname)
LOG.debug("Storage Group %s not empty,", hostname)
return False
except Exception:
- LOG.warn(_("Failed to destroy Storage Group %s."),
+ LOG.warn(_LW("Failed to destroy Storage Group %s."),
hostname)
else:
return False
if m is not None:
result = True if 'Enabled' == m.group(1) else False
else:
- LOG.error(_("Error parsing output for FastCache Command."))
+ LOG.error(_LE("Error parsing output for FastCache Command."))
return result
@log_enter_exit
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.san import san
if exc.code == 400 and hasattr(exc, 'read'):
error = json.load(exc)
if error['message'].endswith('obj_not_found'):
- LOG.warning(_("object %(key)s of type %(typ)s not found"),
+ LOG.warning(_LW("object %(key)s of "
+ "type %(typ)s not found"),
{'key': key, 'typ': object_type})
raise exception.NotFound()
elif error['message'] == 'vol_obj_name_not_unique':
- LOG.error(_("can't create 2 volumes with the same name"))
+ LOG.error(_LE("can't create 2 volumes with the same name"))
msg = (_('Volume by this name already exists'))
raise exception.VolumeBackendAPIException(data=msg)
- LOG.error(_('Bad response from XMS, %s'), exc.read())
+ LOG.error(_LE('Bad response from XMS, %s'), exc.read())
msg = (_('Exception: %s') % six.text_type(exc))
raise exception.VolumeDriverException(message=msg)
if response.code >= 300:
- LOG.error(_('bad API response, %s'), response.msg)
+ LOG.error(_LE('bad API response, %s'), response.msg)
msg = (_('bad response from XMS got http code %(code)d, %(msg)s') %
{'code': response.code, 'msg': response.msg})
raise exception.VolumeBackendAPIException(data=msg)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
- LOG.info(_('XtremIO SW version %s'), sys['sys-sw-version'])
+ LOG.info(_LI('XtremIO SW version %s'), sys['sys-sw-version'])
def create_volume(self, volume):
"Creates a volume"
try:
self.req('volumes', 'DELETE', name=volume['id'])
except exception.NotFound:
- LOG.info(_("volume %s doesn't exist"), volume['id'])
+ LOG.info(_LI("volume %s doesn't exist"), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
try:
self.req('volumes', 'DELETE', name=snapshot.id)
except exception.NotFound:
- LOG.info(_("snapshot %s doesn't exist"), snapshot.id)
+ LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id)
def _update_volume_stats(self):
self._stats = {'volume_backend_name': self.backend_name,
lm_name = '%s_%s_%s' % (str(vol['index']),
str(ig['index']) if ig else 'any',
str(tg['index']))
- LOG.info(_('removing lun map %s'), lm_name)
+ LOG.info(_LI('removing lun map %s'), lm_name)
self.req('lun-maps', 'DELETE', name=lm_name)
except exception.NotFound:
- LOG.warning(_("terminate_connection: lun map not found"))
+ LOG.warning(_LW("terminate_connection: lun map not found"))
def _find_lunmap(self, ig_name, vol_name):
try:
res = self.req('lun-maps', 'POST', {'ig-id': ig['ig-id'][2],
'vol-id': volume['id']})
lunmap = self._obj_from_result(res)
- LOG.info(_('created lunmap\n%s'), lunmap)
+ LOG.info(_LI('created lunmap\n%s'), lunmap)
except urllib2.HTTPError as exc:
if exc.code == 400:
error = json.load(exc)
if 'already_mapped' in error.message:
- LOG.info(_('volume already mapped,'
- ' trying to retrieve it %(ig)s, %(vol)d'),
+ LOG.info(_LI('volume already mapped,'
+ ' trying to retrieve it %(ig)s, %(vol)d'),
{'ig': ig['ig-id'][1], 'vol': volume['id']})
lunmap = self._find_lunmap(ig['ig-id'][1], volume['id'])
elif error.message == 'vol_obj_not_found':
- LOG.error(_("Can't find volume to map %s"), volume['id'])
+ LOG.error(_LE("Can't find volume to map %s"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
else:
raise
'password']
# delete the initiator to create a new one with password
if not chap_passwd:
- LOG.info(_('initiator has no password while using chap,'
- 'removing it'))
+ LOG.info(_LI('initiator has no password while using chap, '
+ 'removing it'))
self.req('initiators', 'DELETE', name=initiator)
# check if the initiator already exists
raise exception.NotFound()
self._eql_execute(*cmd)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Failed to add multihost-access'
- ' for volume "%s".'),
+ LOG.error(_LE('Failed to add multihost-access '
+ 'for volume "%s".'),
volume['name'])
def delete_volume(self, volume):
volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Failed to delete volume "%s".'), volume['name'])
+ LOG.error(_LE('Failed to delete '
+ 'volume "%s".'), volume['name'])
def create_snapshot(self, snapshot):
""""Create snapshot of existing volume on appliance."""
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete snapshot %(snap)s of '
- 'volume %(vol)s.'),
+ 'volume %(vol)s.'),
{'snap': snapshot['name'],
'vol': snapshot['volume_name']})
}
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Failed to initialize connection'
- ' to volume "%s".'),
+ LOG.error(_LE('Failed to initialize connection '
+ 'to volume "%s".'),
volume['name'])
def terminate_connection(self, volume, connector, force=False, **kwargs):
'access', 'delete', connection_id)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Failed to terminate connection'
- ' to volume "%s".'),
+ LOG.error(_LE('Failed to terminate connection '
+ 'to volume "%s".'),
volume['name'])
def create_export(self, context, volume):
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to extend_volume %(name)s from '
- '%(current_size)sGB to %(new_size)sGB.'),
+ '%(current_size)sGB to %(new_size)sGB.'),
{'name': volume['name'],
- 'current_size': volume['size'],
+ 'current_size': volume['size'],
'new_size': new_size})
def local_path(self, volume):
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.openstack.common import units
errordesc = RETCODE_dic[six.text_type(rc)]
if rc != 0L:
- LOG.error(_('Error Create Volume: %(volumename)s. '
- 'Return code: %(rc)lu. Error: %(error)s')
+ LOG.error(_LE('Error Create Volume: %(volumename)s. '
+ 'Return code: %(rc)lu. Error: %(error)s')
% {'volumename': volumename,
'rc': rc,
'error': errordesc})
vol_instance = self._find_lun(volume)
if vol_instance is None:
- LOG.error(_('Volume %(name)s not found on the array. '
- 'No volume to delete.')
+ LOG.error(_LE('Volume %(name)s not found on the array. '
+ 'No volume to delete.')
% {'name': volumename})
return
repservice = self._find_replication_service(storage_system)
if repservice is None:
- LOG.error(_("Cannot find Replication Service to create snapshot "
- "for volume %s.") % volumename)
+ LOG.error(_LE("Cannot find Replication Service to create snapshot "
+ "for volume %s.") % volumename)
exception_message = (_("Cannot find Replication Service to "
"create snapshot for volume %s.")
% volumename)
sync_name, storage_system =\
self._find_storage_sync_sv_sv(snapshot, volume, False)
if sync_name is None:
- LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s '
- 'not found on the array. No snapshot to delete.')
+ LOG.error(_LE('Snapshot: %(snapshot)s: volume: %(volume)s '
+ 'not found on the array. No snapshot to delete.')
% {'snapshot': snapshotname,
'volume': volumename})
return
'volume': volumename})
raise loopingcall.LoopingCallDone()
if int(time.time()) - start >= wait_timeout:
- LOG.warn(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
- 'Snapshot deleted but cleanup timed out.')
+ LOG.warn(_LW('Snapshot: %(snapshot)s: volume: %(volume)s. '
+ 'Snapshot deleted but cleanup timed out.')
% {'snapshot': snapshotname,
'volume': volumename})
raise loopingcall.LoopingCallDone()
% {'snapshot': snapshotname,
'volume': volumename})
else:
- LOG.warn(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
- 'Snapshot deleted but error during cleanup. '
- 'Error: %(error)s')
+ LOG.warn(_LW('Snapshot: %(snapshot)s: volume: %(volume)s. '
+ 'Snapshot deleted but error during cleanup. '
+ 'Error: %(error)s')
% {'snapshot': snapshotname,
'volume': volumename,
'error': six.text_type(ex.args)})
from cinder import compute
from cinder import db
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
try:
self._do_umount(True, share)
except Exception as exc:
- LOG.warning(_('Exception during unmounting %s') % (exc))
+ LOG.warning(_LW('Exception during unmounting %s') % (exc))
def _do_umount(self, ignore_not_mounted, share):
mount_path = self._get_mount_point_for_share(share)
self._execute(*command, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ignore_not_mounted and 'not mounted' in exc.stderr:
- LOG.info(_("%s is already umounted"), share)
+ LOG.info(_LI("%s is already umounted"), share)
else:
- LOG.error(_("Failed to umount %(share)s, reason=%(stderr)s"),
+ LOG.error(_LE("Failed to umount %(share)s, reason=%(stderr)s"),
{'share': share, 'stderr': exc.stderr})
raise
self._unmount_shares()
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.stderr:
- LOG.warn(_("Failed to refresh mounts, reason=%s") %
+ LOG.warn(_LW("Failed to refresh mounts, reason=%s") %
exc.stderr)
else:
raise
volume['provider_location'] = self._find_share(volume['size'])
- LOG.info(_('casted to %s') % volume['provider_location'])
+ LOG.info(_LI('casted to %s') % volume['provider_location'])
self._do_create_volume(volume)
"""Deletes a logical volume."""
if not volume['provider_location']:
- LOG.warn(_('Volume %s does not have provider_location specified, '
- 'skipping'), volume['name'])
+ LOG.warn(_LW('Volume %s does not have '
+ 'provider_location specified, '
+ 'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
snapshot['id'],
delete_info)
except Exception as e:
- LOG.error(_('Call to Nova delete snapshot failed'))
+ LOG.error(_LE('Call to Nova delete snapshot failed'))
LOG.exception(e)
raise e
self._ensure_share_mounted(share)
self._mounted_shares.append(share)
except Exception as exc:
- LOG.error(_('Exception during mounting %s') % (exc,))
+ LOG.error(_LE('Exception during mounting %s') % (exc,))
LOG.debug('Available shares: %s' % self._mounted_shares)
connection_info)
LOG.debug('nova call result: %s' % result)
except Exception as e:
- LOG.error(_('Call to Nova to create snapshot failed'))
+ LOG.error(_LE('Call to Nova to create snapshot failed'))
LOG.exception(e)
raise e
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
self.configuration.san_ssh_port = self.configuration.nas_ssh_port
self.configuration.ibmnas_platform_type = \
self.configuration.ibmnas_platform_type.lower()
- LOG.info(_('Initialized driver for IBMNAS Platform: %s.'),
+ LOG.info(_LI('Initialized driver for IBMNAS Platform: %s.'),
self.configuration.ibmnas_platform_type)
def set_execute(self, execute):
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
- LOG.warn(_('Volume %s does not have provider_location specified, '
- 'skipping.'), volume['name'])
+ LOG.warn(_LW('Volume %s does not have '
+ 'provider_location specified, '
+ 'skipping.'), volume['name'])
return
export_path = self._get_export_path(volume['id'])
from cinder.brick import exception as brick_exception
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
return True
if self.vg.lv_has_snapshot(volume['name']):
- LOG.error(_('Unabled to delete due to existing snapshot '
- 'for volume: %s') % volume['name'])
+ LOG.error(_LE('Unable to delete due to existing snapshot '
+ 'for volume: %s') % volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
- LOG.warning(_("snapshot: %s not found, "
- "skipping delete operations") % snapshot['name'])
+ LOG.warning(_LW("snapshot: %s not found, "
+ "skipping delete operations") % snapshot['name'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
- LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
+ LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
LOG.debug("Updating volume stats")
if self.vg is None:
- LOG.warning(_('Unable to update stats on non-initialized '
- 'Volume Group: %s'), self.configuration.volume_group)
+ LOG.warning(_LW('Unable to update stats on non-initialized '
+ 'Volume Group: %s'),
+ self.configuration.volume_group)
return
self.vg.update_volume_group_info()
if attempts == 0:
raise
else:
- LOG.warning(_('Error creating iSCSI target, retrying '
- 'creation for target: %s') % iscsi_name)
+ LOG.warning(_LW('Error creating iSCSI target, retrying '
+ 'creation for target: %s') % iscsi_name)
return tid
def ensure_export(self, context, volume):
from suds import client
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume.drivers.san.san import SanISCSIDriver
% {'netlabel': subnet_label, 'netconf': netconfig})
ret_discovery_ip = None
for subnet in netconfig['subnet-list']:
- LOG.info(_('Exploring array subnet label %s') % subnet['label'])
+ LOG.info(_LI('Exploring array subnet label %s') % subnet['label'])
if subnet_label == '*':
# Use the first data subnet, save mgmt+data for later
if (subnet['subnet-id']['type'] == SM_SUBNET_DATA):
- LOG.info(_('Discovery ip %(disc_ip)s is used '
- 'on data subnet %(net_label)s')
+ LOG.info(_LI('Discovery ip %(disc_ip)s is used '
+ 'on data subnet %(net_label)s')
% {'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
return subnet['discovery-ip']
elif (subnet['subnet-id']['type'] ==
SM_SUBNET_MGMT_PLUS_DATA):
- LOG.info(_('Discovery ip %(disc_ip)s is found'
- ' on mgmt+data subnet %(net_label)s')
+ LOG.info(_LI('Discovery ip %(disc_ip)s is found'
+ ' on mgmt+data subnet %(net_label)s')
% {'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
ret_discovery_ip = subnet['discovery-ip']
# If subnet is specified and found, use the subnet
elif subnet_label == subnet['label']:
- LOG.info(_('Discovery ip %(disc_ip)s is used'
- ' on subnet %(net_label)s')
+ LOG.info(_LI('Discovery ip %(disc_ip)s is used'
+ ' on subnet %(net_label)s')
% {'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
return subnet['discovery-ip']
if ret_discovery_ip:
- LOG.info(_('Discovery ip %s is used on mgmt+data subnet')
+ LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet')
% ret_discovery_ip)
return ret_discovery_ip
else:
password=self.configuration.san_password,
ip=self.configuration.san_ip)
except Exception:
- LOG.error(_('Failed to create SOAP client.'
- 'Check san_ip, username, password'
- ' and make sure the array version is compatible'))
+ LOG.error(_LE('Failed to create SOAP client. '
+ 'Check san_ip, username, password'
+ ' and make sure the array version is compatible'))
raise
def _get_provider_location(self, volume_name):
target_ipaddr = self._get_discovery_ip(netconfig)
iscsi_portal = target_ipaddr + ':3260'
provider_location = '%s %s %s' % (iscsi_portal, iqn, LUN_ID)
- LOG.info(_('vol_name=%(name)s provider_location=%(loc)s')
+ LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s')
% {'name': volume_name, 'loc': provider_location})
return provider_location
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
volume_name = volume['name']
- LOG.info(_('Entering extend_volume volume=%(vol)s new_size=%(size)s')
+ LOG.info(_LI('Entering extend_volume volume=%(vol)s new_size=%(size)s')
% {'vol': volume_name, 'size': new_size})
vol_size = int(new_size) * units.Gi
reserve = not self.configuration.san_thin_provision
def _create_igroup_for_initiator(self, initiator_name):
"""Creates igroup for an initiator and returns the igroup name."""
igrp_name = 'openstack-' + self._generate_random_string(12)
- LOG.info(_('Creating initiator group %(grp)s with initiator %(iname)s')
+ LOG.info(_LI('Creating initiator group %(grp)s '
+ 'with initiator %(iname)s')
% {'grp': igrp_name, 'iname': initiator_name})
self.APIExecutor.create_initiator_group(igrp_name, initiator_name)
return igrp_name
if (len(initiator_group['initiator-list']) == 1 and
initiator_group['initiator-list'][0]['name'] ==
initiator_name):
- LOG.info(_('igroup %(grp)s found for initiator %(iname)s')
+ LOG.info(_LI('igroup %(grp)s found for '
+ 'initiator %(iname)s')
% {'grp': initiator_group['name'],
'iname': initiator_name})
return initiator_group['name']
- LOG.info(_('No igroup found for initiator %s') % initiator_name)
+ LOG.info(_LI('No igroup found for initiator %s') % initiator_name)
return None
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance."""
- LOG.info(_('Entering initialize_connection volume=%(vol)s'
- ' connector=%(conn)s location=%(loc)s')
+ LOG.info(_LI('Entering initialize_connection volume=%(vol)s'
+ ' connector=%(conn)s location=%(loc)s')
% {'vol': volume,
'conn': connector,
'loc': volume['provider_location']})
if not initiator_group_name:
initiator_group_name = self._create_igroup_for_initiator(
initiator_name)
- LOG.info(_('Initiator group name is %(grp)s for initiator %(iname)s')
+ LOG.info(_LI('Initiator group name is %(grp)s for initiator %(iname)s')
% {'grp': initiator_group_name, 'iname': initiator_name})
self.APIExecutor.add_acl(volume, initiator_group_name)
(iscsi_portal, iqn, lun_num) = volume['provider_location'].split()
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
- LOG.info(_('Entering terminate_connection volume=%(vol)s'
- ' connector=%(conn)s location=%(loc)s.')
+ LOG.info(_LI('Entering terminate_connection volume=%(vol)s'
+ ' connector=%(conn)s location=%(loc)s.')
% {'vol': volume,
'conn': connector,
'loc': volume['provider_location']})
return func(self, *args, **kwargs)
except NimbleAPIException as e:
if attempts < 1 and (re.search('SM-eaccess', str(e))):
- LOG.info(_('Session might have expired.'
- ' Trying to relogin'))
+ LOG.info(_LI('Session might have expired.'
+ ' Trying to relogin'))
self.login()
continue
else:
- LOG.error(_('Re-throwing Exception %s') % e)
+ LOG.error(_LE('Re-throwing Exception %s') % e)
raise
return inner_connection_checker
# Limit description size to 254 characters
description = description[:254]
- LOG.info(_('Creating a new volume=%(vol)s size=%(size)s'
- ' reserve=%(reserve)s in pool=%(pool)s')
+ LOG.info(_LI('Creating a new volume=%(vol)s size=%(size)s'
+ ' reserve=%(reserve)s in pool=%(pool)s')
% {'vol': volume['name'],
'size': volume_size,
'reserve': reserve,
def create_vol(self, volume, pool_name, reserve):
"""Execute createVol API."""
response = self._execute_create_vol(volume, pool_name, reserve)
- LOG.info(_('Successfully create volume %s') % response['name'])
+ LOG.info(_LI('Successfully created volume %s') % response['name'])
return response['name']
@_connection_checker
@_response_checker
def add_acl(self, volume, initiator_group_name):
"""Execute addAcl API."""
- LOG.info(_('Adding ACL to volume=%(vol)s with'
- ' initiator group name %(igrp)s')
+ LOG.info(_LI('Adding ACL to volume=%(vol)s with'
+ ' initiator group name %(igrp)s')
% {'vol': volume['name'],
'igrp': initiator_group_name})
return self.client.service.addVolAcl(
@_response_checker
def remove_acl(self, volume, initiator_group_name):
"""Execute removeVolAcl API."""
- LOG.info(_('Removing ACL from volume=%(vol)s'
- ' for initiator group %(igrp)s')
+ LOG.info(_LI('Removing ACL from volume=%(vol)s'
+ ' for initiator group %(igrp)s')
% {'vol': volume['name'],
'igrp': initiator_group_name})
return self.client.service.removeVolAcl(
@_connection_checker
@_response_checker
def _execute_get_vol_info(self, vol_name):
- LOG.info(_('Getting volume information for vol_name=%s') % (vol_name))
+ LOG.info(_LI('Getting volume information '
+ 'for vol_name=%s') % (vol_name))
return self.client.service.getVolInfo(request={'sid': self.sid,
'name': vol_name})
def get_vol_info(self, vol_name):
"""Execute getVolInfo API."""
response = self._execute_get_vol_info(vol_name)
- LOG.info(_('Successfully got volume information for volume %s')
+ LOG.info(_LI('Successfully got volume information for volume %s')
% vol_name)
return response['vol']
@_response_checker
def online_vol(self, vol_name, online_flag, *args, **kwargs):
"""Execute onlineVol API."""
- LOG.info(_('Setting volume %(vol)s to online_flag %(flag)s')
+ LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s')
% {'vol': vol_name, 'flag': online_flag})
return self.client.service.onlineVol(request={'sid': self.sid,
'name': vol_name,
@_response_checker
def dissociate_volcoll(self, vol_name, *args, **kwargs):
"""Execute dissocProtPol API."""
- LOG.info(_('Dissociating volume %s ') % vol_name)
+ LOG.info(_LI('Dissociating volume %s ') % vol_name)
return self.client.service.dissocProtPol(
request={'sid': self.sid,
'vol-name': vol_name})
@_response_checker
def delete_vol(self, vol_name, *args, **kwargs):
"""Execute deleteVol API."""
- LOG.info(_('Deleting volume %s ') % vol_name)
+ LOG.info(_LI('Deleting volume %s ') % vol_name)
return self.client.service.deleteVol(request={'sid': self.sid,
'name': vol_name})
snap_description = snap_display_name + snap_display_description
# Limit to 254 characters
snap_description = snap_description[:254]
- LOG.info(_('Creating snapshot for volume_name=%(vol)s'
- ' snap_name=%(name)s snap_description=%(desc)s')
+ LOG.info(_LI('Creating snapshot for volume_name=%(vol)s'
+ ' snap_name=%(name)s snap_description=%(desc)s')
% {'vol': volume_name,
'name': snap_name,
'desc': snap_description})
@_response_checker
def delete_snap(self, vol_name, snap_name, *args, **kwargs):
"""Execute deleteSnap API."""
- LOG.info(_('Deleting snapshot %s ') % snap_name)
+ LOG.info(_LI('Deleting snapshot %s ') % snap_name)
return self.client.service.deleteSnap(request={'sid': self.sid,
'vol': vol_name,
'name': snap_name})
clone_name = volume['name']
snap_size = snapshot['volume_size']
reserve_size = snap_size * units.Gi if reserve else 0
- LOG.info(_('Cloning volume from snapshot volume=%(vol)s '
- 'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s'
- 'reserve=%(reserve)s')
+ LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s '
+ 'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s '
+ 'reserve=%(reserve)s')
% {'vol': volume_name,
'snap': snap_name,
'clone': clone_name,
@_response_checker
def edit_vol(self, vol_name, mask, attr):
"""Execute editVol API."""
- LOG.info(_('Editing Volume %(vol)s with mask %(mask)s')
+ LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s')
% {'vol': vol_name, 'mask': str(mask)})
return self.client.service.editVol(request={'sid': self.sid,
'name': vol_name,
@_connection_checker
@_response_checker
def _execute_get_initiator_grp_list(self):
- LOG.info(_('Getting getInitiatorGrpList'))
+ LOG.info(_LI('Getting getInitiatorGrpList'))
return (self.client.service.getInitiatorGrpList(
request={'sid': self.sid}))
def get_initiator_grp_list(self):
"""Execute getInitiatorGrpList API."""
response = self._execute_get_initiator_grp_list()
- LOG.info(_('Successfully retrieved InitiatorGrpList'))
+ LOG.info(_LI('Successfully retrieved InitiatorGrpList'))
return (response['initiatorgrp-list']
if 'initiatorgrp-list' in response else [])
@_response_checker
def create_initiator_group(self, initiator_group_name, initiator_name):
"""Execute createInitiatorGrp API."""
- LOG.info(_('Creating initiator group %(igrp)s'
- ' with one initiator %(iname)s')
+ LOG.info(_LI('Creating initiator group %(igrp)s'
+ ' with one initiator %(iname)s')
% {'igrp': initiator_group_name, 'iname': initiator_name})
return self.client.service.createInitiatorGrp(
request={'sid': self.sid,
@_response_checker
def delete_initiator_group(self, initiator_group_name, *args, **kwargs):
"""Execute deleteInitiatorGrp API."""
- LOG.info(_('Deleting deleteInitiatorGrp %s ') % initiator_group_name)
+ LOG.info(_LI('Deleting deleteInitiatorGrp %s ') % initiator_group_name)
return self.client.service.deleteInitiatorGrp(
request={'sid': self.sid,
'name': initiator_group_name})
import errno
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
import cinder.volume.driver
from cinder.volume.drivers.prophetstor import dplcommon
(backend_name or 'DPLISCSIDriver')
self._stats = data
except Exception as exc:
- LOG.warning(_('Cannot get volume status '
- '%(exc)%s.') % {'exc': exc})
+ LOG.warning(_LW('Cannot get volume status '
+ '%(exc)s.') % {'exc': exc})
return self._stats
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.openstack.common import units
payload = json.dumps(params, ensure_ascii=False)
payload.encode('utf-8')
except Exception:
- LOG.error(_('JSON encode params error: %s.'),
+ LOG.error(_LE('JSON encode params error: %s.'),
six.text_type(params))
retcode = errno.EINVAL
for i in range(CONNECTION_RETRY):
retcode = 0
break
except IOError as ioerr:
- LOG.error(_('Connect to Flexvisor error: %s.'),
+ LOG.error(_LE('Connect to Flexvisor error: %s.'),
six.text_type(ioerr))
retcode = errno.ENOTCONN
except Exception as e:
- LOG.error(_('Connect to Flexvisor failed: %s.'),
+ LOG.error(_LE('Connect to Flexvisor failed: %s.'),
six.text_type(e))
retcode = errno.EFAULT
retcode = errno.ENOTCONN
continue
except Exception as e:
- LOG.error(_('Failed to send request: %s.'),
+ LOG.error(_LE('Failed to send request: %s.'),
six.text_type(e))
retcode = errno.EFAULT
break
try:
response = connection.getresponse()
if response.status == httplib.SERVICE_UNAVAILABLE:
- LOG.error(_('The Flexvisor service is unavailable.'))
+ LOG.error(_LE('The Flexvisor service is unavailable.'))
time.sleep(1)
retry -= 1
retcode = errno.ENOPROTOOPT
retcode = errno.EFAULT
continue
except Exception as e:
- LOG.error(_('Failed to get response: %s.'),
+ LOG.error(_LE('Failed to get response: %s.'),
six.text_type(e.message))
retcode = errno.EFAULT
break
response.status == httplib.NOT_FOUND:
retcode = errno.ENODATA
elif retcode == 0 and response.status not in expected_status:
- LOG.error(_('%(method)s %(url)s unexpected response status: '
- '%(response)s (expects: %(expects)s).')
+ LOG.error(_LE('%(method)s %(url)s unexpected response status: '
+ '%(response)s (expects: %(expects)s).')
% {'method': method,
'url': url,
'response': httplib.responses[response.status],
data = response.read()
data = json.loads(data)
except (TypeError, ValueError) as e:
- LOG.error(_('Call to json.loads() raised an exception: %s.'),
+ LOG.error(_LE('Call to json.loads() raised an exception: %s.'),
six.text_type(e))
retcode = errno.ENOEXEC
except Exception as e:
- LOG.error(_('Read response raised an exception: %s.'),
+ LOG.error(_LE('Read response raised an exception: %s.'),
six.text_type(e))
retcode = errno.ENOEXEC
elif retcode == 0 and \
data = response.read()
data = json.loads(data)
except (TypeError, ValueError) as e:
- LOG.error(_('Call to json.loads() raised an exception: %s.'),
+ LOG.error(_LE('Call to json.loads() raised an exception: %s.'),
six.text_type(e))
retcode = errno.ENOEXEC
except Exception as e:
- LOG.error(_('Read response raised an exception: %s.'),
+ LOG.error(_LE('Read response raised an exception: %s.'),
six.text_type(e))
retcode = errno.ENOEXEC
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
- LOG.info(_('Start to create consistency group: %(group_name)s '
- 'id: %(id)s') %
+ LOG.info(_LI('Start to create consistency group: %(group_name)s '
+ 'id: %(id)s') %
{'group_name': group['name'], 'id': group['id']})
model_update = {'status': 'available'}
try:
context, group['id'])
model_update = {}
model_update['status'] = group['status']
- LOG.info(_('Start to delete consistency group: %(cg_name)s')
+ LOG.info(_LI('Start to delete consistency group: %(cg_name)s')
% {'cg_name': group['id']})
try:
self.dpl.delete_vg(self._conver_uuid2hex(group['id']))
context, cgsnapshot_id)
model_update = {}
- LOG.info(_('Start to create cgsnapshot for consistency group'
- ': %(group_name)s') %
+ LOG.info(_LI('Start to create cgsnapshot for consistency group'
+ ': %(group_name)s') %
{'group_name': cgId})
try:
model_update = {}
model_update['status'] = cgsnapshot['status']
- LOG.info(_('Delete cgsnapshot %(snap_name)s for consistency group: '
- '%(group_name)s') % {'snap_name': cgsnapshot['id'],
+ LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
+ '%(group_name)s') % {'snap_name': cgsnapshot['id'],
'group_name': cgsnapshot['consistencygroup_id']})
try:
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.context = context
- LOG.info(_('Activate Flexvisor cinder volume driver.'))
+ LOG.info(_LI('Activate Flexvisor cinder volume driver.'))
def check_for_setup_error(self):
"""Check DPL can connect properly."""
ret = 0
output = status.get('output', {})
else:
- LOG.error(_('Flexvisor failed to get pool info '
- '(failed to get event)%s.') % (poolid))
+ LOG.error(_LE('Flexvisor failed to get pool info '
+ '(failed to get event)%s.') % (poolid))
raise exception.VolumeBackendAPIException(
data="failed to get event")
elif ret != 0:
if err.kwargs["code"] == 400:
# Happens if the volume does not exist.
ctxt.reraise = False
- LOG.error(_LE("Volume deletion failed with message: %s") %
- err.msg)
+ LOG.error(_LE("Volume deletion failed with message: {0}"
+ ).format(err.msg))
LOG.debug("Leave PureISCSIDriver.delete_volume.")
def create_snapshot(self, snapshot):
if err.kwargs["code"] == 400:
# Happens if the snapshot does not exist.
ctxt.reraise = False
- LOG.error(_LE("Snapshot deletion failed with message:"
- " %s") % err.msg)
+ LOG.error(_LE("Snapshot deletion failed with message: {0}"
+ ).format(err.msg))
LOG.debug("Leave PureISCSIDriver.delete_snapshot.")
def initialize_connection(self, volume, connector):
self._run_iscsiadm_bare(["-m", "discovery", "-t", "sendtargets",
"-p", self._iscsi_port["portal"]])
except processutils.ProcessExecutionError as err:
- LOG.warn(_LW("iSCSI discovery of port %(port_name)s at "
- "%(port_portal)s failed with error: %(err_msg)s") %
- {"port_name": self._iscsi_port["name"],
- "port_portal": self._iscsi_port["portal"],
- "err_msg": err.stderr})
+ LOG.warn(_LW("iSCSI discovery of port {0[name]} at {0[portal]} "
+ "failed with error: {1}").format(self._iscsi_port,
+ err.stderr))
self._iscsi_port = self._choose_target_iscsi_port()
return self._iscsi_port
if err.kwargs["code"] == 400:
# Happens if the host and volume are not connected.
ctxt.reraise = False
- LOG.error(_LE("Disconnection failed with message: "
- "%(msg)s.") % {"msg": err.msg})
+ LOG.error(_LE("Disconnection failed "
+ "with message: {msg}."
+ ).format(msg=err.msg))
if (GENERATED_NAME.match(host_name) and not host["hgroup"] and
not self._array.list_host_connections(host_name,
private=True)):
self._array.delete_host(host_name)
else:
LOG.error(_LE("Unable to find host object in Purity with IQN: "
- "%(iqn)s.") % {"iqn": connector["initiator"]})
+ "{iqn}.").format(iqn=connector["initiator"]))
LOG.debug("Leave PureISCSIDriver.terminate_connection.")
def get_volume_stats(self, refresh=False):
def list_ports(self, **kwargs):
"""Return a list of dictionaries describing ports."""
- return self._http_request("GET", "port", kwargs)
\ No newline at end of file
+ return self._http_request("GET", "port", kwargs)
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
try:
self._rbd_meta.image.flush()
except AttributeError:
- LOG.warning(_("flush() not supported in this version of librbd"))
+ LOG.warning(_LW("flush() not supported in "
+ "this version of librbd"))
def fileno(self):
"""RBD does not have support for fileno() so we raise IOError.
snapshot=snapshot,
read_only=read_only)
except driver.rbd.Error:
- LOG.exception(_("error opening rbd image %s"), name)
+ LOG.exception(_LE("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
raise
self.driver = driver
ioctx = client.open_ioctx(pool)
return client, ioctx
except self.rados.Error as exc:
- LOG.error("error connecting to ceph cluster.")
+ LOG.error(_LE("error connecting to ceph cluster."))
# shutdown cannot raise an exception
client.shutdown()
raise exception.VolumeBackendAPIException(data=str(exc))
stats['free_capacity_gb'] = new_stats['kb_avail'] / units.Mi
except self.rados.Error:
# just log and return unknown capacities
- LOG.exception(_('error refreshing volume stats'))
+ LOG.exception(_LE('error refreshing volume stats'))
self._stats = stats
def get_volume_stats(self, refresh=False):
try:
rbd_image = self.rbd.Image(client.ioctx, volume_name)
except self.rbd.ImageNotFound:
- LOG.info(_("volume %s no longer exists in backend")
+ LOG.info(_LI("volume %s no longer exists in backend")
% (volume_name))
return
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
stats['total_capacity_gb'] = total / units.Gi
stats['free_capacity_gb'] = (total - used) / units.Gi
except processutils.ProcessExecutionError:
- LOG.exception(_('error refreshing volume stats'))
+ LOG.exception(_LE('error refreshing volume stats'))
self._stats = stats
from cinder import context
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
cluster_stats = {}
retry_exc_tuple = (exception.SolidFireRetryableException,
requests.exceptions.ConnectionError)
- retryable_errors = ['xDBVersionMisMatch',
+ retryable_errors = ['xDBVersionMismatch',
'xMaxSnapshotsPerVolumeExceeded',
'xMaxClonesPerVolumeExceeded',
'xMaxSnapshotsPerNodeExceeded',
iteration_count += 1
if not found_volume:
- LOG.error(_('Failed to retrieve volume SolidFire-'
- 'ID: %s in get_by_account!') % sf_volume_id)
+ LOG.error(_LE('Failed to retrieve volume SolidFire-'
+ 'ID: %s in get_by_account!') % sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
model_update = {}
if i.key == 'sf-qos' and i.value in valid_presets]
if len(presets) > 0:
if len(presets) > 1:
- LOG.warning(_('More than one valid preset was '
- 'detected, using %s') % presets[0])
+ LOG.warning(_LW('More than one valid preset was '
+ 'detected, using %s') % presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
# NOTE(jdg): Previously we would raise here, but there are cases
# where this might be a cleanup for a failed delete.
# Until we get better states we'll just log an error
- LOG.error(_("Volume %s, not found on SF Cluster."), uuid)
+ LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
if found_count > 1:
- LOG.error(_("Found %(count)s volumes mapped to id: %(uuid)s.") %
+ LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s.") %
{'count': found_count,
'uuid': uuid})
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
sfaccount = self._get_sfaccount(volume['project_id'])
if sfaccount is None:
- LOG.error(_("Account for Volume ID %s was not found on "
- "the SolidFire Cluster while attempting "
- "delete_volume operation!") % volume['id'])
- LOG.error(_("This usually means the volume was never "
- "successfully created."))
+ LOG.error(_LE("Account for Volume ID %s was not found on "
+ "the SolidFire Cluster while attempting "
+ "delete_volume operation!") % volume['id'])
+ LOG.error(_LE("This usually means the volume was never "
+ "successfully created."))
return
params = {'accountID': sfaccount['accountID']}
msg = _("Failed to delete SolidFire Volume: %s") % data
raise exception.SolidFireAPIException(msg)
else:
- LOG.error(_("Volume ID %s was not found on "
- "the SolidFire Cluster while attempting "
- "delete_volume operation!"), volume['id'])
+ LOG.error(_LE("Volume ID %s was not found on "
+ "the SolidFire Cluster while attempting "
+ "delete_volume operation!"), volume['id'])
LOG.debug("Leaving SolidFire delete_volume")
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
- LOG.error(_("Volume ID %s was not found on "
- "the SolidFire Cluster while attempting "
- "extend_volume operation!"), volume['id'])
+ LOG.error(_LE("Volume ID %s was not found on "
+ "the SolidFire Cluster while attempting "
+ "extend_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
params = {
# of stats data, this is just one of the calls
results = self._issue_api_request('GetClusterCapacity', params)
if 'result' not in results:
- LOG.error(_('Failed to get updated stats'))
+ LOG.error(_LE('Failed to get updated stats'))
results = results['result']['clusterCapacity']
free_capacity =\
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
- LOG.error(_("Volume ID %s was not found on "
- "the SolidFire Cluster while attempting "
- "attach_volume operation!"), volume['id'])
+ LOG.error(_LE("Volume ID %s was not found on "
+ "the SolidFire Cluster while attempting "
+ "attach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
- LOG.error(_("Volume ID %s was not found on "
- "the SolidFire Cluster while attempting "
- "detach_volume operation!"), volume['id'])
+ LOG.error(_LE("Volume ID %s was not found on "
+ "the SolidFire Cluster while attempting "
+ "detach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
- LOG.error(_("Volume ID %s was not found on "
- "the SolidFire Cluster while attempting "
- "accept_transfer operation!"), volume['id'])
+ LOG.error(_LE("Volume ID %s was not found on "
+ "the SolidFire Cluster while attempting "
+ "accept_transfer operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
if new_project != volume['project_id']:
# do a create_sfaccount here as this tenant
LOG.debug("Enter SolidFire unmanage...")
sfaccount = self._get_sfaccount(volume['project_id'])
if sfaccount is None:
- LOG.error(_("Account for Volume ID %s was not found on "
- "the SolidFire Cluster while attempting "
- "unmanage operation!") % volume['id'])
+ LOG.error(_LE("Account for Volume ID %s was not found on "
+ "the SolidFire Cluster while attempting "
+ "unmanage operation!") % volume['id'])
raise exception.SolidFireAPIException("Failed to find account "
"for volume.")
import netaddr
import six.moves.urllib.parse as urlparse
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
msg = _("Could not retrieve URL from lease.")
LOG.exception(msg)
raise error_util.VimException(msg)
- LOG.info(_("Opening vmdk url: %s for write.") % url)
+ LOG.info(_LI("Opening vmdk url: %s for write.") % url)
# Prepare the http connection to the vmdk url
cookies = session.vim.client.options.transport.cookiejar
msg = _("Could not retrieve URL from lease.")
LOG.exception(msg)
raise error_util.VimException(msg)
- LOG.info(_("Opening vmdk url: %s for read.") % url)
+ LOG.info(_LI("Opening vmdk url: %s for read.") % url)
cookies = session.vim.client.options.transport.cookiejar
headers = {'User-Agent': USER_AGENT,
from cinder import exception
from cinder import flow_utils
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
#
# NOTE(harlowja): Being unable to destroy a volume is pretty
# bad though!!
- LOG.exception(_("Failed destroying volume entry %s"), vol_id)
+ LOG.exception(_LE("Failed destroying volume entry %s"), vol_id)
class QuotaReserveTask(flow_utils.CinderTask):
except exception.CinderException:
# We are already reverting, therefore we should silence this
# exception since a second exception being active will be bad.
- LOG.exception(_("Failed rolling back quota for"
- " %s reservations"), reservations)
+ LOG.exception(_LE("Failed rolling back quota for"
+ " %s reservations"), reservations)
class QuotaCommitTask(flow_utils.CinderTask):
QUOTAS.commit(context, reservations,
project_id=context.project_id)
except Exception:
- LOG.exception(_("Failed to update quota for deleting volume: %s"),
- volume['id'])
+ LOG.exception(_LE("Failed to update quota for deleting "
+ "volume: %s"), volume['id'])
class VolumeCastTask(flow_utils.CinderTask):
volume_id = kwargs['volume_id']
common.restore_source_status(context, self.db, kwargs)
common.error_out_volume(context, self.db, volume_id)
- LOG.error(_("Volume %s: create failed"), volume_id)
+ LOG.error(_LE("Volume %s: create failed"), volume_id)
exc_info = False
if all(flow_failures[-1].exc_info):
exc_info = flow_failures[-1].exc_info
- LOG.error(_('Unexpected build error:'), exc_info=exc_info)
+ LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def get_flow(scheduler_rpcapi, volume_rpcapi, db_api,
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
except exception.CinderException:
# NOTE(harlowja): Don't let this cause further exceptions since this is
# a non-critical failure.
- LOG.exception(_("Failed setting source volume %(source_volid)s back to"
- " its initial %(source_status)s status") %
+ LOG.exception(_LE("Failed setting source "
+ "volume %(source_volid)s back to"
+ " its initial %(source_status)s status") %
{'source_status': source_status,
'source_volid': source_volid})
db.volume_update(context, volume_id, update)
except exception.CinderException:
# Don't let this cause further exceptions.
- LOG.exception(_("Failed updating volume %(volume_id)s with"
- " %(update)s") % {'volume_id': volume_id,
- 'update': update})
+ LOG.exception(_LE("Failed updating volume %(volume_id)s with"
+ " %(update)s") % {'volume_id': volume_id,
+ 'update': update})
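Several of the flow hunks use LOG.exception rather than LOG.error. A small illustrative sketch (names here are hypothetical, not the flow's real helpers) of why: LOG.exception logs at ERROR level and attaches the active traceback, so the translated message itself can stay short:

import logging

LOG = logging.getLogger(__name__)

def update_volume(db_update, volume_id, update):
    try:
        db_update(volume_id, update)
    except Exception:
        # Equivalent to LOG.error(..., exc_info=True): the traceback of the
        # exception being handled is appended to the log record.
        LOG.exception("Failed updating volume %(volume_id)s with"
                      " %(update)s", {'volume_id': volume_id,
                                      'update': update})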
from cinder import exception
from cinder import flow_utils
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
from cinder.image import glance
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
return
common.error_out_volume(context, self.db, volume_id)
- LOG.error(_("Volume %s: create failed"), volume_id)
+ LOG.error(_LE("Volume %s: create failed"), volume_id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
try:
copy_image_to_volume(context, volume_ref, image_service, image_id)
except processutils.ProcessExecutionError as ex:
- LOG.error(_("Failed to copy image %(image_id)s to volume: "
- "%(volume_id)s, error: %(error)s") %
+ LOG.error(_LE("Failed to copy image %(image_id)s to volume: "
+ "%(volume_id)s, error: %(error)s") %
{'volume_id': volume_id,
'error': ex.stderr, 'image_id': image_id})
raise exception.ImageCopyFailure(reason=ex.stderr)
except exception.ImageUnacceptable as ex:
- LOG.error(_("Failed to copy image to volume: %(volume_id)s, "
- "error: %(error)s") % {'volume_id': volume_id,
- 'error': ex})
+ LOG.error(_LE("Failed to copy image to volume: %(volume_id)s, "
+ "error: %(error)s") %
+ {'volume_id': volume_id, 'error': ex})
raise exception.ImageUnacceptable(ex)
except Exception as ex:
- LOG.error(_("Failed to copy image %(image_id)s to "
- "volume: %(volume_id)s, error: %(error)s") %
+ LOG.error(_LE("Failed to copy image %(image_id)s to "
+ "volume: %(volume_id)s, error: %(error)s") %
{'volume_id': volume_id, 'error': ex,
'image_id': image_id})
if not isinstance(ex, exception.ImageCopyFailure):
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
- LOG.error(_("Unable to create volume. "
- "Volume driver %s not initialized") % driver_name)
+ LOG.error(_LE("Unable to create volume. "
+ "Volume driver %s not initialized") % driver_name)
# NOTE(flaper87): Set the error status before
# raising any exception.
self.db.volume_update(context, volume_id, dict(status='error'))
raise exception.DriverNotInitialized()
create_type = volume_spec.pop('type', None)
- LOG.info(_("Volume %(volume_id)s: being created as %(create_type)s "
- "with specification: %(volume_spec)s") %
+ LOG.info(_LI("Volume %(volume_id)s: being created as %(create_type)s "
+ "with specification: %(volume_spec)s") %
{'volume_spec': volume_spec, 'volume_id': volume_id,
'create_type': create_type})
if create_type == 'raw':
from cinder import exception
from cinder import flow_utils
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.volume.flows.api import create_volume as create_api
from cinder.volume.flows import common as flow_common
volume_id = volume_ref['id']
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
- LOG.error(_("Unable to manage existing volume. "
- "Volume driver %s not initialized.") % driver_name)
+ LOG.error(_LE("Unable to manage existing volume. "
+ "Volume driver %s not initialized.") % driver_name)
flow_common.error_out_volume(context, self.db, volume_id,
reason=_("Volume driver %s "
"not initialized.") %
from cinder import context
from cinder import exception
from cinder import flow_utils
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import manager
from cinder.openstack.common import excutils
# if its not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
- LOG.warn(_("Driver path %s is deprecated, update your "
- "configuration to the new path."), volume_driver)
+ LOG.warn(_LW("Driver path %s is deprecated, update your "
+ "configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
try:
pool = self.driver.get_pool(volume)
except Exception as err:
- LOG.error(_('Failed to fetch pool name for volume: %s'),
+ LOG.error(_LE('Failed to fetch pool name for volume: %s'),
volume['id'])
LOG.exception(err)
return
None, filters=None)
if len(vol_entries) == 0:
- LOG.info(_("Determined volume DB was empty at startup."))
+ LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
- LOG.info(_("Determined volume DB was not empty at startup."))
+ LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
- LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
+ LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception as ex:
- LOG.error(_("Error encountered during "
- "initialization of driver: %(name)s") %
+ LOG.error(_LE("Error encountered during "
+ "initialization of driver: %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception as export_ex:
- LOG.error(_("Failed to re-export volume %s: "
- "setting to error state"), volume['id'])
+ LOG.error(_LE("Failed to re-export volume %s: "
+ "setting to error state"), volume['id'])
LOG.exception(export_ex)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
elif volume['status'] == 'downloading':
- LOG.info(_("volume %s stuck in a downloading state"),
+ LOG.info(_LI("volume %s stuck in a downloading state"),
volume['id'])
self.driver.clear_download(ctxt, volume)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
else:
- LOG.info(_("volume %s: skipping export"), volume['id'])
+ LOG.info(_LI("volume %s: skipping export"), volume['id'])
except Exception as ex:
- LOG.error(_("Error encountered during "
- "re-exporting phase of driver initialization: "
- " %(name)s") %
+ LOG.error(_LE("Error encountered during "
+ "re-exporting phase of driver initialization: "
+ " %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
return
LOG.debug('Resuming any in progress delete operations')
for volume in volumes:
if volume['status'] == 'deleting':
- LOG.info(_('Resuming delete on volume: %s') % volume['id'])
+ LOG.info(_LI('Resuming delete on volume: %s') % volume['id'])
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
request_spec=request_spec,
filter_properties=filter_properties)
except Exception:
- LOG.exception(_("Failed to create manager volume flow"))
+ LOG.exception(_LE("Failed to create manager volume flow"))
raise exception.CinderException(
_("Failed to create manager volume flow."))
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
- LOG.info(_("Tried to delete volume %s, but it no longer exists, "
- "moving on") % (volume_id))
+ LOG.info(_LI("Tried to delete volume %s, but it no longer exists, "
+ "moving on") % (volume_id))
return True
if context.project_id != volume_ref['project_id']:
else:
project_id = context.project_id
- LOG.info(_("volume %s: deleting"), volume_ref['id'])
+ LOG.info(_LI("volume %s: deleting"), volume_ref['id'])
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
else:
self.driver.delete_volume(volume_ref)
except exception.VolumeIsBusy:
- LOG.error(_("Cannot delete volume %s: volume is busy"),
+ LOG.error(_LE("Cannot delete volume %s: volume is busy"),
volume_ref['id'])
self.db.volume_update(context, volume_ref['id'],
{'status': 'available'})
**reserve_opts)
except Exception:
reservations = None
- LOG.exception(_("Failed to update usages deleting volume"))
+ LOG.exception(_LE("Failed to update usages deleting volume"))
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
- LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
+ LOG.info(_LI("volume %s: deleted successfully"), volume_ref['id'])
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
caller_context = context
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
- LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
+ LOG.info(_LI("snapshot %s: creating"), snapshot_ref['id'])
self._notify_about_snapshot_usage(
context, snapshot_ref, "create.start")
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_ref['id'], volume_id)
except exception.CinderException as ex:
- LOG.exception(_("Failed updating %(snapshot_id)s"
- " metadata using the provided volumes"
- " %(volume_id)s metadata") %
+ LOG.exception(_LE("Failed updating %(snapshot_id)s"
+ " metadata using the provided volumes"
+ " %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
self.db.snapshot_update(context,
{'status': 'available',
'progress': '100%'})
- LOG.info(_("snapshot %s: created successfully"), snapshot_ref['id'])
+ LOG.info(_LI("snapshot %s: created successfully"), snapshot_ref['id'])
self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
return snapshot_id
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
project_id = snapshot_ref['project_id']
- LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
+ LOG.info(_LI("snapshot %s: deleting"), snapshot_ref['id'])
self._notify_about_snapshot_usage(
context, snapshot_ref, "delete.start")
self.driver.delete_snapshot(snapshot_ref)
except exception.SnapshotIsBusy:
- LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
+ LOG.error(_LE("Cannot delete snapshot %s: snapshot is busy"),
snapshot_ref['id'])
self.db.snapshot_update(context,
snapshot_ref['id'],
**reserve_opts)
except Exception:
reservations = None
- LOG.exception(_("Failed to update usages deleting snapshot"))
+ LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
self.db.snapshot_destroy(context, snapshot_id)
- LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
+ LOG.info(_LI("snapshot %s: deleted successfully"), snapshot_ref['id'])
self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")
# Commit the reservations
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Error detaching volume %(volume)s, "
- "due to uninitialized driver."),
+ LOG.exception(_LE("Error detaching volume %(volume)s, "
+ "due to uninitialized driver."),
{"volume": volume_id})
except Exception as ex:
- LOG.exception(_("Error detaching volume %(volume)s, "
- "due to remove export failure."),
+ LOG.exception(_LE("Error detaching volume %(volume)s, "
+ "due to remove export failure."),
{"volume": volume_id})
raise exception.RemoveExportException(volume=volume_id, reason=ex)
"image (%(image_id)s) successfully",
{'volume_id': volume_id, 'image_id': image_id})
except Exception as error:
- LOG.error(_("Error occurred while uploading volume %(volume_id)s "
- "to image %(image_id)s."),
+ LOG.error(_LE("Error occurred while uploading "
+ "volume %(volume_id)s "
+ "to image %(image_id)s."),
{'volume_id': volume_id, 'image_id': image_meta['id']})
if image_service is not None:
# Deletes the image if it is in queued or saving state
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
- LOG.warn("Deleting image %(image_id)s in %(image_status)s "
- "state.",
+ LOG.warn(_LW("Deleting image %(image_id)s in %(image_status)s "
+ "state."),
{'image_id': image_id,
'image_status': image_status})
image_service.delete(context, image_id)
except Exception:
- LOG.warn(_("Error occurred while deleting image %s."),
+ LOG.warn(_LW("Error occurred while deleting image %s."),
image_id, exc_info=True)
def initialize_connection(self, context, volume_id, connector):
volume_id,
model_update)
except exception.CinderException as ex:
- LOG.exception(_("Failed updating model of volume %(volume_id)s"
- " with driver provided model %(model)s") %
+ LOG.exception(_LE("Failed updating model of volume %(volume_id)s"
+ " with driver provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update})
raise exception.ExportFailure(reason=ex)
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed updating model of "
- "volume %(volume_id)s "
- "with drivers update %(model)s "
- "during xfr.") %
+ LOG.exception(_LE("Failed updating model of "
+ "volume %(volume_id)s "
+ "with drivers update %(model)s "
+ "during xfr.") %
{'volume_id': volume_id,
'model': model_update})
self.db.volume_update(context.elevated(),
@periodic_task.periodic_task
def _report_driver_status(self, context):
- LOG.info(_("Updating volume status"))
+ LOG.info(_LI("Updating volume status"))
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
- LOG.warning(_('Unable to update stats, %(driver_name)s '
- '-%(driver_version)s '
- '%(config_group)s driver is uninitialized.') %
+ LOG.warning(_LW('Unable to update stats, %(driver_name)s '
+ '-%(driver_version)s '
+ '%(config_group)s driver is uninitialized.') %
{'driver_name': self.driver.__class__.__name__,
'driver_version': self.driver.get_version(),
'config_group': config_group})
self._publish_service_capabilities(context)
def notification(self, context, event):
- LOG.info(_("Notification {%s} received"), event)
+ LOG.info(_LI("Notification {%s} received"), event)
def _notify_about_volume_usage(self,
context,
size_increase = (int(new_size)) - volume['size']
self._notify_about_volume_usage(context, volume, "resize.start")
try:
- LOG.info(_("volume %s: extending"), volume['id'])
+ LOG.info(_LI("volume %s: extending"), volume['id'])
self.driver.extend_volume(volume, new_size)
- LOG.info(_("volume %s: extended successfully"), volume['id'])
+ LOG.info(_LI("volume %s: extended successfully"), volume['id'])
except Exception:
- LOG.exception(_("volume %s: Error trying to extend volume"),
+ LOG.exception(_LE("volume %s: Error trying to extend volume"),
volume_id)
try:
self.db.volume_update(context, volume['id'],
except Exception:
old_reservations = None
self.db.volume_update(context, volume_id, status_update)
- LOG.exception(_("Failed to update usages while retyping volume."))
+ LOG.exception(_LE("Failed to update usages "
+ "while retyping volume."))
raise exception.CinderException(_("Failed to get old volume type"
" quota reservations"))
retyped = ret
if retyped:
- LOG.info(_("Volume %s: retyped successfully"), volume_id)
+ LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
except Exception as ex:
retyped = False
- LOG.error(_("Volume %s: driver error when trying to retype, "
- "falling back to generic mechanism."),
+ LOG.error(_LE("Volume %s: driver error when trying to retype, "
+ "falling back to generic mechanism."),
volume_ref['id'])
LOG.exception(ex)
volume_id,
ref)
except Exception:
- LOG.exception(_("Failed to create manage_existing flow."))
+ LOG.exception(_LE("Failed to create manage_existing flow."))
raise exception.CinderException(
_("Failed to create manage existing flow."))
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to promote replica for volume %(id)s.")
+ LOG.exception(_LE("Failed to promote replica "
+ "for volume %(id)s.")
% {'id': volume_id})
volume = self.db.volume_get(ctxt, volume_id)
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to sync replica for volume %(id)s.")
+ LOG.exception(_LE("Failed to sync replica for volume %(id)s.")
% {'id': volume_id})
volume = self.db.volume_get(ctxt, volume_id)
@periodic_task.periodic_task
def _update_replication_relationship_status(self, ctxt):
- LOG.info(_('Updating volume replication status.'))
+ LOG.info(_LI('Updating volume replication status.'))
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
- LOG.warning(_('Unable to update volume replication status, '
- '%(driver_name)s -%(driver_version)s '
- '%(config_group)s driver is uninitialized.') %
+ LOG.warning(_LW('Unable to update volume replication status, '
+ '%(driver_name)s -%(driver_version)s '
+ '%(config_group)s driver is uninitialized.') %
{'driver_name': self.driver.__class__.__name__,
'driver_version': self.driver.get_version(),
'config_group': config_group})
vol['id'],
model_update)
except Exception:
- LOG.exception(_("Error checking replication status for "
- "volume %s") % vol['id'])
+ LOG.exception(_LE("Error checking replication status for "
+ "volume %s") % vol['id'])
def create_consistencygroup(self, context, group_id):
"""Creates the consistency group."""
try:
utils.require_driver_initialized(self.driver)
- LOG.info(_("Consistency group %s: creating"), group_ref['name'])
+ LOG.info(_LI("Consistency group %s: creating"), group_ref['name'])
model_update = self.driver.create_consistencygroup(context,
group_ref)
context,
group_ref['id'],
{'status': 'error'})
- LOG.error(_("Consistency group %s: create failed"),
+ LOG.error(_LE("Consistency group %s: create failed"),
group_ref['name'])
now = timeutils.utcnow()
group_ref['id'],
{'status': status,
'created_at': now})
- LOG.info(_("Consistency group %s: created successfully"),
+ LOG.info(_LI("Consistency group %s: created successfully"),
group_ref['name'])
self._notify_about_consistencygroup_usage(
else:
project_id = context.project_id
- LOG.info(_("Consistency group %s: deleting"), group_ref['id'])
+ LOG.info(_LI("Consistency group %s: deleting"), group_ref['id'])
volumes = self.db.volume_get_all_by_group(context, group_id)
**reserve_opts)
except Exception:
cgreservations = None
- LOG.exception(_("Failed to update usages deleting "
- "consistency groups."))
+ LOG.exception(_LE("Failed to update usages deleting "
+ "consistency groups."))
for volume_ref in volumes:
# Get reservations for volume
**reserve_opts)
except Exception:
reservations = None
- LOG.exception(_("Failed to update usages deleting volume."))
+ LOG.exception(_LE("Failed to update usages deleting volume."))
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
project_id=project_id)
self.db.consistencygroup_destroy(context, group_id)
- LOG.info(_("Consistency group %s: deleted successfully."),
+ LOG.info(_LI("Consistency group %s: deleted successfully."),
group_id)
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.end")
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
- LOG.info(_("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
+ LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot['id'], volume_id)
except exception.CinderException as ex:
- LOG.error(_("Failed updating %(snapshot_id)s"
- " metadata using the provided volumes"
- " %(volume_id)s metadata") %
+ LOG.error(_LE("Failed updating %(snapshot_id)s"
+ " metadata using the provided volumes"
+ " %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
self.db.snapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'available'})
- LOG.info(_("cgsnapshot %s: created successfully"),
+ LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.end")
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
project_id = cgsnapshot_ref['project_id']
- LOG.info(_("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
+ LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
except Exception:
reservations = None
- LOG.exception(_("Failed to update usages deleting snapshot"))
+ LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
QUOTAS.commit(context, reservations, project_id=project_id)
self.db.cgsnapshot_destroy(context, cgsnapshot_id)
- LOG.info(_("cgsnapshot %s: deleted successfully"),
+ LOG.info(_LI("cgsnapshot %s: deleted successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.end")
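One last pattern worth noting across these hunks: most converted calls pass interpolation arguments to the logger instead of pre-formatting with %. A brief sketch of the difference (the message text and value are made-up examples):

import logging

LOG = logging.getLogger(__name__)
value = "vol-1234"

# Eager: the string is formatted before logging ever sees it.
LOG.error("Volume %s: create failed" % value)

# Lazy: logging formats only if the record is actually emitted, which is
# the form most of the converted calls above use (arguments after the
# format string).
LOG.error("Volume %s: create failed", value)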