import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.drivers.emc import emc_vmax_fast
from cinder.volume.drivers.emc import emc_vmax_masking
def __init__(self, prtcl, configuration=None):
if not pywbemAvailable:
- LOG.info(_(
+ LOG.info(_LI(
'Module PyWBEM not installed. '
'Install PyWBEM using the python-pywbem package.'))
# add the volume to the default storage group created for
# volumes in pools associated with this fast policy
if extraSpecs[FASTPOLICY]:
- LOG.info(_("Adding volume: %(volumeName)s to default storage group"
- " for FAST policy: %(fastPolicyName)s ")
- % {'volumeName': volumeName,
- 'fastPolicyName': extraSpecs[FASTPOLICY]})
+ LOG.info(_LI("Adding volume: %(volumeName)s to "
+ "default storage group "
+ "for FAST policy: %(fastPolicyName)s "),
+ {'volumeName': volumeName,
+ 'fastPolicyName': extraSpecs[FASTPOLICY]})
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, volumeDict,
volumeDict, volumeName, storageConfigService,
storageSystemName, extraSpecs[FASTPOLICY])
- LOG.info(_("Leaving create_volume: %(volumeName)s "
- "Return code: %(rc)lu "
- "volume dict: %(name)s")
+ LOG.info(_LI("Leaving create_volume: %(volumeName)s "
+ "Return code: %(rc)lu "
+ "volume dict: %(name)s")
% {'volumeName': volumeName,
'rc': rc,
'name': volumeDict})
:param volume: volume Object
"""
- LOG.info(_("Deleting Volume: %(volume)s")
+ LOG.info(_LI("Deleting Volume: %(volume)s")
% {'volume': volume['name']})
rc, volumeName = self._delete_volume(volume)
- LOG.info(_("Leaving delete_volume: %(volumename)s Return code: "
- "%(rc)lu")
+ LOG.info(_LI("Leaving delete_volume: %(volumename)s Return code: "
+ "%(rc)lu")
% {'volumename': volumeName,
'rc': rc})
:param snapshot: snapshot object
:param volume: volume Object to create snapshot from
"""
- LOG.info(_("Delete Snapshot: %(snapshotName)s ")
+ LOG.info(_LI("Delete Snapshot: %(snapshotName)s ")
% {'snapshotName': snapshot['name']})
rc, snapshotName = self._delete_volume(snapshot)
LOG.debug("Leaving delete_snapshot: %(snapshotname)s Return code: "
"""
extraSpecs = self._initial_setup(volume)
volumename = volume['name']
- LOG.info(_("Unmap volume: %(volume)s")
+ LOG.info(_LI("Unmap volume: %(volume)s")
% {'volume': volumename})
device_info = self.find_device_number(volume, connector)
device_number = device_info['hostlunid']
if device_number is None:
- LOG.info(_("Volume %s is not mapped. No volume to unmap.")
+ LOG.info(_LI("Volume %s is not mapped. No volume to unmap.")
% (volumename))
return
extraSpecs = self._initial_setup(volume)
volumeName = volume['name']
- LOG.info(_("Initialize connection: %(volume)s")
+ LOG.info(_LI("Initialize connection: %(volume)s")
% {'volume': volumeName})
self.conn = self._get_ecom_connection()
deviceInfoDict = self._wrap_find_device_number(volume, connector)
deviceInfoDict['hostlunid'] is not None):
# Device is already mapped so we will leave the state as is
deviceNumber = deviceInfoDict['hostlunid']
- LOG.info(_("Volume %(volume)s is already mapped. "
- "The device number is %(deviceNumber)s ")
+ LOG.info(_LI("Volume %(volume)s is already mapped. "
+ "The device number is %(deviceNumber)s ")
% {'volume': volumeName,
'deviceNumber': deviceNumber})
else:
if 'hostlunid' not in deviceInfoDict:
# Did not successfully attach to host,
# so a rollback for FAST is required
- LOG.error(_("Error Attaching volume %(vol)s ")
+ LOG.error(_LE("Error Attaching volume %(vol)s ")
% {'vol': volumeName})
if rollbackDict['fastPolicyName'] is not None:
(
self._initial_setup(volume)
volumename = volume['name']
- LOG.info(_("Terminate connection: %(volume)s")
+ LOG.info(_LI("Terminate connection: %(volume)s")
% {'volume': volumename})
self.conn = self._get_ecom_connection()
poolName = self.utils.parse_pool_name_from_file(emcConfigFileName)
if poolName is None:
- LOG.error(_(
+ LOG.error(_LE(
"PoolName %(poolName)s must be in the file "
"%(emcConfigFileName)s ")
% {'poolName': poolName,
'emcConfigFileName': emcConfigFileName})
arrayName = self.utils.parse_array_name_from_file(emcConfigFileName)
if arrayName is None:
- LOG.error(_(
+ LOG.error(_LE(
"Array Serial Number %(arrayName)s must be in the file "
"%(emcConfigFileName)s ")
% {'arrayName': arrayName,
total_capacity_gb, free_capacity_gb = (
self.fast.get_capacities_associated_to_policy(
self.conn, arrayName, fastPolicyName))
- LOG.info(
+ LOG.info(_LI(
"FAST: capacity stats for policy %(fastPolicyName)s on "
"array %(arrayName)s (total_capacity_gb=%(total_capacity_gb)lu"
- ", free_capacity_gb=%(free_capacity_gb)lu"
+ ", free_capacity_gb=%(free_capacity_gb)lu")
% {'fastPolicyName': fastPolicyName,
'arrayName': arrayName,
'total_capacity_gb': total_capacity_gb,
else: # NON-FAST
total_capacity_gb, free_capacity_gb = (
self.utils.get_pool_capacities(self.conn, poolName, arrayName))
- LOG.info(
+ LOG.info(_LI(
"NON-FAST: capacity stats for pool %(poolName)s on array "
"%(arrayName)s (total_capacity_gb=%(total_capacity_gb)lu, "
- "free_capacity_gb=%(free_capacity_gb)lu"
+ "free_capacity_gb=%(free_capacity_gb)lu")
% {'poolName': poolName,
'arrayName': arrayName,
'total_capacity_gb': total_capacity_gb,
volumeName = volume['name']
volumeStatus = volume['status']
- LOG.info(_("Migrating using retype Volume: %(volume)s")
+ LOG.info(_LI("Migrating using retype Volume: %(volume)s")
% {'volume': volumeName})
extraSpecs = self._initial_setup(volume)
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
- LOG.error(_("Volume %(name)s not found on the array. "
- "No volume to migrate using retype.")
+ LOG.error(_LE("Volume %(name)s not found on the array. "
+ "No volume to migrate using retype.")
% {'name': volumeName})
return False
volumeName, volumeStatus))
if not isValid:
- LOG.error(_("Volume %(name)s is not suitable for storage "
- "assisted migration using retype")
+ LOG.error(_LE("Volume %(name)s is not suitable for storage "
+ "assisted migration using retype")
% {'name': volumeName})
return False
if volume['host'] != host['host']:
:returns: boolean True/False
:returns: list
"""
- LOG.warn(_("The VMAX plugin only supports Retype. "
- "If a pool based migration is necessary "
- "this will happen on a Retype "
- "From the command line: "
- "cinder --os-volume-api-version 2 retype "
- "<volumeId> <volumeType> --migration-policy on-demand"))
+ LOG.warn(_LW("The VMAX plugin only supports Retype. "
+ "If a pool based migration is necessary "
+ "this will happen on a Retype "
+ "From the command line: "
+ "cinder --os-volume-api-version 2 retype "
+ "<volumeId> <volumeType> --migration-policy on-demand"))
return True, {}
def _migrate_volume(
if moved is False and sourceFastPolicyName is not None:
# Return the volume to the default source fast policy storage
# group because the migrate was unsuccessful
- LOG.warn(_("Failed to migrate: %(volumeName)s from "
- "default source storage group "
- "for FAST policy: %(sourceFastPolicyName)s "
- "Attempting cleanup... ")
+ LOG.warn(_LW("Failed to migrate: %(volumeName)s from "
+ "default source storage group "
+ "for FAST policy: %(sourceFastPolicyName)s "
+ "Attempting cleanup... ")
% {'volumeName': volumeName,
'sourceFastPolicyName': sourceFastPolicyName})
if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume(
if not self._migrate_volume_fast_target(
volumeInstance, storageSystemName,
targetFastPolicyName, volumeName):
- LOG.warn(_("Attempting a rollback of: %(volumeName)s to "
- "original pool %(sourcePoolInstanceName)s ")
+ LOG.warn(_LW("Attempting a rollback of: %(volumeName)s to "
+ "original pool %(sourcePoolInstanceName)s ")
% {'volumeName': volumeName,
'sourcePoolInstanceName': sourcePoolInstanceName})
self._migrate_rollback(
:returns: int, the return code from migrate operation
"""
- LOG.warn(_("_migrate_rollback on : %(volumeName)s from ")
+ LOG.warn(_LW("_migrate_rollback on : %(volumeName)s from ")
% {'volumeName': volumeName})
storageRelocationService = self.utils.find_storage_relocation_service(
:returns: int, the return code from migrate operation
"""
- LOG.warn(_("_migrate_cleanup on : %(volumeName)s from ")
+ LOG.warn(_LW("_migrate_cleanup on : %(volumeName)s from ")
% {'volumeName': volumeName})
controllerConfigurationService = (
:returns: boolean True/False
"""
falseRet = False
- LOG.info(_("Adding volume: %(volumeName)s to default storage group "
- "for FAST policy: %(fastPolicyName)s ")
+ LOG.info(_LI("Adding volume: %(volumeName)s to default storage group "
+ "for FAST policy: %(fastPolicyName)s ")
% {'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})
except Exception as e:
# rollback by deleting the volume if adding the volume to the
# default storage group were to fail
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
exceptionMessage = (_("Error migrating volume: %(volumename)s. "
"to target pool %(targetPoolName)s. ")
% {'volumename': volumeName,
conn, controllerConfigurationService,
volumeInstance.path, volumeName, sourceFastPolicyName))
except Exception as ex:
- LOG.error(_("Exception: %s") % six.text_type(ex))
+ LOG.error(_LE("Exception: %s") % six.text_type(ex))
exceptionMessage = (_("Failed to remove: %(volumename)s. "
"from the default storage group for "
"FAST policy %(fastPolicyName)s. ")
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
- LOG.error(_('Error getting target pool name and array'))
+ LOG.error(_LE('Error getting target pool name and array'))
return falseRet
info = host['capabilities']['location_info']
targetPoolName = infoDetail[1]
targetFastPolicy = infoDetail[2]
except Exception:
- LOG.error(_("Error parsing target pool name, array, "
- "and fast policy"))
+ LOG.error(_LE("Error parsing target pool name, array, "
+ "and fast policy"))
if targetArraySerialNumber not in sourceArraySerialNumber:
errorMessage = (_(
rc, targetEndpoints = self.provision.get_target_endpoints(
self.conn, storageHardwareService, hardwareIdInstance)
except Exception as ex:
- LOG.error(_("Exception: %s") % six.text_type(ex))
+ LOG.error(_LE("Exception: %s") % six.text_type(ex))
errorMessage = (_(
"Unable to get target endpoints for hardwareId "
"%(hardwareIdInstance)s")
if not any(d == wwn for d in targetWwns):
targetWwns.append(wwn)
else:
- LOG.error(_(
+ LOG.error(_LE(
"Target end points do not exist for hardware Id : "
"%(hardwareIdInstance)s ")
% {'hardwareIdInstance': hardwareIdInstance})
except Exception as e:
# rollback by deleting the volume if adding the volume to the
# default storage group were to fail
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
errorMessage = (_(
"Rolling back %(volumeName)s by deleting it. ")
% {'volumeName': volumeName})
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']
- LOG.info(_("Create a Clone from Volume: Clone Volume: %(cloneName)s "
- "Source Volume: %(sourceName)s")
+ LOG.info(_LI("Create a Clone from Volume: Clone "
+ "Volume: %(cloneName)s "
+ "Source Volume: %(sourceName)s")
% {'cloneName': cloneName,
'sourceName': sourceName})
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
- LOG.error(_("Volume %(name)s not found on the array. "
- "No volume to delete.")
+ LOG.error(_LE("Volume %(name)s not found on the array. "
+ "No volume to delete.")
% {'name': volumeName})
return errorRet
'fastPolicyName': fastPolicyName})
LOG.error(errorMsg)
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
errorMessage = (_("Failed to delete volume %(volumeName)s")
% {'volumeName': volumeName})
LOG.error(errorMessage)
self.masking.get_associated_masking_group_from_device(
self.conn, volumeInstanceName))
if storageGroupInstanceName is not None:
- LOG.warn(_("Pre check for deletion "
- "Volume: %(volumeName)s is part of a storage group "
- "Attempting removal from %(storageGroupInstanceName)s ")
+ LOG.warn(_LW("Pre check for deletion "
+ "Volume: %(volumeName)s is part of a storage group "
+ "Attempting removal "
+ "from %(storageGroupInstanceName)s ")
% {'volumeName': volumeName,
'storageGroupInstanceName': storageGroupInstanceName})
self.provision.remove_device_from_storage_group(
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.drivers.emc import emc_vmax_fast
from cinder.volume.drivers.emc import emc_vmax_provision
if self._is_volume_in_storage_group(
conn, storageGroupInstanceName,
volumeInstance):
- LOG.warn(_(
+ LOG.warn(_LW(
"Volume: %(volumeName)s is already part "
"of storage group %(sgGroupName)s ")
% {'volumeName': volumeName,
conn, controllerConfigService, volumeInstance, volumeName,
fastPolicyName, defaultStorageGroupInstanceName)
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
errorMessage = (_(
"Failed to get or create masking view %(maskingViewName)s ")
% {'maskingViewName': maskingViewName})
% {'foundElement': foundStorageGroupInstance['ElementName']})
if (foundStorageGroupInstance['ElementName'] == (
storageGroupInstance['ElementName'])):
- LOG.warn(_(
+ LOG.warn(_LW(
"The volume is already part of storage group: "
"%(storageGroupInstanceName)s. ")
% {'storageGroupInstanceName': storageGroupInstanceName})
conn, controllerConfigService, storageGroupName,
volumeInstance.path))
if foundStorageGroupInstanceName is None:
- LOG.error(_(
+ LOG.error(_LE(
"Cannot get storage Group from job : %(storageGroupName)s. ")
% {'storageGroupName': storageGroupName})
return failedRet
else:
- LOG.info(_(
+ LOG.info(_LI(
"Created new storage group: %(storageGroupName)s ")
% {'storageGroupName': storageGroupName})
foundStorageGroupInstanceName,
storageGroupName, fastPolicyName))
if assocTierPolicyInstanceName is None:
- LOG.error(_(
+ LOG.error(_LE(
"Cannot add and verify tier policy association for storage"
" group : %(storageGroupName)s to FAST policy : "
"%(fastPolicyName)s. ")
break
if foundPortGroupInstanceName is None:
- LOG.error(_(
+ LOG.error(_LE(
"Could not find port group : %(portGroupName)s. Check that the"
" EMC configuration file has the correct port group name. ")
% {'portGroupName': portGroupName})
self._get_storage_hardware_id_instance_names(
conn, initiatorNames, storageSystemName))
if not storageHardwareIDInstanceNames:
- LOG.error(_(
+ LOG.error(_LE(
"Initiator Name(s) %(initiatorNames)s are not on array "
"%(storageSystemName)s ")
% {'initiatorNames': initiatorNames,
conn, controllerConfigService, igGroupName,
storageHardwareIDInstanceNames)
- LOG.info("Created new initiator group name: %(igGroupName)s "
+ LOG.info(_LI("Created new initiator group name: %(igGroupName)s ")
% {'igGroupName': igGroupName})
else:
- LOG.info("Using existing initiator group name: %(igGroupName)s "
+ LOG.info(_LI("Using existing initiator "
+ "group name: %(igGroupName)s ")
% {'igGroupName': igGroupName})
return foundInitiatorGroupInstanceName
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
- LOG.info(_("Created new masking view : %(maskingViewName)s ")
+ LOG.info(_LI("Created new masking view : %(maskingViewName)s ")
% {'maskingViewName': maskingViewName})
return rc, job
else:
if self._is_volume_in_storage_group(
conn, storageGroupInstanceName, volumeInstance):
- LOG.warn(_("Volume: %(volumeName)s is already "
- "part of storage group %(sgGroupName)s ")
+ LOG.warn(_LW("Volume: %(volumeName)s is already "
+ "part of storage group %(sgGroupName)s ")
% {'volumeName': volumeName,
'sgGroupName': sgGroupName})
else:
LOG.error(errorMessage)
return foundPortGroupInstanceName
- LOG.info(_(
+ LOG.info(_LI(
"Port group instance name is %(foundPortGroupInstanceName)s")
% {'foundPortGroupInstanceName': foundPortGroupInstanceName})
conn, controllerConfigService, volumeInstance,
fastPolicyName, volumeName)
except Exception as e:
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
errorMessage = (_(
"Rollback for Volume: %(volumeName)s has failed. "
"Please contact your system administrator to manually return "
self._get_storage_hardware_id_instance_names(
conn, initiatorNames, storageSystemName))
if not storageHardwareIDInstanceNames:
- LOG.error(_(
+ LOG.error(_LE(
"Initiator Name(s) %(initiatorNames)s are not on "
"array %(storageSystemName)s ")
% {'initiatorNames': initiatorNames,
"%(maskingViewName)s. "
% {'maskingViewName': maskingViewName})
else:
- LOG.error(_(
+ LOG.error(_LE(
"One of the components of the original masking view "
"%(maskingViewName)s cannot be retrieved so "
"please contact your system administrator to check "
tierPolicyInstanceName = self.fast.get_tier_policy_by_name(
conn, storageSystemInstanceName['Name'], fastPolicyName)
- LOG.info(_(
+ LOG.info(_LI(
"policy:%(policy)s, policy service:%(service)s, "
"masking group=%(maskingGroup)s")
% {'policy': tierPolicyInstanceName,
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.volume.drivers.emc import emc_vmax_utils
rc = self._terminate_migrate_session(
conn, volumeInstanceName)
except Exception as ex:
- LOG.error(_("Exception: %s") % six.text_type(ex))
+ LOG.error(_LE("Exception: %s") % six.text_type(ex))
exceptionMessage = (_(
"Failed to terminate migrate session"))
LOG.error(exceptionMessage)
conn, storageRelocationServiceInstanceName,
volumeInstanceName, targetPoolInstanceName)
except Exception as ex:
- LOG.error(_("Exception: %s") % six.text_type(ex))
+ LOG.error(_LE("Exception: %s") % six.text_type(ex))
exceptionMessage = (_(
"Failed to migrate volume for the second time"))
LOG.error(exceptionMessage)
data=exceptionMessage)
else:
- LOG.error(_("Exception: %s") % six.text_type(ex))
+ LOG.error(_LE("Exception: %s") % six.text_type(ex))
exceptionMessage = (_(
"Failed to migrate volume for the first time"))
LOG.error(exceptionMessage)
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder import utils
"""Read an xml element."""
try:
val = root.findtext(element)
- LOG.info(_("%(element)s: %(val)s")
+ LOG.info(_LI("%(element)s: %(val)s")
% {'element': element,
'val': val})
if val:
except ETree.ParseError:
if check:
with excutils.save_and_reraise_exception():
- LOG.error(_("XML exception reading parameter: %s") % element)
+ LOG.error(_LE("XML exception reading parameter: %s") % element)
else:
- LOG.info(_("XML exception reading parameter: %s") % element)
+ LOG.info(_LI("XML exception reading parameter: %s") % element)
return None
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
svc['port'], svc['hdp']) # ip, ipp, ctl, port, hdp
else:
- LOG.error(_("No configuration found for service: %s") % label)
+ LOG.error(_LE("No configuration found for service: %s") % label)
raise exception.ParameterNotFound(param=label)
return service
lst.extend([self.config['snapshot_hdp'], ])
for hdp in lst:
if hdp not in hdpl:
- LOG.error(_("HDP not found: %s") % hdp)
+ LOG.error(_LE("HDP not found: %s") % hdp)
err = "HDP not found: " + hdp
raise exception.ParameterNotFound(param=err)
self.config['services'][svc]['iscsi_port'] = (
iscsi_info[svc_ip]['iscsi_port'])
else: # config iscsi address not found on device!
- LOG.error(_("iSCSI portal not found for service: %s") % svc_ip)
+ LOG.error(_LE("iSCSI portal not found "
+ "for service: %s") % svc_ip)
raise exception.ParameterNotFound(param=svc_ip)
return
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
try:
val = root.findtext(element)
- LOG.info(_("%(element)s: %(val)s")
+ LOG.info(_LI("%(element)s: %(val)s")
% {'element': element,
'val': val})
if val:
except ETree.ParseError:
if check:
with excutils.save_and_reraise_exception():
- LOG.error(_("XML exception reading parameter: %s") % element)
+ LOG.error(_LE("XML exception reading "
+ "parameter: %s") % element)
else:
- LOG.info(_("XML exception reading parameter: %s") % element)
+ LOG.info(_LI("XML exception reading parameter: %s") % element)
return None
self.type = 'HNAS'
self.platform = self.type.lower()
- LOG.info(_("Backend type: %s") % self.type)
+ LOG.info(_LI("Backend type: %s") % self.type)
self.bend = factory_bend(self.type)
def _array_info_get(self):
if label not in self.config['services'].keys():
# default works if no match is found
label = 'default'
- LOG.info(_("Using default: instead of %s") % label)
- LOG.info(_("Available services: %s")
+ LOG.info(_LI("Using default: instead of %s") % label)
+ LOG.info(_LI("Available services: %s")
% self.config['services'].keys())
if label in self.config['services'].keys():
if self.config['chap_enabled'] == 'True':
# it may not exist, create and set secret
if 'iscsi_secret' not in svc:
- LOG.info(_("Retrieving secret for service: %s")
+ LOG.info(_LI("Retrieving secret for service: %s")
% label)
out = self.bend.get_targetsecret(self.config['hnas_cmd'],
svc['iscsi_secret'] = ""
if 'iscsi_target' not in svc:
- LOG.info(_("Retrieving target for service: %s") % label)
+ LOG.info(_LI("Retrieving target for service: %s") % label)
out = self.bend.get_targetiqn(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
svc['port'], svc['hdp'], svc['iscsi_target'],
svc['iscsi_secret'])
else:
- LOG.info(_("Available services: %s")
+ LOG.info(_LI("Available services: %s")
% self.config['services'].keys())
- LOG.error(_("No configuration found for service: %s")
+ LOG.error(_LE("No configuration found for service: %s")
% label)
raise exception.ParameterNotFound(param=label)
hnas_stat['QoS_support'] = False
hnas_stat['reserved_percentage'] = 0
- LOG.info(_("stats: stats: %s") % hnas_stat)
+ LOG.info(_LI("stats: stats: %s") % hnas_stat)
return hnas_stat
def _get_hdp_list(self):
hdp_list.extend(inf[1:2])
# returns a list of HDP IDs
- LOG.info(_("HDP list: %s") % hdp_list)
+ LOG.info(_LI("HDP list: %s") % hdp_list)
return hdp_list
def _check_hdp_list(self):
for hdp in lst:
if hdp not in hdpl:
- LOG.error(_("HDP not found: %s") % hdp)
+ LOG.error(_LE("HDP not found: %s") % hdp)
err = "HDP not found: " + hdp
raise exception.ParameterNotFound(param=err)
# status, verify corresponding status is Normal
self._check_hdp_list()
iscsi_info = self._get_iscsi_info()
- LOG.info(_("do_setup: %s") % iscsi_info)
+ LOG.info(_LI("do_setup: %s") % iscsi_info)
for svc in self.config['services'].keys():
svc_ip = self.config['services'][svc]['iscsi_ip']
if svc_ip in iscsi_info.keys():
- LOG.info(_("iSCSI portal found for service: %s") % svc_ip)
+ LOG.info(_LI("iSCSI portal found for service: %s") % svc_ip)
self.config['services'][svc]['port'] = \
iscsi_info[svc_ip]['port']
self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl']
self.config['services'][svc]['iscsi_port'] = \
iscsi_info[svc_ip]['iscsi_port']
else: # config iscsi address not found on device!
- LOG.error(_("iSCSI portal not found for service: %s") % svc_ip)
+ LOG.error(_LE("iSCSI portal not found "
+ "for service: %s") % svc_ip)
raise exception.ParameterNotFound(param=svc_ip)
def ensure_export(self, context, volume):
'%s' % (int(volume['size']) * units.Ki),
volume['name'])
- LOG.info(_("create_volume: create_lu returns %s") % out)
+ LOG.info(_LI("create_volume: create_lu returns %s") % out)
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
# Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd
- LOG.info(_("LUN %(lun)s of size %(sz)s MB is created.")
+ LOG.info(_LI("LUN %(lun)s of size %(sz)s MB is created.")
% {'lun': lun, 'sz': sz})
return {'provider_location': lun}
'%s' % (new_size * units.Ki),
volume['name'])
- LOG.info(_("LUN %(lun)s extended to %(size)s GB.")
+ LOG.info(_LI("LUN %(lun)s extended to %(size)s GB.")
% {'lun': lun, 'size': new_size})
def delete_volume(self, volume):
myid = self.arid
if arid != myid:
- LOG.error(_('Array mismatch %(myid)s vs %(arid)s')
+ LOG.error(_LE('Array mismatch %(myid)s vs %(arid)s')
% {'myid': myid,
'arid': arid})
msg = 'Array id mismatch in delete snapshot'
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
from cinder.image import image_utils
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
try:
val = root.findtext(element)
- LOG.info(_("%(element)s: %(val)s")
+ LOG.info(_LI("%(element)s: %(val)s")
% {'element': element,
'val': val})
if val:
except ETree.ParseError:
if check:
with excutils.save_and_reraise_exception():
- LOG.error(_("XML exception reading parameter: %s") % element)
+ LOG.error(_LE("XML exception reading parameter: %s") % element)
else:
- LOG.info(_("XML exception reading parameter: %s") % element)
+ LOG.info(_LI("XML exception reading parameter: %s") % element)
return None
LOG.info("Get service: %s->%s" % (label, svc['fslabel']))
service = (svc['hdp'], svc['path'], svc['fslabel'])
else:
- LOG.info(_("Available services: %s")
+ LOG.info(_LI("Available services: %s")
% self.config['services'].keys())
- LOG.error(_("No configuration found for service: %s") % label)
+ LOG.error(_LE("No configuration found for service: %s") % label)
raise exception.ParameterNotFound(param=label)
return service
if self._is_file_size_equal(path, new_size):
return
else:
- LOG.info(_('Resizing file to %sG'), new_size)
+ LOG.info(_LI('Resizing file to %sG'), new_size)
image_utils.resize_image(path, new_size)
if self._is_file_size_equal(path, new_size):
- LOG.info(_("LUN %(id)s extended to %(size)s GB.")
+ LOG.info(_LI("LUN %(id)s extended to %(size)s GB.")
% {'id': volume['id'], 'size': new_size})
return
else:
tries += 1
if tries >= self.configuration.num_shell_tries:
raise
- LOG.exception(_("Recovering from a failed execute. "
- "Try number %s"), tries)
+ LOG.exception(_LE("Recovering from a failed execute. "
+ "Try number %s"), tries)
time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name):
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
ip = utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
- LOG.error(_('Error resolving host %(host)s. Error - %(e)s.')
+ LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
% {'host': host, 'e': e})
return None
msg = _('Controller ips not valid after resolution.')
raise exception.NoValidHost(reason=msg)
if host in ips:
- LOG.info(_('Embedded mode detected.'))
+ LOG.info(_LI('Embedded mode detected.'))
system = self._client.list_storage_systems()[0]
else:
- LOG.info(_('Proxy mode detected.'))
+ LOG.info(_LI('Proxy mode detected.'))
system = self._client.register_storage_system(
ips, password=self.configuration.netapp_sa_password)
self._client.set_system_id(system.get('id'))
# password was not in sync previously.
if ((status == 'nevercontacted') or
(password_not_in_sync and status == 'passwordoutofsync')):
- LOG.info(_('Waiting for web service array communication.'))
+ LOG.info(_LI('Waiting for web service array communication.'))
time.sleep(self.SLEEP_SECS)
comm_time = comm_time + self.SLEEP_SECS
if comm_time >= sa_comm_timeout:
status == 'offline'):
msg = _("System %(id)s found with bad status - %(status)s.")
raise exception.NetAppDriverException(msg % msg_dict)
- LOG.info(_("System %(id)s has %(status)s status.") % msg_dict)
+ LOG.info(_LI("System %(id)s has %(status)s status.") % msg_dict)
return True
def _populate_system_objects(self):
def _cache_allowed_disk_pool_refs(self):
"""Caches disk pools refs as per pools configured by user."""
d_pools = self.configuration.netapp_storage_pools
- LOG.info(_('Configured storage pools %s.'), d_pools)
+ LOG.info(_LI('Configured storage pools %s.'), d_pools)
pools = [x.strip().lower() if x else None for x in d_pools.split(',')]
for pool in self._client.list_storage_pools():
if (pool.get('raidLevel') == 'raidDiskPool'
try:
vol = self._client.create_volume(target_pool['volumeGroupRef'],
eseries_volume_label, size_gb)
- LOG.info(_("Created volume with label %s."), eseries_volume_label)
+ LOG.info(_LI("Created volume with "
+ "label %s."), eseries_volume_label)
except exception.NetAppDriverException as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error creating volume. Msg - %s."),
+ LOG.error(_LE("Error creating volume. Msg - %s."),
six.text_type(e))
return vol
try:
vol = self._client.create_volume(pool['volumeGroupRef'],
label, size_gb)
- LOG.info(_("Created volume with label %s."), label)
+ LOG.info(_LI("Created volume with label %s."), label)
return vol
except exception.NetAppDriverException as e:
- LOG.error(_("Error creating volume. Msg - %s."), e)
+ LOG.error(_LE("Error creating volume. Msg - %s."), e)
msg = _("Failure creating volume %s.")
raise exception.NetAppDriverException(msg % label)
src_vol = self._create_snapshot_volume(snapshot['id'])
self._copy_volume_high_prior_readonly(src_vol, dst_vol)
self._cache_volume(dst_vol)
- LOG.info(_("Created volume with label %s."), label)
+ LOG.info(_LI("Created volume with label %s."), label)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
self._client.delete_volume(dst_vol['volumeRef'])
try:
self._client.delete_snapshot_volume(src_vol['id'])
except exception.NetAppDriverException as e:
- LOG.error(_("Failure deleting snap vol. Error: %s."), e)
+ LOG.error(_LE("Failure deleting snap vol. Error: %s."), e)
else:
- LOG.warn(_("Snapshot volume not found."))
+ LOG.warn(_LW("Snapshot volume not found."))
def _create_snapshot_volume(self, snapshot_id):
"""Creates snapshot volume for given group with snapshot_id."""
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
- LOG.info(_("Copying src vol %(src)s to dest vol %(dst)s.")
+ LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
% {'src': src_vol['label'], 'dst': dst_vol['label']})
try:
job = None
time.sleep(self.SLEEP_SECS)
continue
if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
- LOG.error(_("Vol copy job status %s."), j_st['status'])
+ LOG.error(_LE("Vol copy job status %s."), j_st['status'])
msg = _("Vol copy job for dest %s failed.")\
% dst_vol['label']
raise exception.NetAppDriverException(msg)
- LOG.info(_("Vol copy job completed for dest %s.")
+ LOG.info(_LI("Vol copy job completed for dest %s.")
% dst_vol['label'])
break
finally:
try:
self._client.delete_vol_copy_job(job['volcopyRef'])
except exception.NetAppDriverException:
- LOG.warn(_("Failure deleting job %s."), job['volcopyRef'])
+ LOG.warn(_LW("Failure deleting "
+ "job %s."), job['volcopyRef'])
else:
- LOG.warn(_('Volume copy job for src vol %s not found.'),
+ LOG.warn(_LW('Volume copy job for src vol %s not found.'),
src_vol['id'])
- LOG.info(_('Copy job to dest vol %s completed.'), dst_vol['label'])
+ LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
try:
self.delete_snapshot(snapshot)
except exception.NetAppDriverException:
- LOG.warn(_("Failure deleting temp snapshot %s."),
+ LOG.warn(_LW("Failure deleting temp snapshot %s."),
snapshot['id'])
def delete_volume(self, volume):
vol = self._get_volume(volume['id'])
self._delete_volume(vol['label'])
except KeyError:
- LOG.info(_("Volume %s already deleted."), volume['id'])
+ LOG.info(_LI("Volume %s already deleted."), volume['id'])
return
def _delete_volume(self, label):
snap_image = self._client.create_snapshot_image(
snap_grp['pitGroupRef'])
self._cache_snap_img(snap_image)
- LOG.info(_("Created snap grp with label %s."), snapshot_name)
+ LOG.info(_LI("Created snap grp with label %s."), snapshot_name)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
if snap_image is None and snap_grp:
try:
snap_grp = self._get_cached_snapshot_grp(snapshot['id'])
except KeyError:
- LOG.warn(_("Snapshot %s already deleted.") % snapshot['id'])
+ LOG.warn(_LW("Snapshot %s already deleted.") % snapshot['id'])
return
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
snapshot_name = snap_grp['label']
LOG.warn(msg % {'l': host['label'], 'e': e.msg})
return host
except exception.NotFound as e:
- LOG.warn(_("Message - %s."), e.msg)
+ LOG.warn(_LW("Message - %s."), e.msg)
return self._create_host(port_id, host_type)
def _get_host_with_port(self, port_id):
def _create_host(self, port_id, host_type):
"""Creates host on system with given initiator as port_id."""
- LOG.info(_("Creating host with port %s."), port_id)
+ LOG.info(_LI("Creating host with port %s."), port_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
port_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
host_type = self._get_host_type_definition(host_type)
new_vol = stage_2
self._cache_volume(new_vol)
self._cache_volume(stage_1)
- LOG.info(_('Extended volume with label %s.'), src_label)
+ LOG.info(_LI('Extended volume with label %s.'), src_label)
except exception.NetAppDriverException:
if stage_1 == 0:
with excutils.save_and_reraise_exception():
"""Removes tmp vols with no snapshots."""
try:
if not utils.set_safe_attr(self, 'clean_job_running', True):
- LOG.warn(_('Returning as clean tmp vol job already running.'))
+ LOG.warn(_LW('Returning as clean tmp '
+ 'vol job already running.'))
return
for label in self._objects['volumes']['label_ref'].keys():
if (label.startswith('tmp-') and
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
self.extend_volume(volume, volume['size'])
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(
- _("Resizing %s failed. Cleaning volume."), new_name)
+ LOG.error(_LE("Resizing %s failed. "
+ "Cleaning volume."), new_name)
self.delete_volume(volume)
def terminate_connection(self, volume, connector, **kwargs):
attr = getattr(self._get_lun_from_table(name), attr)
return attr
except exception.VolumeNotFound as e:
- LOG.error(_("Message: %s"), e.msg)
+ LOG.error(_LE("Message: %s"), e.msg)
except Exception as e:
- LOG.error(_("Error getting lun attribute. Exception: %s"),
+ LOG.error(_LE("Error getting lun attribute. Exception: %s"),
e.__str__())
return None
self.extend_volume(volume, volume['size'])
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(
- _("Resizing %s failed. Cleaning volume."), new_name)
+ LOG.error(_LE("Resizing %s failed. "
+ "Cleaning volume."), new_name)
self.delete_volume(volume)
def get_volume_stats(self, refresh=False):
self._do_sub_clone_resize(path, new_size_bytes)
self.lun_table[name].size = new_size_bytes
else:
- LOG.info(_("No need to extend volume %s"
- " as it is already the requested new size."), name)
+ LOG.info(_LI("No need to extend volume %s"
+ " as it is already the requested new size."), name)
def _do_direct_resize(self, path, new_size_bytes, force=True):
"""Uses the resize api to resize the lun."""
seg = path.split("/")
- LOG.info(_("Resizing lun %s directly to new size."), seg[-1])
+ LOG.info(_LI("Resizing lun %s directly to new size."), seg[-1])
lun_resize = NaElement("lun-resize")
lun_resize.add_new_child('path', path)
lun_resize.add_new_child('size', new_size_bytes)
geometry['max_resize'] =\
result.get_child_content("max-resize-size")
except Exception as e:
- LOG.error(_("Lun %(path)s geometry failed. Message - %(msg)s")
+ LOG.error(_LE("Lun %(path)s geometry failed. Message - %(msg)s")
% {'path': path, 'msg': e.message})
return geometry
after a successful clone.
"""
seg = path.split("/")
- LOG.info(_("Resizing lun %s using sub clone to new size."), seg[-1])
+ LOG.info(_LI("Resizing lun %s using sub clone to new size."), seg[-1])
name = seg[-1]
vol_name = seg[2]
lun = self._get_lun_from_table(name)
"""Try post sub clone resize in a transactional manner."""
st_tm_mv, st_nw_mv, st_del_old = None, None, None
seg = path.split("/")
- LOG.info(_("Post clone resize lun %s"), seg[-1])
+ LOG.info(_LI("Post clone resize lun %s"), seg[-1])
new_lun = 'new-%s' % (seg[-1])
tmp_lun = 'tmp-%s' % (seg[-1])
tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
raise exception.VolumeBackendAPIException(
data=msg % (seg[-1]))
elif st_del_old is None:
- LOG.error(_("Failure deleting staged tmp lun %s."),
+ LOG.error(_LE("Failure deleting staged tmp lun %s."),
tmp_lun)
else:
- LOG.error(_("Unknown exception in"
- " post clone resize lun %s."), seg[-1])
- LOG.error(_("Exception details: %s") % (e.__str__()))
+ LOG.error(_LE("Unknown exception in"
+ " post clone resize lun %s."), seg[-1])
+ LOG.error(_LE("Exception details: %s") % (e.__str__()))
def _get_lun_block_count(self, path):
"""Gets block counts for the lun."""
volume_name = vol.get_child_content('name')
if self._get_vol_option(volume_name, 'root') == 'true':
return volume_name
- LOG.warn(_('Could not determine root volume name '
- 'on %s.') % self._get_owner())
+ LOG.warn(_LW('Could not determine root volume name '
+ 'on %s.') % self._get_owner())
return None
def _get_igroup_by_initiator(self, initiator):
if luns:
lun_list.extend(luns)
except NaApiError:
- LOG.warn(_("Error finding luns for volume %s."
- " Verify volume exists.") % (vol))
+ LOG.warn(_LW("Error finding luns for volume %s."
+ " Verify volume exists.") % (vol))
else:
luns = self._get_vol_luns(None)
lun_list.extend(luns)
try:
job_set = set_safe_attr(self, 'vol_refresh_running', True)
if not job_set:
- LOG.warn(
- _("Volume refresh job already running. Returning..."))
+ LOG.warn(_LW("Volume refresh job already "
+ "running. Returning..."))
return
self.vol_refresh_voluntary = False
self.vols = self._get_filer_volumes()
self.vol_refresh_time = timeutils.utcnow()
except Exception as e:
- LOG.warn(_("Error refreshing volume info. Message: %s"),
+ LOG.warn(_LW("Error refreshing volume info. Message: %s"),
six.text_type(e))
finally:
set_safe_attr(self, 'vol_refresh_running', False)
import six.moves.urllib.parse as urlparse
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
self.extend_volume(volume, vol_size)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(
- _("Resizing %s failed. Cleaning volume."),
- volume.name)
+ LOG.error(_LE("Resizing %s failed. Cleaning volume."),
+ volume.name)
self._execute('rm', path, run_as_root=run_as_root)
else:
raise exception.CinderException(
tries = tries + 1
if tries >= self.configuration.num_shell_tries:
raise
- LOG.exception(_("Recovering from a failed execute. "
- "Try number %s"), tries)
+ LOG.exception(_LE("Recovering from a failed execute. "
+ "Try number %s"), tries)
time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name):
try:
self.extend_volume(volume, vol_size)
except Exception as e:
- LOG.error(
- _("Resizing %s failed. Cleaning volume."), volume.name)
+ LOG.error(_LE("Resizing %s failed. "
+ "Cleaning volume. "), volume.name)
self._execute('rm', path,
run_as_root=self._execute_as_root)
raise e
"""Fetch the image from image_service and write it to the volume."""
super(NetAppNFSDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
- LOG.info(_('Copied image to volume %s using regular download.'),
+ LOG.info(_LI('Copied image to volume %s using regular download.'),
volume['name'])
self._register_image_in_cache(volume, image_id)
def _register_image_in_cache(self, volume, image_id):
"""Stores image in the cache."""
file_name = 'img-cache-%s' % image_id
- LOG.info(_("Registering image in cache %s"), file_name)
+ LOG.info(_LI("Registering image in cache %s"), file_name)
try:
self._do_clone_rel_img_cache(
volume['name'], file_name,
volume['provider_location'], file_name)
except Exception as e:
- LOG.warn(
- _('Exception while registering image %(image_id)s'
- ' in cache. Exception: %(exc)s')
- % {'image_id': image_id, 'exc': e.__str__()})
+ LOG.warn(_LW('Exception while registering image %(image_id)s'
+ ' in cache. Exception: %(exc)s')
+ % {'image_id': image_id, 'exc': e.__str__()})
def _find_image_in_cache(self, image_id):
"""Finds image in cache and returns list of shares with file name."""
dir = self._get_mount_point_for_share(share)
file_path = '%s/%s' % (dir, dst)
if not os.path.exists(file_path):
- LOG.info(_('Cloning from cache to destination %s'), dst)
+ LOG.info(_LI('Cloning from cache to destination %s'), dst)
self._clone_volume(src, dst, volume_id=None, share=share)
_do_clone()
self._get_capacity_info(share)
avl_percent = int((total_avl / total_size) * 100)
if avl_percent <= thres_size_perc_start:
- LOG.info(_('Cleaning cache for share %s.'), share)
+ LOG.info(_LI('Cleaning cache for share %s.'), share)
eligible_files = self._find_old_cache_files(share)
threshold_size = int(
(thres_size_perc_stop * total_size) / 100)
else:
continue
except Exception as e:
- LOG.warn(_(
- 'Exception during cache cleaning'
- ' %(share)s. Message - %(ex)s')
- % {'share': share, 'ex': e.__str__()})
+ LOG.warn(_LW('Exception during cache cleaning'
+ ' %(share)s. Message - %(ex)s')
+ % {'share': share, 'ex': e.__str__()})
continue
finally:
LOG.debug('Image cache cleaning done.')
self._execute(*cmd, run_as_root=self._execute_as_root)
return True
except Exception as ex:
- LOG.warning(_('Exception during deleting %s'), ex.__str__())
+ LOG.warning(_LW('Exception during deleting %s'), ex.__str__())
return False
def clone_image(self, volume, image_location, image_id, image_meta):
post_clone = self._post_clone_image(volume)
except Exception as e:
msg = e.msg if getattr(e, 'msg', None) else e.__str__()
- LOG.info(_('Image cloning unsuccessful for image'
- ' %(image_id)s. Message: %(msg)s')
+ LOG.info(_LI('Image cloning unsuccessful for image'
+ ' %(image_id)s. Message: %(msg)s')
% {'image_id': image_id, 'msg': msg})
vol_path = self.local_path(volume)
volume['provider_location'] = None
def _clone_from_cache(self, volume, image_id, cache_result):
"""Clones a copy from image cache."""
cloned = False
- LOG.info(_('Cloning image %s from cache'), image_id)
+ LOG.info(_LI('Cloning image %s from cache'), image_id)
for res in cache_result:
# Repeat tries in other shares if failed in some
(share, file_name) = res
volume['provider_location'] = share
break
except Exception:
- LOG.warn(_('Unexpected exception during'
- ' image cloning in share %s'), share)
+ LOG.warn(_LW('Unexpected exception during'
+ ' image cloning in share %s'), share)
return cloned
def _direct_nfs_clone(self, volume, image_location, image_id):
"""Clone directly in nfs share."""
- LOG.info(_('Checking image clone %s from glance share.'), image_id)
+ LOG.info(_LI('Checking image clone %s from glance share.'), image_id)
cloned = False
image_location = self._construct_image_nfs_url(image_location)
share = self._is_cloneable_share(image_location)
volume_id=None, share=share)
cloned = True
else:
- LOG.info(
- _('Image will locally be converted to raw %s'),
- image_id)
+ LOG.info(_LI('Image will locally be converted to raw %s'),
+ image_id)
dst = '%s/%s' % (dir_path, volume['name'])
image_utils.convert_image(img_path, dst, 'raw',
run_as_root=run_as_root)
def _post_clone_image(self, volume):
"""Do operations post image cloning."""
- LOG.info(_('Performing post clone for %s'), volume['name'])
+ LOG.info(_LI('Performing post clone for %s'), volume['name'])
vol_path = self.local_path(volume)
if self._discover_file_till_timeout(vol_path):
self._set_rw_permissions(vol_path)
if self._is_file_size_equal(path, new_size):
return
else:
- LOG.info(_('Resizing file to %sG'), new_size)
+ LOG.info(_LI('Resizing file to %sG'), new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if self._is_file_size_equal(path, new_size):
return True
else:
if retry_seconds <= 0:
- LOG.warn(_('Discover file retries exhausted.'))
+ LOG.warn(_LW('Discover file retries exhausted.'))
return False
else:
time.sleep(sleep_interval)
share_candidates)
return self._share_match_for_ip(ip, share_candidates)
except Exception:
- LOG.warn(_("Unexpected exception while short listing used share."))
+ LOG.warn(_LW("Unexpected exception while short "
+ "listing used share."))
return None
def _construct_image_nfs_url(self, image_location):
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
- LOG.info(_('Extending volume %s.'), volume['name'])
+ LOG.info(_LI('Extending volume %s.'), volume['name'])
path = self.local_path(volume)
self._resize_image_file(path, new_size)
@utils.synchronized(dest_path, external=True)
def _move_file(src, dst):
if os.path.exists(dst):
- LOG.warn(_("Destination %s already exists."), dst)
+ LOG.warn(_LW("Destination %s already exists."), dst)
return False
self._execute('mv', src, dst,
run_as_root=self._execute_as_root)
try:
return _move_file(source_path, dest_path)
except Exception as e:
- LOG.warn(_('Exception moving file %(src)s. Message - %(e)s')
+ LOG.warn(_LW('Exception moving file %(src)s. Message - %(e)s')
% {'src': source_path, 'e': e})
return False
self.stale_vols = set()
if self.vserver:
self.ssc_enabled = True
- LOG.info(_("Shares on vserver %s will only"
- " be used for provisioning.") % (self.vserver))
+ LOG.info(_LI("Shares on vserver %s will only"
+ " be used for provisioning.") % (self.vserver))
else:
self.ssc_enabled = False
- LOG.warn(_("No vserver set in config. SSC will be disabled."))
+ LOG.warn(_LW("No vserver set in config. SSC will be disabled."))
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
try:
volume['provider_location'] = share
- LOG.info(_('casted to %s') % volume['provider_location'])
+ LOG.info(_LI('casted to %s') % volume['provider_location'])
self._do_create_volume(volume)
if qos_policy_group:
self._set_qos_policy_group_on_volume(volume, share,
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
- LOG.error(_("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s")
+ LOG.error(_LE("Exception creating vol %(name)s on "
+ "share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
'ex': ex})
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
if not self._mounted_shares:
- LOG.warn(_("No shares found hence skipping ssc refresh."))
+ LOG.warn(_LW("No shares found hence skipping ssc refresh."))
return
mnt_share_vols = set()
vs_ifs = self._get_vserver_ips(self.vserver)
if (major == 1 and minor >= 20 and col_path):
self._try_copyoffload(context, volume, image_service, image_id)
copy_success = True
- LOG.info(_('Copied image %(img)s to volume %(vol)s using copy'
- ' offload workflow.')
+ LOG.info(_LI('Copied image %(img)s to '
+ 'volume %(vol)s using copy'
+ ' offload workflow.')
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug("Copy offload either not configured or"
" unsupported.")
except Exception as e:
- LOG.exception(_('Copy offload workflow unsuccessful. %s'), e)
+ LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e)
finally:
if not copy_success:
super(NetAppDirectCmodeNfsDriver, self).copy_image_to_volume(
copied = True
break
except Exception as e:
- LOG.exception(_('Error in workflow copy from cache. %s.'), e)
+ LOG.exception(_LE('Error in workflow copy '
+ 'from cache. %s.'), e)
return copied
def _clone_file_dst_exists(self, share, src_name, dst_name,
raise exception.InvalidHost(reason=msg)
volume['provider_location'] = share
- LOG.info(_('Creating volume at location %s')
+ LOG.info(_LI('Creating volume at location %s')
% volume['provider_location'])
try:
self._do_create_volume(volume)
except Exception as ex:
- LOG.error(_("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s")
+ LOG.error(_LE("Exception creating vol %(name)s on "
+ "share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
'ex': six.text_type(ex)})
"""
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers import nexenta
:param volume: volume reference
:param new_size: volume new size in GB
"""
- LOG.info(_('Extending volume: %(id)s New size: %(size)s GB'),
+ LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'),
{'id': volume['id'], 'size': new_size})
self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']),
'volsize', '%sG' % new_size)
self.nms.zvol.destroy(volume_name, '')
except nexenta.NexentaException as exc:
if 'does not exist' in exc.args[0]:
- LOG.info(_('Volume %s does not exist, it seems it was already '
- 'deleted.'), volume_name)
+ LOG.info(_LI('Volume %s does not exist, it '
+ 'seems it was already deleted.'), volume_name)
return
if 'zvol has children' in exc.args[0]:
raise exception.VolumeIsBusy(volume_name=volume_name)
try:
self.delete_snapshot({'volume_name': volume, 'name': snapshot})
except nexenta.NexentaException as exc:
- LOG.warning(_('Cannot delete snapshot %(origin)s: %(exc)s'),
+ LOG.warning(_LW('Cannot delete snapshot %(origin)s: %(exc)s'),
{'origin': origin, 'exc': exc})
def create_cloned_volume(self, volume, src_vref):
try:
self.create_volume_from_snapshot(volume, snapshot)
except nexenta.NexentaException:
- LOG.error(_('Volume creation failed, deleting created snapshot '
- '%(volume_name)s@%(name)s'), snapshot)
+ LOG.error(_LE('Volume creation failed, deleting created snapshot '
+ '%(volume_name)s@%(name)s'), snapshot)
try:
self.delete_snapshot(snapshot)
except (nexenta.NexentaException, exception.SnapshotIsBusy):
- LOG.warning(_('Failed to delete zfs snapshot '
- '%(volume_name)s@%(name)s'), snapshot)
+ LOG.warning(_LW('Failed to delete zfs snapshot '
+ '%(volume_name)s@%(name)s'), snapshot)
raise
def _get_zfs_send_recv_cmd(self, src, dst):
ssh_bound = True
break
if not ssh_bound:
- LOG.warning(_("Remote NexentaStor appliance at %s should be "
- "SSH-bound."), dst_host)
+ LOG.warning(_LW("Remote NexentaStor appliance at %s should be "
+ "SSH-bound."), dst_host)
# Create temporary snapshot of volume on NexentaStor Appliance.
snapshot = {
try:
self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
except nexenta.NexentaException as exc:
- LOG.warning(_("Cannot send source snapshot %(src)s to "
- "destination %(dst)s. Reason: %(exc)s"),
+ LOG.warning(_LW("Cannot send source snapshot %(src)s to "
+ "destination %(dst)s. Reason: %(exc)s"),
{'src': src, 'dst': dst, 'exc': exc})
return false_ret
finally:
try:
self.delete_snapshot(snapshot)
except nexenta.NexentaException as exc:
- LOG.warning(_("Cannot delete temporary source snapshot "
- "%(src)s on NexentaStor Appliance: %(exc)s"),
+ LOG.warning(_LW("Cannot delete temporary source snapshot "
+ "%(src)s on NexentaStor Appliance: %(exc)s"),
{'src': src, 'exc': exc})
try:
self.delete_volume(volume)
except nexenta.NexentaException as exc:
- LOG.warning(_("Cannot delete source volume %(volume)s on "
- "NexentaStor Appliance: %(exc)s"),
+ LOG.warning(_LW("Cannot delete source volume %(volume)s on "
+ "NexentaStor Appliance: %(exc)s"),
{'volume': volume['name'], 'exc': exc})
dst_nms = self.get_nms_for_url(nms_url)
try:
dst_nms.snapshot.destroy(dst_snapshot, '')
except nexenta.NexentaException as exc:
- LOG.warning(_("Cannot delete temporary destination snapshot "
- "%(dst)s on NexentaStor Appliance: %(exc)s"),
+ LOG.warning(_LW("Cannot delete temporary destination snapshot "
+ "%(dst)s on NexentaStor Appliance: %(exc)s"),
{'dst': dst_snapshot, 'exc': exc})
provider_location = '%(host)s:%(port)s,1 %(name)s 0' % {
self.nms.snapshot.destroy(snapshot_name, '')
except nexenta.NexentaException as exc:
if "does not exist" in exc.args[0]:
- LOG.info(_('Snapshot %s does not exist, it seems it was '
- 'already deleted.'), snapshot_name)
+ LOG.info(_LI('Snapshot %s does not exist, it seems it was '
+ 'already deleted.'), snapshot_name)
return
if "snapshot has dependent clones" in exc.args[0]:
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
'target_name': target_name})
except nexenta.NexentaException as exc:
if ensure and 'already configured' in exc.args[0]:
- LOG.info(_('Ignored target creation error "%s" while '
- 'ensuring export'), exc)
+ LOG.info(_LI('Ignored target creation error "%s" while '
+ 'ensuring export'), exc)
else:
raise
if not self._target_group_exists(target_group_name):
except nexenta.NexentaException as exc:
if ((ensure and 'already exists' in exc.args[0]) or
'target must be offline' in exc.args[0]):
- LOG.info(_('Ignored target group creation error "%s" '
- 'while ensuring export'), exc)
+ LOG.info(_LI('Ignored target group creation error "%s" '
+ 'while ensuring export'), exc)
else:
raise
if not self._target_member_in_target_group(target_group_name,
except nexenta.NexentaException as exc:
if ((ensure and 'already exists' in exc.args[0]) or
'target must be offline' in exc.args[0]):
- LOG.info(_('Ignored target group member addition error '
- '"%s" while ensuring export'), exc)
+ LOG.info(_LI('Ignored target group member addition error '
+ '"%s" while ensuring export'), exc)
else:
raise
if not self._lu_exists(zvol_name):
except nexenta.NexentaException as exc:
if not ensure or 'in use' not in exc.args[0]:
raise
- LOG.info(_('Ignored LU creation error "%s" while ensuring '
- 'export'), exc)
+ LOG.info(_LI('Ignored LU creation error "%s" while ensuring '
+ 'export'), exc)
if not self._is_lu_shared(zvol_name):
try:
self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
except nexenta.NexentaException as exc:
if not ensure or 'view entry exists' not in exc.args[0]:
raise
- LOG.info(_('Ignored LUN mapping entry addition error "%s" '
- 'while ensuring export'), exc)
+ LOG.info(_LI('Ignored LUN mapping entry addition error "%s" '
+ 'while ensuring export'), exc)
def create_export(self, _ctx, volume):
"""Create new export for zvol.
self.nms.stmf.destroy_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
# We assume that target group is already gone
- LOG.warn(_('Got error trying to destroy target group'
- ' %(target_group)s, assuming it is '
- 'already gone: %(exc)s'),
+ LOG.warn(_LW('Got error trying to destroy target group'
+ ' %(target_group)s, assuming it is '
+ 'already gone: %(exc)s'),
{'target_group': target_group_name, 'exc': exc})
try:
self.nms.iscsitarget.delete_target(target_name)
except nexenta.NexentaException as exc:
# We assume that target is gone as well
- LOG.warn(_('Got error trying to delete target %(target)s,'
- ' assuming it is already gone: %(exc)s'),
+ LOG.warn(_LW('Got error trying to delete target %(target)s,'
+ ' assuming it is already gone: %(exc)s'),
{'target': target_name, 'exc': exc})
def get_volume_stats(self, refresh=False):
import urllib2
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.volume.drivers import nexenta
response_obj = urllib2.urlopen(request)
if response_obj.info().status == 'EOF in headers':
if not self.auto or self.scheme != 'http':
- LOG.error(_('No headers in server response'))
+ LOG.error(_LE('No headers in server response'))
raise NexentaJSONException(_('Bad response from server'))
- LOG.info(_('Auto switching to HTTPS connection to %s'), self.url)
+ LOG.info(_LI('Auto switching to HTTPS connection to %s'), self.url)
self.scheme = 'https'
request = urllib2.Request(self.url, data, headers)
response_obj = urllib2.urlopen(request)
from cinder import context
from cinder import db
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume.drivers import nexenta
try:
nms.folder.destroy('%s/%s' % (vol, folder))
except nexenta.NexentaException:
- LOG.warning(_("Cannot destroy created folder: "
- "%(vol)s/%(folder)s"),
+ LOG.warning(_LW("Cannot destroy created folder: "
+ "%(vol)s/%(folder)s"),
{'vol': vol, 'folder': folder})
raise exc
try:
nms.folder.destroy('%s/%s' % (vol, folder), '')
except nexenta.NexentaException:
- LOG.warning(_("Cannot destroy cloned folder: "
- "%(vol)s/%(folder)s"),
+ LOG.warning(_LW("Cannot destroy cloned folder: "
+ "%(vol)s/%(folder)s"),
{'vol': vol, 'folder': folder})
raise
:param volume: new volume reference
:param src_vref: source volume reference
"""
- LOG.info(_('Creating clone of volume: %s'), src_vref['id'])
+ LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
snapshot = {'volume_name': src_vref['name'],
'volume_id': src_vref['id'],
'name': self._get_clone_snapshot_name(volume)}
try:
return self.create_volume_from_snapshot(volume, snapshot)
except nexenta.NexentaException:
- LOG.error(_('Volume creation failed, deleting created snapshot '
- '%(volume_name)s@%(name)s'), snapshot)
+ LOG.error(_LE('Volume creation failed, deleting created snapshot '
+ '%(volume_name)s@%(name)s'), snapshot)
try:
self.delete_snapshot(snapshot)
except (nexenta.NexentaException, exception.SnapshotIsBusy):
- LOG.warning(_('Failed to delete zfs snapshot '
- '%(volume_name)s@%(name)s'), snapshot)
+ LOG.warning(_LW('Failed to delete zfs snapshot '
+ '%(volume_name)s@%(name)s'), snapshot)
raise
def delete_volume(self, volume):
nms.folder.destroy(folder, '-r')
except nexenta.NexentaException as exc:
if 'does not exist' in exc.args[0]:
- LOG.info(_('Folder %s does not exist, it was '
- 'already deleted.'), folder)
+ LOG.info(_LI('Folder %s does not exist, it was '
+ 'already deleted.'), folder)
return
raise
origin = props.get('origin')
nms.snapshot.destroy(origin, '')
except nexenta.NexentaException as exc:
if 'does not exist' in exc.args[0]:
- LOG.info(_('Snapshot %s does not exist, it was '
- 'already deleted.'), origin)
+ LOG.info(_LI('Snapshot %s does not exist, it was '
+ 'already deleted.'), origin)
return
raise
nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '')
except nexenta.NexentaException as exc:
if 'does not exist' in exc.args[0]:
- LOG.info(_('Snapshot %s does not exist, it was '
- 'already deleted.'), '%s@%s' % (folder, snapshot))
+ LOG.info(_LI('Snapshot %s does not exist, it was '
+ 'already deleted.'), '%s@%s' % (folder, snapshot['name']))
return
raise
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
- LOG.info(_('Creating regular file: %s.'
- 'This may take some time.') % path)
+ LOG.info(_LI('Creating regular file: %s. '
+ 'This may take some time.') % path)
nms.appliance.execute(
'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
}
)
- LOG.info(_('Regular file: %s created.') % path)
+ LOG.info(_LI('Regular file: %s created.') % path)
def _set_rw_permissions_for_all(self, nms, path):
"""Sets 666 permissions for the path.
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
volume['provider_location'] = self._find_share(volume['size'])
- LOG.info(_('casted to %s') % volume['provider_location'])
+ LOG.info(_LI('cast to %s') % volume['provider_location'])
self._do_create_volume(volume)
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
- LOG.error(_('Exception during mounting %s') % (exc,))
+ LOG.error(_LE('Exception during mounting %s') % (exc,))
self._mounted_shares = mounted_shares
:param volume: volume reference
"""
if not volume['provider_location']:
- LOG.warn(_('Volume %s does not have provider_location specified, '
- 'skipping'), volume['name'])
+ LOG.warn(_LW('Volume %s does not have '
+ 'provider_location specified, '
+ 'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
else:
permissions = 'ugo+rw'
parms = {'path': path, 'perm': permissions}
- LOG.warn(_('%(path)s is being set with open permissions: '
- '%(perm)s') % parms)
+ LOG.warn(_LW('%(path)s is being set with open permissions: '
+ '%(perm)s') % parms)
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
share_opts = share_info[1].strip() if len(share_info) > 1 else None
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
- LOG.warn(_("Share %s ignored due to invalid format. Must be "
- "of form address:/export.") % share_address)
+ LOG.warn(_LW("Share %s ignored due to invalid format. Must be "
+ "of form address:/export.") % share_address)
continue
self.shares[share_address] = share_opts
self._execute(*cmd, run_as_root=True)
except putils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.stderr:
- LOG.warn(_("%s is already mounted"), share)
+ LOG.warn(_LW("%s is already mounted"), share)
else:
raise
doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
"/nfs_backend.html"
self.configuration.nas_secure_file_operations = 'false'
- LOG.warn(_("The NAS file operations will be run as root: allowing "
- "root level access at the storage backend. This is "
- "considered an insecure NAS environment. Please see %s for "
- "information on a secure NAS configuration.") %
+ LOG.warn(_LW("The NAS file operations will be run as root: allowing "
+ "root level access at the storage backend. This is "
+ "considered an insecure NAS environment. "
+ "Please see %s for information on a secure NAS "
+ "configuration.") %
doc_html)
self.configuration.nas_secure_file_permissions = 'false'
- LOG.warn(_("The NAS file permissions mode will be 666 (allowing "
- "other/world read & write access). This is considered an "
- "insecure NAS environment. Please see %s for information "
- "on a secure NFS configuration.") %
+ LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
+ "other/world read & write access). This is considered an "
+ "insecure NAS environment. Please see %s for information "
+ "on a secure NFS configuration.") %
doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
file_path = os.path.join(mount_point, file_name)
if os.path.isfile(file_path):
nas_option = 'true'
- LOG.info(_('Cinder secure environment indicator file exists.'))
+ LOG.info(_LI('Cinder secure environment '
+ 'indicator file exists.'))
else:
# The indicator file does not exist. If it is a new
# installation, set to 'true' and create the indicator file.
# protect from accidental removal (owner write only).
self._execute('chmod', '640', file_path,
run_as_root=False)
- LOG.info(_('New Cinder secure environment indicator '
- 'file created at path %s.') % file_path)
+ LOG.info(_LI('New Cinder secure environment indicator'
+ ' file created at path %s.') % file_path)
except IOError as err:
- LOG.error(_('Failed to created Cinder secure '
- 'environment indicator file: %s') %
+ LOG.error(_LE('Failed to create Cinder secure '
+ 'environment indicator file: %s') %
format(err))
else:
# For existing installs, we default to 'false'. The
return snap_info['active']
def _create_cloned_volume(self, volume, src_vref):
- LOG.info(_('Cloning volume %(src)s to volume %(dst)s') %
+ LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s') %
{'src': src_vref['id'],
'dst': volume['id']})
if (snapshot_file == active_file):
return
- LOG.info(_('Deleting stale snapshot: %s') % snapshot['id'])
+ LOG.info(_LI('Deleting stale snapshot: %s') % snapshot['id'])
self._delete(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
- LOG.info(_('Snapshot record for %s is not present, allowing '
- 'snapshot_delete to proceed.') % snapshot['id'])
+ LOG.info(_LI('Snapshot record for %s is not present, allowing '
+ 'snapshot_delete to proceed.') % snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
Classes and utility methods for datastore selection.
"""
-from cinder.i18n import _
+from cinder.i18n import _LE, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.volume.drivers.vmware import error_util
"""
profile_id = self._vops.retrieve_profile_id(profile_name)
if profile_id is None:
- LOG.error(_("Storage profile: %s cannot be found in vCenter."),
+ LOG.error(_LE("Storage profile: %s cannot be found in vCenter."),
profile_name)
raise error_util.ProfileNotFoundException(
storage_profile=profile_name)
except error_util.VimConnectionException:
# No need to try other hosts when there is a connection problem
with excutils.save_and_reraise_exception():
- LOG.exception(_("Error occurred while selecting datastore."
- ))
+ LOG.exception(_LE("Error occurred while "
+ "selecting datastore."))
except error_util.VimException:
# TODO(vbala) volumeops.get_dss_rp shouldn't throw VimException
# for empty datastore list.
- LOG.warn(_("Unable to fetch datastores connected to host %s."),
- host_ref,
- exc_info=True)
+ LOG.warn(_LW("Unable to fetch datastores connected "
+ "to host %s."), host_ref, exc_info=True)
continue
if not datastores:
import urllib
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume.drivers.vmware import error_util
backing)
LOG.debug("Initiated deletion of VM backing: %s." % backing)
self._session.wait_for_task(task)
- LOG.info(_("Deleted the VM backing: %s.") % backing)
+ LOG.info(_LI("Deleted the VM backing: %s.") % backing)
# TODO(kartikaditya) Keep the methods not specific to volume in
# a different file
newCapacityKb=size_in_kb,
eagerZero=eager_zero)
self._session.wait_for_task(task)
- LOG.info(_("Successfully extended the volume %(name)s to "
- "%(size)s GB."),
+ LOG.info(_LI("Successfully extended the volume %(name)s to "
+ "%(size)s GB."),
{'name': name, 'size': requested_size_in_gb})
def _create_controller_config_spec(self, adapter_type):
pool=resource_pool, host=host)
task_info = self._session.wait_for_task(task)
backing = task_info.result
- LOG.info(_("Successfully created volume backing: %s."), backing)
+ LOG.info(_LI("Successfully created volume backing: %s."), backing)
return backing
def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
backing, spec=relocate_spec)
LOG.debug("Initiated relocation of volume backing: %s." % backing)
self._session.wait_for_task(task)
- LOG.info(_("Successfully relocated volume backing: %(backing)s "
- "to datastore: %(ds)s and resource pool: %(rp)s.") %
+ LOG.info(_LI("Successfully relocated volume backing: %(backing)s "
+ "to datastore: %(ds)s and resource pool: %(rp)s.") %
{'backing': backing, 'ds': datastore, 'rp': resource_pool})
def move_backing_to_folder(self, backing, folder):
LOG.debug("Initiated move of volume backing: %(backing)s into the "
"folder: %(fol)s." % {'backing': backing, 'fol': folder})
self._session.wait_for_task(task)
- LOG.info(_("Successfully moved volume backing: %(backing)s into the "
- "folder: %(fol)s.") % {'backing': backing, 'fol': folder})
+ LOG.info(_LI("Successfully moved volume "
+ "backing: %(backing)s into the "
+ "folder: %(fol)s.") % {'backing': backing, 'fol': folder})
def create_snapshot(self, backing, name, description, quiesce=False):
"""Create snapshot of the backing with given name and description.
"named: %(name)s." % {'backing': backing, 'name': name})
task_info = self._session.wait_for_task(task)
snapshot = task_info.result
- LOG.info(_("Successfully created snapshot: %(snap)s for volume "
- "backing: %(backing)s.") %
+ LOG.info(_LI("Successfully created snapshot: %(snap)s for volume "
+ "backing: %(backing)s.") %
{'snap': snapshot, 'backing': backing})
return snapshot
{'name': name, 'backing': backing})
snapshot = self.get_snapshot(backing, name)
if not snapshot:
- LOG.info(_("Did not find the snapshot: %(name)s for backing: "
- "%(backing)s. Need not delete anything.") %
+ LOG.info(_LI("Did not find the snapshot: %(name)s for backing: "
+ "%(backing)s. Need not delete anything.") %
{'name': name, 'backing': backing})
return
task = self._session.invoke_api(self._session.vim,
"%(backing)s." %
{'name': name, 'backing': backing})
self._session.wait_for_task(task)
- LOG.info(_("Successfully deleted snapshot: %(name)s of backing: "
- "%(backing)s.") % {'backing': backing, 'name': name})
+ LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: "
+ "%(backing)s.") % {'backing': backing, 'name': name})
def _get_folder(self, backing):
"""Get parent folder of the backing.
LOG.debug("Initiated clone of backing: %s." % name)
task_info = self._session.wait_for_task(task)
new_backing = task_info.result
- LOG.info(_("Successfully created clone: %s.") % new_backing)
+ LOG.info(_LI("Successfully created clone: %s.") % new_backing)
return new_backing
def _reconfigure_backing(self, backing, reconfig_spec):
:param backing: VM to be renamed
:param new_name: new VM name
"""
- LOG.info(_("Renaming backing VM: %(backing)s to %(new_name)s."),
+ LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
rename_task = self._session.invoke_api(self._session.vim,
newName=new_name)
LOG.debug("Task: %s created for renaming VM.", rename_task)
self._session.wait_for_task(rename_task)
- LOG.info(_("Backing VM: %(backing)s renamed to %(new_name)s."),
+ LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
datacenter=datacenter)
LOG.debug("Initiated deletion via task: %s." % task)
self._session.wait_for_task(task)
- LOG.info(_("Successfully deleted file: %s.") % file_path)
+ LOG.info(_LI("Successfully deleted file: %s.") % file_path)
def get_path_name(self, backing):
"""Get path name of the backing.
if device.__class__.__name__ == "VirtualDisk":
return device
- LOG.error(_("Virtual disk device of backing: %s not found."), backing)
+ LOG.error(_LE("Virtual disk device of "
+ "backing: %s not found."), backing)
raise error_util.VirtualDiskNotFoundException()
def get_vmdk_path(self, backing):
force=True)
LOG.debug("Initiated copying disk data via task: %s." % task)
self._session.wait_for_task(task)
- LOG.info(_("Successfully copied disk at: %(src)s to: %(dest)s.") %
+ LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s.") %
{'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
def delete_vmdk_file(self, vmdk_file_path, dc_ref):
datacenter=dc_ref)
LOG.debug("Initiated deleting vmdk file via task: %s." % task)
self._session.wait_for_task(task)
- LOG.info(_("Deleted vmdk file: %s.") % vmdk_file_path)
+ LOG.info(_LI("Deleted vmdk file: %s.") % vmdk_file_path)
def get_all_profiles(self):
"""Get all profiles defined in current VC.