Spelling errors fixed in comments and log messages.
Change-Id: I8ce4899fbb22136ce6d03e1796fc01d929f35562
@args('--path', required=True, help='Script path')
def script(self, path):
- """Runs the script from the specifed path with flags set properly.
+ """Runs the script from the specified path with flags set properly.
arguments: path
"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
#TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables
- # better load ballancing and isolation of services
+ # better load balancing and isolation of services
self.backup_rpcapi.create_backup(context,
backup['host'],
backup['id'],
"""Get the real path for the volume block device.
If the volume is not a block device then issue an
- InvalidBackup exsception.
+ InvalidBackup exception.
:param volume_file: file object representing the volume
:param volume_id: Volume id for backup or as restore target
help='region name of this node'),
cfg.StrOpt('nova_ca_certificates_file',
default=None,
- help='Location of ca certicates file to use for nova client '
+ help='Location of ca certificates file to use for nova client '
'requests.'),
cfg.BoolOpt('nova_api_insecure',
default=False,
signal.signal(signal.SIGTERM, _sigterm)
# Block SIGINT and let the parent send us a SIGTERM
# signal.signal(signal.SIGINT, signal.SIG_IGN)
- # This differs from the behavior in nova in that we dont ignore this
+ # This differs from the behavior in nova in that we don't ignore this
# It allows the non-wsgi services to be terminated properly
signal.signal(signal.SIGINT, _sigterm)
model_update = {}
# TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
+ # corresponding target admin class
if not isinstance(self.tgtadm, iscsi.TgtAdm):
lun = 0
self._ensure_iscsi_targets(context, volume['host'])
"""Removes an export for a logical volume."""
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
# TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
+ # corresponding target admin class
if isinstance(self.tgtadm, iscsi.LioAdm):
try:
"""
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
# TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
+ # corresponding target admin class
if isinstance(self.tgtadm, iscsi.LioAdm):
try:
"""Ensure that target ids have been created in datastore."""
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
# TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
+ # corresponding target admin class
if not isinstance(self.tgtadm, iscsi.TgtAdm):
host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
host)
return foundCtrl
# Find out how many volumes are mapped to a host
- # assoociated to the LunMaskingSCSIProtocolController
+ # associated to the LunMaskingSCSIProtocolController
def get_num_volumes_mapped(self, volume, connector):
numVolumesMapped = 0
volumename = volume['name']
sp = device_info['owningsp']
endpoints = []
if sp:
- # endpointss example:
+ # endpoints example:
# [iqn.1992-04.com.emc:cx.apm00123907237.a8,
# iqn.1992-04.com.emc:cx.apm00123907237.a9]
endpoints = self.common._find_iscsi_protocol_endpoints(
help='Maximum retry count for reconnection'),
cfg.BoolOpt('eqlx_use_chap',
default=False,
- help='Use CHAP authentificaion for targets?'),
+ help='Use CHAP authentication for targets?'),
cfg.StrOpt('eqlx_chap_login',
default='admin',
help='Existing CHAP account name'),
san_ip=<ip_address>
san_login=<user name>
san_password=<user password>
- san_private_key=<file containig SSH prvate key>
+ san_private_key=<file containing SSH private key>
Thin provision of volumes is enabled by default, to disable it use:
san_thin_provision=false
volume['name'])
def terminate_connection(self, volume, connector, force=False, **kwargs):
- """Remove access restictions from a volume."""
+ """Remove access restrictions from a volume."""
try:
self._eql_execute('volume', 'select', volume['name'],
'access', 'delete', '1')
raise exception.InvalidInput(reason=msg)
if int(end) > int(maxlun):
end = maxlun
- LOG.debug(_("setting LU uppper (end) limit to %s") % maxlun)
+ LOG.debug(_("setting LU upper (end) limit to %s") % maxlun)
return (start, end)
"""Calculate the volume size.
We should divide the given volume size by 512 for the HVS system
- caculates volume size with sectors, which is 512 bytes.
+ calculates volume size with sectors, which is 512 bytes.
"""
volume_size = units.GiB / 512 # 1G
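For reference, a minimal sketch of the sector arithmetic the docstring above describes, assuming the 512-byte sector size it states (the names below are illustrative, not from the driver):

    GiB = 1024 ** 3                  # bytes in one gibibyte, i.e. units.GiB
    sectors_per_gib = GiB // 512     # 2097152 sectors per GiB
    two_gb_in_sectors = 2 * sectors_per_gib  # a 2 GB volume -> 4194304 sectors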
self._delete_lungroup(lungroup_id)
self._delete_lun(lun_id)
else:
- LOG.warn(_("Can't find lun or lun goup in array"))
+ LOG.warn(_("Can't find lun or lun group in array"))
def _delete_lun_from_qos_policy(self, volume, lun_id):
"""Remove lun from qos policy."""
(iscsi_iqn, target_ip) = self._get_iscsi_params(connector)
- #create host_goup if not exist
+ #create host_group if not exist
hostid, hostgroup_id = self._add_host_into_hostgroup(connector['host'],
connector['ip'])
self._ensure_initiator_added(initiator_name, hostid)
- # Mapping lungooup and hostgoup to view
+ # Mapping lungroup and hostgroup to view
lun_id = self._mapping_hostgroup_and_lungroup(volume_name,
hostgroup_id, hostid)
hostlunid = self._find_host_lun_id(hostid, lun_id)
self._assert_rest_result(result, 'Log out of session error.')
def _start_luncopy(self, luncopyid):
- """Starte a LUNcopy."""
+ """Start a LUNcopy."""
url = self.url + "/LUNCOPY/start"
data = json.dumps({"TYPE": "219", "ID": luncopyid})
result = self.call(url, data, "PUT")
params[key] = value.strip()
else:
conf = self.configuration.cinder_huawei_conf_file
- LOG.warn(_('_parse_volume_type: Unacceptable paramater '
+ LOG.warn(_('_parse_volume_type: Unacceptable parameter '
'%(key)s. Please check this key in extra_specs '
'and make it consistent with the configuration '
'file %(conf)s.') % {'key': key, 'conf': conf})
# If constant prefetch, we should specify prefetch value.
if params['PrefetchType'] == '1':
prefetch_value_or_times = '-value %s' % params['PrefetchValue']
- # If variable prefetch, we should specify prefetch mutiple.
+ # If variable prefetch, we should specify prefetch multiple.
elif params['PrefetchType'] == '2':
prefetch_value_or_times = '-times %s' % params['PrefetchTimes']
self._stats = data
def extend_volume(self, volume, new_size):
- """Extend an existing voumes size."""
+ """Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
"""Synchronously recreates an export for a logical volume."""
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
# TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
+ # corresponding target admin class
if isinstance(self.tgtadm, iscsi.LioAdm):
try:
"""Ensure that target ids have been created in datastore."""
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
# TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
+ # corresponding target admin class
if not isinstance(self.tgtadm, iscsi.TgtAdm):
host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
host)
model_update = {}
# TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
+ # corresponding target admin class
if not isinstance(self.tgtadm, iscsi.TgtAdm):
lun = 0
self._ensure_iscsi_targets(context, volume['host'])
"""Removes an export for a logical volume."""
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
# TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
+ # corresponding target admin class
if isinstance(self.tgtadm, iscsi.LioAdm):
try:
return {'provider_location': handle}
def remove_export(self, context, volume):
- """Driver exntry point to remove an export for a volume.
+ """Driver entry point to remove an export for a volume.
Since exporting is idempotent in this driver, we have nothing
to do for unexporting.
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance.
- Unmask the LUN on the storage system so the given intiator can no
+ Unmask the LUN on the storage system so the given initiator can no
longer access it.
"""
return False
def _create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
- """Creates igoup with specified args."""
+ """Creates igroup with specified args."""
igroup_create = NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
raise NotImplementedError()
def _get_lun_by_args(self, **args):
- """Retrives luns with specified args."""
+ """Retrieves luns with specified args."""
raise NotImplementedError()
def _get_lun_attr(self, name, attr):
volume=ssc_utils.NetAppVolume(volume, self.vserver))
def _get_lun_by_args(self, **args):
- """Retrives lun with specified args."""
+ """Retrieves lun with specified args."""
lun_iter = NaElement('lun-get-iter')
lun_iter.add_new_child('max-records', '100')
query = NaElement('query')
clone_ops_info.get_child_content('reason'))
def _get_lun_by_args(self, **args):
- """Retrives luns with specified args."""
+ """Retrieves luns with specified args."""
lun_info = NaElement.create_node_with_children('lun-list-info', **args)
result = self.client.invoke_successfully(lun_info, True)
luns = result.get_child_by_name('luns')
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
- :param snapshot: shapshot reference
+ :param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
self.config.hp3par_cpg)
if cpg is not self.config.hp3par_cpg:
# The cpg was specified in a volume type extra spec so it
- # needs to be validiated that it's in the correct domain.
+ # needs to be validated that it's in the correct domain.
self.validate_cpg(cpg)
# Also, look to see if the snap_cpg was specified in volume
# type extra spec, if not use the extra spec cpg as the
"""All API requests to SolidFire device go through this method.
Simple json-rpc web based API calls.
- each call takes a set of paramaters (dict)
+ each call takes a set of parameters (dict)
and returns results in a dict as well.
"""
def delete_volume(self, volume):
"""Delete SolidFire Volume from device.
- SolidFire allows multipe volumes with same name,
+ SolidFire allows multiple volumes with same name,
volumeID is what's guaranteed unique.
"""
LOG.error(_("Account for Volume ID %s was not found on "
"the SolidFire Cluster!") % volume['id'])
LOG.error(_("This usually means the volume was never "
- "succesfully created."))
+ "successfully created."))
return
params = {'accountID': sfaccount['accountID']}
LOG.debug(_('leave: extend_volume: volume %s') % volume['id'])
def migrate_volume(self, ctxt, volume, host):
- """Migrate direclty if source and dest are managed by same storage.
+ """Migrate directly if source and dest are managed by same storage.
The method uses the migratevdisk method, which returns almost
immediately, if the source and target pools have the same extent_size.
def cancel_retrieval(vim, retrieve_result):
- """Cancels the retrive operation if necessary.
+ """Cancels the retrieve operation if necessary.
:param vim: Vim object
:param retrieve_result: Result from the RetrievePropertiesEx API
{'child_folder_name': child_folder_name,
'parent_folder': parent_folder})
- # Get list of child entites for the parent folder
+ # Get list of child entities for the parent folder
prop_val = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, parent_folder,
'childEntity')
wt_disk.Extend(additional_size)
except wmi.x_wmi as exc:
err_msg = (_(
- 'extend: error when extending the volumne: %(vol_name)s '
+ 'extend: error when extending the volume: %(vol_name)s '
'.WMI exception: %(wmi_exc)s') % {'vol_name': vol_name,
'wmi_exc': exc})
LOG.error(err_msg)
if CONF.default_availability_zone:
availability_zone = CONF.default_availability_zone
else:
- # For backwards compatibility use the storge_availability_zone
+ # For backwards compatibility use the storage_availability_zone
availability_zone = CONF.storage_availability_zone
if not self.az_check_functor(availability_zone):
msg = _("Availability zone '%s' is invalid") % (availability_zone)
Accesses the database and creates a new entry for the to be created
volume using the given volume properties which are extracted from the
input kwargs (and associated requirements this task needs). These
- requirements should be previously satisifed and validated by a
+ requirements should be previously satisfied and validated by a
pre-cursor task.
"""
'volume_properties': volume_properties,
# NOTE(harlowja): it appears like further usage of this volume
- # result actually depend on it being a sqlalchemy object and not
+ # result actually depends on it being a sqlalchemy object and not
- # just a plain dictionary so thats why we are storing this here.
+ # just a plain dictionary so that's why we are storing this here.
#
# In the future where this task results can be serialized and
# restored automatically for continued running we will need to
1. Inject keys & values for dependent tasks.
2. Extracts a scheduler specification from the provided inputs.
3. Attaches 2 activated only on *failure* tasks (one to update the db
- status and one to notify on the MQ of the failure that occured).
+ status and one to notify on the MQ of the failure that occurred).
- 4. Uses provided driver to to then select and continue processing of
+ 4. Uses provided driver to then select and continue processing of
volume request.
"""
# region name of this node (string value)
#os_region_name=<None>
-# Location of ca certicates file to use for nova client
+# Location of ca certificates file to use for nova client
# requests. (string value)
#nova_ca_certificates_file=<None>
# Maximum retry count for reconnection (integer value)
#eqlx_cli_max_retries=5
-# Use CHAP authentificaion for targets? (boolean value)
+# Use CHAP authentication for targets? (boolean value)
#eqlx_use_chap=false
# Existing CHAP account name (string value)