check_exit_code=[0, 255])
except putils.ProcessExecutionError as err:
#as this might be one of many paths,
- #only set successfull logins to startup automatically
+ #only set successful logins to startup automatically
if err.exit_code in [15]:
self._iscsiadm_update(connection_properties,
"node.startup",
class TransitionNotifier(object):
"""A utility helper class that can be used to subscribe to
- notifications of events occuring as well as allow a entity to post said
+ notifications of events occurring as well as allow a entity to post said
notifications to subscribers.
"""
# NOTE(uni): deprecating service request key, binary takes precedence
-# Still keeping service key here for API compability sake.
+# Still keeping service key here for API compatibility's sake.
class FakeRequestWithService(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"service": "cinder-volume"}
# NOTE(uni): deprecating service request key, binary takes precedence
-# Still keeping service key here for API compability sake.
+# Still keeping service key here for API compatibility's sake.
class FakeRequestWithHostService(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"host": "host1", "service": "cinder-volume"}
self.assertEqual(200, response.status_int)
def test_versions_action_args_index(self):
- request_enviroment = {'PATH_INFO': '/'}
+ request_environment = {'PATH_INFO': '/'}
resource = versions.Versions()
- result = resource.get_action_args(request_enviroment)
+ result = resource.get_action_args(request_environment)
self.assertEqual(result['action'], 'index')
def test_versions_action_args_multi(self):
- request_enviroment = {'PATH_INFO': '/fake/path'}
+ request_environment = {'PATH_INFO': '/fake/path'}
resource = versions.Versions()
- result = resource.get_action_args(request_enviroment)
+ result = resource.get_action_args(request_environment)
self.assertEqual(result['action'], 'multi')
def test_versions_get_most_recent_update(self):
self.app = limits.WsgiLimiter(TEST_LIMITS)
def _request_data(self, verb, path):
- """Get data decribing a limit request verb/path."""
+ """Get data describing a limit request verb/path."""
return jsonutils.dumps({"verb": verb, "path": path})
def _request(self, verb, url, username=None):
self.app = limits.WsgiLimiter(TEST_LIMITS)
def _request_data(self, verb, path):
- """Get data decribing a limit request verb/path."""
+ """Get data describing a limit request verb/path."""
return jsonutils.dumps({"verb": verb, "path": path})
def _request(self, verb, url, username=None):
product.appendChild(product_text)
storage.appendChild(product)
- protocal = doc.createElement('Protocol')
- protocal_text = doc.createTextNode('iSCSI')
- protocal.appendChild(protocal_text)
- storage.appendChild(protocal)
+ protocol = doc.createElement('Protocol')
+ protocol_text = doc.createTextNode('iSCSI')
+ protocol.appendChild(protocol_text)
+ storage.appendChild(protocol)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
product.appendChild(product_text)
storage.appendChild(product)
- protocal = doc.createElement('Protocol')
- protocal_text = doc.createTextNode('FC')
- protocal.appendChild(protocal_text)
- storage.appendChild(protocal)
+ protocol = doc.createElement('Protocol')
+ protocol_text = doc.createTextNode('FC')
+ protocol.appendChild(protocol_text)
+ storage.appendChild(protocol)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
return self.__dict__[key]
-class FakeResponce(object):
+class FakeResponse(object):
def __init__(self, status):
- """Initialize FakeResponce.
+ """Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.context,
snapshot_id)
- def test_cant_delete_volume_in_use(self):
+ def test_cannot_delete_volume_in_use(self):
"""Test volume can't be deleted in invalid stats."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, **self.volume_params)
# clean up
self.volume.delete_volume(self.context, volume['id'])
- def test_cant_force_delete_attached_volume(self):
+ def test_cannot_force_delete_attached_volume(self):
"""Test volume can't be force delete in attached state"""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
self.volume.delete_volume(self.context, volume['id'])
- def test_cant_delete_volume_with_snapshots(self):
+ def test_cannot_delete_volume_with_snapshots(self):
"""Test volume can't be deleted with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
for k, v in self.vol_type1_specs.iteritems():
self.assertEqual(v, new['extra_specs'][k],
- 'one of fields doesnt match')
+ 'one of fields does not match')
new_all_vtypes = volume_types.get_all_types(self.ctxt)
self.assertEqual(len(prev_all_vtypes) + 1,
raise exception.CinderException(msg)
# Paramiko by default sets the socket timeout to 0.1 seconds,
- # ignoring what we set thru the sshclient. This doesn't help for
+ # ignoring what we set through the sshclient. This doesn't help for
# keeping long lived connections. Hence we have to bypass it, by
# overriding it after the transport is initialized. We are setting
# the sockettimeout to None and setting a keepalive packet so that,
self.set_execute(execute)
self._stats = {}
- # set True by manager after succesful check_for_setup
+ # set True by manager after successful check_for_setup
self._initialized = False
def set_execute(self, execute):
res = urllib2.urlopen(req).read().decode("utf-8")
LOG.debug(_('HVS Response Data: %(res)s') % {'res': res})
except Exception as err:
- err_msg = _('Bad reponse from server: %s') % err
+ err_msg = _('Bad response from server: %s') % err
LOG.error(err_msg)
raise err
def _get_volume_size(self, poolinfo, volume):
"""Calculate the volume size.
- We should devide the given volume size by 512 for the HVS system
+ We should divide the given volume size by 512 for the HVS system
calculates volume size with sectors, which is 512 bytes.
"""
return None
def _get_host_map_info(self, hostid):
- """Get map infomation of the given host."""
+ """Get map information of the given host."""
cli_cmd = 'showhostmap -host %(hostid)s' % {'hostid': hostid}
out = self._execute_cli(cli_cmd)
def _check_conf_file(self):
"""Check the config file, make sure the key elements are set."""
root = huawei_utils.parse_xml_file(self.xml_conf)
- # Check login infomation
+ # Check login information
check_list = ['Storage/ControllerIP0', 'Storage/ControllerIP1',
'Storage/UserName', 'Storage/UserPassword']
for item in check_list:
def _create_tgtadm_target(self, iscsi_name, iscsi_target,
volume_path, chap_auth, lun=0,
check_exit_code=False, old_name=None):
- # NOTE(jdg): tgt driver has an issue where with alot of activity
+ # NOTE(jdg): tgt driver has an issue where with a lot of activity
# (or sometimes just randomly) it will get *confused* and attempt
# to reuse a target ID, resulting in a target already exists error
# Typically a simple retry will address this
client.set_api_version(major, minor)
def check_for_setup_error(self):
- """Checks if setup occured properly."""
+ """Checks if setup occurred properly."""
api_version = self._client.get_api_version()
if api_version:
major, minor = api_version
if not parent:
return depth
- # If clone depth was reached, flatten should have occured so if it has
+ # If clone depth was reached, flatten should have occurred so if it has
# been exceeded then something has gone wrong.
if depth > CONF.rbd_max_clone_depth:
raise Exception(_("clone depth exceeds limit of %s") %
return self.volumeops.get_vmfolder(datacenter)
def _select_datastore_summary(self, size_bytes, datastores):
- """Get best summary from datastore list that can accomodate volume.
+ """Get best summary from datastore list that can accommodate volume.
The implementation selects datastore based on maximum relative
free space, which is (free_space/total_space) and has free space to
best_summary = summary
if not best_summary:
- msg = _("Unable to pick datastore to accomodate %(size)s bytes "
+ msg = _("Unable to pick datastore to accommodate %(size)s bytes "
"from the datastores: %(dss)s.")
LOG.error(msg % {'size': size_bytes, 'dss': datastores})
raise error_util.VimException(msg %
# TODO(harlowja): what happens if the status changes after this
# initial snapshot status check occurs??? Seems like someone
# could delete the snapshot after this check passes but before
- # the volume is offically created?
+ # the volume is officially created?
raise exception.InvalidSnapshot(reason=msg)
snapshot_id = snapshot['id']
return snapshot_id
# TODO(harlowja): what happens if the status changes after this
# initial volume status check occurs??? Seems like someone
# could delete the volume after this check passes but before
- # the volume is offically created?
+ # the volume is officially created?
raise exception.InvalidVolume(reason=msg)
source_volid = source_volume['id']
return source_volid
# TODO(joel-coffman): This special handling of snapshots to ensure that
# their volume type matches the source volume is too convoluted. We
# should copy encryption metadata from the encrypted volume type to the
- # volume upon creation and propogate that information to each snapshot.
+ # volume upon creation and propagate that information to each snapshot.
# This strategy avoid any dependency upon the encrypted volume type.
if not volume_type and not source_volume and not snapshot:
volume_type = volume_types.get_default_volume_type()
Reversion strategy: rollback the quota reservation.
Warning Warning: if the process that is running this reserve and commit
- process fails (or is killed before the quota is rolled back or commited
+ process fails (or is killed before the quota is rolled back or committed
it does appear like the quota will never be rolled back). This makes
software upgrades hard (inflight operations will need to be stopped or
allowed to complete before the upgrade can occur). *In the future* when
if not result:
return
if context.quota_committed:
- # The reservations have already been commited and can not be
+ # The reservations have already been committed and can not be
# rolled back at this point.
return
# We actually produced an output that we can revert so lets attempt
the initial reservation (see: QuotaReserveTask).
Warning Warning: if the process that is running this reserve and commit
- process fails (or is killed before the quota is rolled back or commited
+ process fails (or is killed before the quota is rolled back or committed
it does appear like the quota will never be rolled back). This makes
software upgrades hard (inflight operations will need to be stopped or
allowed to complete before the upgrade can occur). *In the future* when
v_uuid = api_flow.add(EntryCreateTask(db))
api_flow.add(QuotaCommitTask())
- # If after commiting something fails, ensure we set the db to failure
+ # If after committing something fails, ensure we set the db to failure
# before reverting any prior tasks.
api_flow.add(OnFailureChangeStatusTask(db))
These listeners will log when tasks/flows are transitioning from state to
state so that said states can be seen in the debug log output which is very
- useful for figuring out where problems are occuring.
+ useful for figuring out where problems are occurring.
"""
def flow_log_change(state, details):
x="4" dy="1.2em" class="st3">Network<v:newlineChar/></tspan><tspan x="4" dy="1.2em" class="st3">VPN</tspan></text> </g>\r
<g id="shape16-56" v:mID="16" v:groupContext="shape" transform="translate(14.4132,-30.9923)">\r
<title>Sheet.16</title>\r
- <desc>VM instance Security group Volume Snapshot VM image IP addres...</desc>\r
+ <desc>VM instance Security group Volume Snapshot VM image IP address...</desc>\r
<v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>\r
<v:textRect cx="42.5197" cy="340.209" width="85.04" height="34.0157"/>\r
<rect x="0" y="323.201" width="85.0394" height="34.0157" class="st9"/>\r