def _backup_rbd(self, backup_id, volume_id, volume_file, volume_name,
length):
- """Create a incremental backup from an RBD image."""
+ """Create an incremental backup from an RBD image."""
rbd_user = volume_file.rbd_user
rbd_pool = volume_file.rbd_pool
rbd_conf = volume_file.rbd_conf
"""Perform the actual restore operation.
:param backup_path: the path the backup was created from, this
- identifes the backup to tsm
+ identifies the backup to tsm
:param restore_path: volume path to restore into
:param vol_id: volume id
:param backup_mode: mode used to create the backup ('image' or 'file')
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
- """Create a iSCSI target and logical unit."""
+ """Create an iSCSI target and logical unit."""
raise NotImplementedError()
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
- """Remove a iSCSI target and logical unit."""
+ """Remove an iSCSI target and logical unit."""
raise NotImplementedError()
def _new_target(self, name, tid, **kwargs):
# For now work-around by checking if the target was deleted,
# if it wasn't, try again without the force.
- # This will NOT do any good for the case of mutliple sessions
+ # This will NOT do any good for the case of multiple sessions
- # which the force was aded for but it will however address
+ # which the force was added for but it will however address
# the cases pointed out in bug:
# https://bugs.launchpad.net/cinder/+bug/1304122
# Insert a root entry for QoS specs
specs_root = models.QualityOfServiceSpecs()
root = dict(id=specs_id)
- # 'QoS_Specs_Name' is a internal reserved key to store
+ # 'QoS_Specs_Name' is an internal reserved key to store
# the name of QoS specs
root['key'] = 'QoS_Specs_Name'
root['value'] = values['name']
@require_admin_context
def qos_specs_update(context, qos_specs_id, specs):
- """Make updates to a existing qos specs.
+ """Make updates to an existing qos specs.
Perform add, update or delete key/values to a qos specs.
"""
class VolumeAdminMetadata(BASE, CinderBase):
- """Represents a administrator metadata key/value pair for a volume."""
+ """Represents an administrator metadata key/value pair for a volume."""
__tablename__ = 'volume_admin_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
def qemu_img_info(path):
- """Return a object containing the parsed output from qemu-img info."""
+ """Return an object containing the parsed output from qemu-img info."""
cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path)
if os.name == 'nt':
cmd = cmd[2:]
{'cause': cause, 'name': self.name})
def _notify_failure(self, context, request_spec, cause):
- """When scheduling fails send out a event that it failed."""
+ """When scheduling fails send out an event that it failed."""
payload = {
'request_spec': request_spec,
'volume_properties': request_spec.get('volume_properties', {}),
2. Extracts a scheduler specification from the provided inputs.
3. Attaches 2 activated only on *failure* tasks (one to update the db
status and one to notify on the MQ of the failure that occurred).
- 4. Uses provided driver to to then select and continue processing of
+ 4. Uses provided driver to then select and continue processing of
volume request.
"""
create_what = {
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
- # If both values aren't convertable to float, just ignore
+ # If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
volumes_links = res_dict['volumes_links']
_verify_links(volumes_links, key)
- # Number of volumes less then max, do not include
+ # Number of volumes less than max, do not include
def stub_volume_get_all2(context, marker, limit,
sort_key, sort_dir,
filters=None,
self.assertEqual(len(res_dict['volumes']), 100)
self.assertFalse('volumes_links' in res_dict)
- # Number of volumes more then the max, include next link
+ # Number of volumes more than the max, include next link
def stub_volume_get_all3(context, marker, limit,
sort_key, sort_dir,
filters=None,
self.assertEqual(len(res_dict['volumes']), CONF.osapi_max_limit)
volumes_links = res_dict['volumes_links']
_verify_links(volumes_links, key)
- # Pass a limit that is greater then the max and the total number of
+ # Pass a limit that is greater than the max and the total number of
# volumes, ensure only the maximum is returned and that the next
# link is present
for key, fn in zip(api_keys, fns):
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
- mocks that can't/dont't get unset.
+ mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
# NOTE(dosaboy): mock Popen to, by default, raise Exception in order to
"""Decorator to set mocks common to all metadata backup tests.
The point of doing these mocks here is so that we don't accidentally set
- mocks that can't/dont't get unset.
+ mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
@mock.patch('cinder.backup.drivers.ceph.rbd', spec=object)
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
- # Verify the orignal error is propagated
+ # Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
self.assertTrue(ctx.is_admin)
def test_custom_admin_role_is_admin(self):
- # define explict rules for context_is_admin
+ # define explicit rules for context_is_admin
rules = {
'context_is_admin': [["role:administrator"], ["role:johnny-admin"]]
}
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
- mocks that can't/dont't get unset.
+ mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
@mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
:param value: value for the managed object
:param type: type of the managed object
- :return: Managed object reference with with input value and type
+ :return: Managed object reference with input value and type
"""
moref = suds.sudsobject.Property(value)
moref._type = type
'type': 'source_vol',
})
elif kwargs.get('image_id'):
- # We are making a image based volume instead of a raw volume.
+ # We are making an image based volume instead of a raw volume.
image_href = kwargs['image_id']
image_service, image_id = get_remote_image_service(context,
image_href)
"""Enable bootable flag and properly handle glance metadata.
Caller should provide one and only one of snapshot_id,source_volid
- and image_id. If an image_id specified, a image_meta should also be
+ and image_id. If an image_id is specified, an image_meta should also be
provided, otherwise will be treated as an empty dictionary.
"""
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
- # headers, or you want to be treated like an iterable, or or or)
+ # headers, or you want to be treated like an iterable)
res = Response();
res.app_iter = open('somefile')