import sys
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
- sys.argv[0]), os.pardir, os.pardir))
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
sys.path.insert(0, possible_topdir)
ctxt = context.get_admin_context()
services = db.service_get_all(ctxt)
print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
- print print_format % (
- _('Binary'),
- _('Host'),
- _('Zone'),
- _('Status'),
- _('State'),
- _('Updated At'))
+ print print_format % (_('Binary'),
+ _('Host'),
+ _('Zone'),
+ _('Status'),
+ _('State'),
+ _('Updated At'))
for svc in services:
alive = utils.service_is_up(svc)
art = ":-)" if alive else "XXX"
if FLAGS.enabled_backends:
for backend in FLAGS.enabled_backends:
host = "%s@%s" % (FLAGS.host, backend)
- server = service.Service.create(
- host=host,
- service_name=backend)
+ server = service.Service.create(host=host,
+ service_name=backend)
launcher.launch_server(server)
else:
server = service.Service.create(binary='cinder-volume')
print _("Found %d volumes") % len(volumes)
for volume_ref in volumes:
try:
- cinder.volume.utils.notify_usage_exists(
- admin_context, volume_ref)
+ cinder.volume.utils.notify_usage_exists(admin_context,
+ volume_ref)
except Exception, e:
print traceback.format_exc(e)
name = transfer.get('name', None)
- LOG.audit(_("Creating transfer of volume %(volume_id)s"), locals(),
- context=context)
+ LOG.audit(_("Creating transfer of volume %s"),
+ volume_id,
+ context=context)
try:
new_transfer = self.transfer_api.create(context, volume_id, name)
msg = _("Incorrect request body format")
raise exc.HTTPBadRequest(explanation=msg)
- LOG.audit(_("Accepting transfer %(transfer_id)s"), locals(),
- context=context)
+ LOG.audit(_("Accepting transfer %s"), transfer_id,
+ context=context)
try:
accepted_transfer = self.transfer_api.accept(context, transfer_id,
except exception.InvalidVolume as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
- transfer = self._view_builder.summary(req,
- dict(accepted_transfer.iteritems()))
+ transfer = \
+ self._view_builder.summary(req,
+ dict(accepted_transfer.iteritems()))
return transfer
def delete(self, req, id):
LOG = logging.getLogger(__name__)
SCHEDULER_HINTS_NAMESPACE =\
- "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
+ "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
FLAGS = flags.FLAGS
def _extract_scheduler_hints(self, volume_node):
"""Marshal the scheduler hints attribute of a parsed request."""
- node = self.find_first_child_named_in_namespace(volume_node,
- SCHEDULER_HINTS_NAMESPACE, "scheduler_hints")
+ node =\
+ self.find_first_child_named_in_namespace(volume_node,
+ SCHEDULER_HINTS_NAMESPACE,
+ "scheduler_hints")
if node:
scheduler_hints = {}
for child in self.extract_elements(node):
# check for existing key
for limit in limits:
if (limit["uri"] == rate_limit["URI"] and
- limit["regex"] == rate_limit["regex"]):
+ limit["regex"] == rate_limit["regex"]):
_rate_limit_key = limit
break
transfers_list = [func(request, transfer)['transfer'] for transfer in
transfers]
transfers_links = self._get_collection_links(request,
- transfers,
- self._collection_name)
+ transfers,
+ self._collection_name)
transfers_dict = dict(transfers=transfers_list)
if transfers_links:
import cinder.openstack.common.importutils
API = cinder.openstack.common.importutils.import_class(
- cinder.flags.FLAGS.backup_api_class)
+ cinder.flags.FLAGS.backup_api_class)
@require_context
def transfer_get(context, transfer_id, session=None):
query = model_query(context, models.Transfer,
- session=session).\
+ session=session).\
filter_by(id=transfer_id)
if not is_admin_context(context):
volume = models.Volume
query = model_query(context, models.Transfer).\
- options(joinedload('volume')).\
- filter(volume.project_id == project_id)
+ options(joinedload('volume')).\
+ filter(volume.project_id == project_id)
results = query.all()
return _translate_transfers(results)
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
- dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
+ dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
"QuotaUsage",
foreign_keys=usage_id,
primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
- 'QuotaUsage.deleted == 0)')
+ 'QuotaUsage.deleted == 0)')
class Snapshot(BASE, CinderBase):
scheduler_json_config_location_opt = cfg.StrOpt(
- 'scheduler_json_config_location',
- default='',
- help='Absolute path to scheduler configuration JSON file.')
+ 'scheduler_json_config_location',
+ default='',
+ help='Absolute path to scheduler configuration JSON file.')
CONF = cfg.CONF
last_modified = self._get_file_timestamp(filename)
if (not last_modified or not self.last_modified or
- last_modified > self.last_modified):
+ last_modified > self.last_modified):
self.data = self._load_file(self._get_file_handle(filename))
self.last_modified = last_modified
if not self.data:
self.assertEqual(backup_detail.item(0).attributes.length, 11)
self.assertEqual(
- backup_detail.item(0).getAttribute('availability_zone'), 'az1')
+ backup_detail.item(0).getAttribute('availability_zone'), 'az1')
self.assertEqual(
backup_detail.item(0).getAttribute('container'), 'volumebackups')
self.assertEqual(
self.assertEqual(
backup_detail.item(0).getAttribute('id'), backup_id1)
self.assertEqual(
- int(backup_detail.item(0).getAttribute('object_count')), 0)
+ int(backup_detail.item(0).getAttribute('object_count')), 0)
self.assertEqual(
- int(backup_detail.item(0).getAttribute('size')), 0)
+ int(backup_detail.item(0).getAttribute('size')), 0)
self.assertEqual(
backup_detail.item(0).getAttribute('status'), 'creating')
self.assertEqual(
- int(backup_detail.item(0).getAttribute('volume_id')), 1)
+ int(backup_detail.item(0).getAttribute('volume_id')), 1)
self.assertEqual(backup_detail.item(1).attributes.length, 11)
self.assertEqual(
- backup_detail.item(1).getAttribute('availability_zone'), 'az1')
+ backup_detail.item(1).getAttribute('availability_zone'), 'az1')
self.assertEqual(
backup_detail.item(1).getAttribute('container'), 'volumebackups')
self.assertEqual(
self.assertEqual(
backup_detail.item(2).getAttribute('id'), backup_id3)
self.assertEqual(
- int(backup_detail.item(2).getAttribute('object_count')), 0)
+ int(backup_detail.item(2).getAttribute('object_count')), 0)
self.assertEqual(
- int(backup_detail.item(2).getAttribute('size')), 0)
+ int(backup_detail.item(2).getAttribute('size')), 0)
self.assertEqual(
backup_detail.item(2).getAttribute('status'), 'creating')
self.assertEqual(
- int(backup_detail.item(2).getAttribute('volume_id')), 1)
+ int(backup_detail.item(2).getAttribute('volume_id')), 1)
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
super(SchedulerHintsTestCase, self).setUp()
self.fake_instance = stubs.stub_volume(1, uuid=UUID)
self.fake_instance['created_at'] =\
- datetime.datetime(2013, 1, 1, 1, 1, 1)
+ datetime.datetime(2013, 1, 1, 1, 1, 1)
self.flags(
osapi_volume_extension=[
'cinder.api.contrib.select_extensions'],
req.content_type = 'application/json'
body = {'id': id,
'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'volume_id': '1',
- }
+ 'volume_id': '1', }
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
body = {'id': id,
'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'volume_id': '1',
- 'scheduler_hints': {'a': 'b'},
- }
+ 'scheduler_hints': {'a': 'b'}, }
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
req.method = 'POST'
req.content_type = 'application/json'
body = {'volume': {
- 'id': id,
- 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'volume_id': '1',
- 'scheduler_hints': 'a', }
- }
+ 'id': id,
+ 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'volume_id': '1',
+ 'scheduler_hints': 'a', }}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
def stub_volume_type_extra_specs():
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
+ specs = {"key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
return specs
def stub_volume_type(id):
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
+ specs = {"key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
def test_accept_transfer_with_VolumeLimitExceeded(self):
def fake_transfer_api_accept_throwing_VolumeLimitExceeded(cls,
- context,
- transfer,
- volume_id):
+ context,
+ transfer,
+ volume_id):
raise exception.VolumeLimitExceeded(allowed=1)
self.stubs.Set(cinder.transfer.API, 'accept',
def test_valid_marker(self):
""" Test valid marker param. """
req = webob.Request.blank(
- '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
+ '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def stub_volume_type(id):
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
+ specs = {"key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
self.assertEquals(xfer.volume_id, volume_id1, "Unexpected volume_id")
nctxt = context.RequestContext(user_id='new_user_id',
- project_id='new_project_id')
+ project_id='new_project_id')
self.assertRaises(exception.TransferNotFound,
db.transfer_get, nctxt, xfer_id1)
"Unexpected number of transfer records")
nctxt = context.RequestContext(user_id='new_user_id',
- project_id='new_project_id')
+ project_id='new_project_id')
self.assertRaises(exception.NotAuthorized,
db.transfer_get_all_by_project,
nctxt, self.ctxt.project_id)
LOG.debug("Logs: %s" % fake_driver.LoggingVolumeDriver.all_logs())
create_actions = fake_driver.LoggingVolumeDriver.logs_like(
- 'create_volume',
- id=created_volume_id)
+ 'create_volume',
+ id=created_volume_id)
LOG.debug("Create_Actions: %s" % create_actions)
self.assertEquals(1, len(create_actions))
self.assertEquals(create_action['size'], 1)
export_actions = fake_driver.LoggingVolumeDriver.logs_like(
- 'create_export',
- id=created_volume_id)
+ 'create_export',
+ id=created_volume_id)
self.assertEquals(1, len(export_actions))
export_action = export_actions[0]
self.assertEquals(export_action['id'], created_volume_id)
self.assertEquals(export_action['availability_zone'], 'nova')
delete_actions = fake_driver.LoggingVolumeDriver.logs_like(
- 'delete_volume',
- id=created_volume_id)
+ 'delete_volume',
+ id=created_volume_id)
self.assertEquals(1, len(delete_actions))
delete_action = export_actions[0]
self.assertEquals(delete_action['id'], created_volume_id)
stub_out_https_backend(self.stubs)
self.context = context.RequestContext('fake', 'fake')
self.json_query = jsonutils.dumps(
- ['and', ['>=', '$free_capacity_gb', 1024],
- ['>=', '$total_capacity_gb', 10 * 1024]])
+ ['and',
+ ['>=', '$free_capacity_gb', 1024],
+ ['>=', '$total_capacity_gb', 10 * 1024]])
# This has a side effect of testing 'get_filter_classes'
# when specifying a method (in this case, our standard filters)
filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
self.host_manager._choose_host_filters(specified_filters).AndReturn(
- [FakeFilterClass1])
+ [FakeFilterClass1])
def _verify_result(self, info, result):
for x in info['got_fprops']:
for i in range(3):
resource = 'res%d' % i
quotas[resource] = db.quota_create(context, project_id, resource, i)
- resources[resource] = ReservableResource(resource,
- get_sync(resource, i), 'quota_res_%d' % i)
+ resources[resource] = ReservableResource(
+ resource,
+ get_sync(resource, i), 'quota_res_%d' % i)
deltas[resource] = i
return db.quota_reserve(context, resources, quotas, deltas,
- datetime.utcnow(), datetime.utcnow(),
- timedelta(days=1), project_id)
+ datetime.utcnow(), datetime.utcnow(),
+ timedelta(days=1), project_id)
class ModelsObjectComparatorMixin(object):
if ignored_keys is None:
ignored_keys = []
return dict([(k, v) for k, v in obj.iteritems()
- if k not in ignored_keys])
+ if k not in ignored_keys])
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
db.iscsi_target_create_safe(self.ctxt, {'host': host,
'target_num': 42})
target_num = db.volume_allocate_iscsi_target(self.ctxt, volume['id'],
- host)
+ host)
self.assertEqual(target_num, 42)
def test_volume_attached_invalid_uuid(self):
for j in xrange(3):
db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': 100})
for i in xrange(3):
- self.assertEqual((3, 300), db.volume_data_get_for_host(
- self.ctxt, 'h%d' % i))
+ self.assertEqual((3, 300),
+ db.volume_data_get_for_host(
+ self.ctxt, 'h%d' % i))
def test_volume_data_get_for_project(self):
for i in xrange(3):
'host': 'h-%d-%d' % (i, j),
})
for i in xrange(3):
- self.assertEqual((3, 300), db.volume_data_get_for_project(
- self.ctxt, 'p%d' % i))
+ self.assertEqual((3, 300),
+ db.volume_data_get_for_project(
+ self.ctxt, 'p%d' % i))
def test_volume_detached(self):
volume = db.volume_create(self.ctxt, {})
- db.volume_attached(self.ctxt, volume['id'],
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
+ db.volume_attached(self.ctxt,
+ volume['id'],
+ 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
db.volume_detached(self.ctxt, volume['id'])
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual('available', volume['status'])
def test_volume_get(self):
volume = db.volume_create(self.ctxt, {})
self._assertEqualObjects(volume, db.volume_get(self.ctxt,
- volume['id']))
+ volume['id']))
def test_volume_destroy(self):
volume = db.volume_create(self.ctxt, {})
self.ctxt, volume['id'])
def test_volume_get_all(self):
- volumes = [db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': i})
- for i in xrange(3)]
+ volumes = [db.volume_create(self.ctxt,
+ {'host': 'h%d' % i, 'size': i})
+ for i in xrange(3)]
self._assertEqualListsOfObjects(volumes, db.volume_get_all(
self.ctxt, None, None, 'host', None))
volumes = []
for i in xrange(3):
volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i})
- for j in xrange(3)])
+ for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_host(
instance_uuid = str(uuidutils.uuid.uuid1())
instance_uuids.append(instance_uuid)
volumes.append([db.volume_create(self.ctxt,
- {'instance_uuid': instance_uuid}) for j in xrange(3)])
+ {'instance_uuid': instance_uuid})
+ for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_instance_uuid(
volumes = []
for i in xrange(3):
volumes.append([db.volume_create(self.ctxt, {
- 'project_id': 'p%d' % i}) for j in xrange(3)])
+ 'project_id': 'p%d' % i}) for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_project(
def test_volume_get_iscsi_target_num_nonexistent(self):
self.assertRaises(exception.ISCSITargetNotFoundForVolume,
- db.volume_get_iscsi_target_num, self.ctxt, 42)
+ db.volume_get_iscsi_target_num, self.ctxt, 42)
def test_volume_update(self):
volume = db.volume_create(self.ctxt, {'host': 'h1'})
def setUp(self):
super(DBAPIReservationTestCase, self).setUp()
self.values = {'uuid': 'sample-uuid',
- 'project_id': 'project1',
- 'resource': 'resource',
- 'delta': 42,
- 'expire': datetime.utcnow() + timedelta(days=1),
- 'usage': {'id': 1}}
+ 'project_id': 'project1',
+ 'resource': 'resource',
+ 'delta': 42,
+ 'expire': datetime.utcnow() + timedelta(days=1),
+ 'usage': {'id': 1}}
def test_reservation_create(self):
reservation = db.reservation_create(self.ctxt, **self.values)
self._assertEqualObjects(self.values, reservation, ignored_keys=(
- 'deleted', 'updated_at',
- 'deleted_at', 'id',
- 'created_at', 'usage',
- 'usage_id'))
+ 'deleted', 'updated_at',
+ 'deleted_at', 'id',
+ 'created_at', 'usage',
+ 'usage_id'))
self.assertEqual(reservation['usage_id'], self.values['usage']['id'])
def test_reservation_get(self):
self._assertEqualObjects(reservation, reservation_db)
def test_reservation_get_nonexistent(self):
- self.assertRaises(exception.ReservationNotFound, db.reservation_get,
- self.ctxt, 'non-exitent-resevation-uuid')
+ self.assertRaises(exception.ReservationNotFound,
+ db.reservation_get,
+ self.ctxt,
+ 'non-exitent-resevation-uuid')
def test_reservation_commit(self):
reservations = _quota_reserve(self.ctxt, 'project1')
expected = {'project_id': 'project1',
- 'res0': {'reserved': 0, 'in_use': 0},
- 'res1': {'reserved': 1, 'in_use': 1},
- 'res2': {'reserved': 2, 'in_use': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project(
- self.ctxt, 'project1'))
+ 'res0': {'reserved': 0, 'in_use': 0},
+ 'res1': {'reserved': 1, 'in_use': 1},
+ 'res2': {'reserved': 2, 'in_use': 2}}
+ self.assertEqual(expected,
+ db.quota_usage_get_all_by_project(
+ self.ctxt, 'project1'))
db.reservation_get(self.ctxt, reservations[0])
db.reservation_commit(self.ctxt, reservations, 'project1')
self.assertRaises(exception.ReservationNotFound,
- db.reservation_get, self.ctxt, reservations[0])
+ db.reservation_get,
+ self.ctxt,
+ reservations[0])
expected = {'project_id': 'project1',
- 'res0': {'reserved': 0, 'in_use': 0},
- 'res1': {'reserved': 0, 'in_use': 2},
- 'res2': {'reserved': 0, 'in_use': 4}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project(
- self.ctxt, 'project1'))
+ 'res0': {'reserved': 0, 'in_use': 0},
+ 'res1': {'reserved': 0, 'in_use': 2},
+ 'res2': {'reserved': 0, 'in_use': 4}}
+ self.assertEqual(expected,
+ db.quota_usage_get_all_by_project(
+ self.ctxt,
+ 'project1'))
def test_reservation_rollback(self):
reservations = _quota_reserve(self.ctxt, 'project1')
expected = {'project_id': 'project1',
- 'res0': {'reserved': 0, 'in_use': 0},
- 'res1': {'reserved': 1, 'in_use': 1},
- 'res2': {'reserved': 2, 'in_use': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project(
- self.ctxt, 'project1'))
+ 'res0': {'reserved': 0, 'in_use': 0},
+ 'res1': {'reserved': 1, 'in_use': 1},
+ 'res2': {'reserved': 2, 'in_use': 2}}
+ self.assertEqual(expected,
+ db.quota_usage_get_all_by_project(
+ self.ctxt,
+ 'project1'))
db.reservation_get(self.ctxt, reservations[0])
db.reservation_rollback(self.ctxt, reservations, 'project1')
self.assertRaises(exception.ReservationNotFound,
- db.reservation_get, self.ctxt, reservations[0])
+ db.reservation_get,
+ self.ctxt,
+ reservations[0])
expected = {'project_id': 'project1',
- 'res0': {'reserved': 0, 'in_use': 0},
- 'res1': {'reserved': 0, 'in_use': 1},
- 'res2': {'reserved': 0, 'in_use': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project(
- self.ctxt, 'project1'))
+ 'res0': {'reserved': 0, 'in_use': 0},
+ 'res1': {'reserved': 0, 'in_use': 1},
+ 'res2': {'reserved': 0, 'in_use': 2}}
+ self.assertEqual(expected,
+ db.quota_usage_get_all_by_project(
+ self.ctxt,
+ 'project1'))
def test_reservation_expire(self):
self.values['expire'] = datetime.utcnow() + timedelta(days=1)
db.reservation_expire(self.ctxt)
expected = {'project_id': 'project1',
- 'res0': {'reserved': 0, 'in_use': 0},
- 'res1': {'reserved': 0, 'in_use': 1},
- 'res2': {'reserved': 0, 'in_use': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project(
- self.ctxt, 'project1'))
+ 'res0': {'reserved': 0, 'in_use': 0},
+ 'res1': {'reserved': 0, 'in_use': 1},
+ 'res2': {'reserved': 0, 'in_use': 2}}
+ self.assertEqual(expected,
+ db.quota_usage_get_all_by_project(
+ self.ctxt,
+ 'project1'))
class DBAPIQuotaTestCase(BaseTest):
for i in range(3):
quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
- 'res0': 0,
- 'res1': 1,
- 'res2': 2})
+ 'res0': 0,
+ 'res1': 1,
+ 'res2': 2})
def test_quota_update(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
def test_quota_update_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
- db.quota_update, self.ctxt, 'project1', 'resource1', 42)
+ db.quota_update,
+ self.ctxt,
+ 'project1',
+ 'resource1',
+ 42)
def test_quota_get_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
- db.quota_get, self.ctxt, 'project1', 'resource1')
+ db.quota_get,
+ self.ctxt,
+ 'project1',
+ 'resource1')
def test_quota_reserve(self):
reservations = _quota_reserve(self.ctxt, 'project1')
reservations = _quota_reserve(self.ctxt, 'project1')
db.quota_destroy_all_by_project(self.ctxt, 'project1')
self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
- {'project_id': 'project1'})
- self.assertEqual(db.quota_usage_get_all_by_project(
- self.ctxt, 'project1'),
- {'project_id': 'project1'})
+ {'project_id': 'project1'})
+ self.assertEqual(db.quota_usage_get_all_by_project(self.ctxt,
+ 'project1'),
+ {'project_id': 'project1'})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
- db.reservation_get, self.ctxt, r)
+ db.reservation_get,
+ self.ctxt,
+ r)
def test_quota_usage_get_nonexistent(self):
- self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
- self.ctxt, 'p1', 'nonexitent_resource')
+ self.assertRaises(exception.QuotaUsageNotFound,
+ db.quota_usage_get,
+ self.ctxt,
+ 'p1',
+ 'nonexitent_resource')
def test_quota_usage_get(self):
reservations = _quota_reserve(self.ctxt, 'p1')
syncs = self._enum_syncsvsvs()
for sync in syncs:
if (sync['SyncedElement'] == objectpath['SyncedElement'] and
- sync['SystemElement'] == objectpath['SystemElement']):
+ sync['SystemElement'] == objectpath['SystemElement']):
foundsync = sync
break
return foundsync
self._configuration.glusterfs_sparsed_volumes = True
self.stubs = stubout.StubOutForTesting()
- self._driver = glusterfs.GlusterfsDriver(
- configuration=self._configuration)
+ self._driver =\
+ glusterfs.GlusterfsDriver(configuration=self._configuration)
self._driver.shares = {}
def tearDown(self):
ISCSI_3PAR_RET = (
'Id,Name,Persona,-WWN/iSCSI_Name-,Port,IP_addr\r\n'
'75,fakehost.foo,Generic,iqn.1993-08.org.debian:01:222,---,'
- '10.10.222.12\r\n'
+ '10.10.222.12\r\n'
'\r\n'
'Id,Name,-Initiator_CHAP_Name-,-Target_CHAP_Name-\r\n'
'75,fakehost.foo,--,--\r\n'
self.flags(iscsi_helper='lioadm')
self.script_template = "\n".join([
'rtstool create '
- '/foo iqn.2011-09.org.foo.bar:blaa test_id test_pass',
+ '/foo iqn.2011-09.org.foo.bar:blaa test_id test_pass',
'rtstool delete iqn.2010-10.org.openstack:volume-blaa'])
drv = self._driver
self.configuration.nfs_oversub_ratio = -1
self.assertRaises(exception.NfsException,
- drv.do_setup, IsA(context.RequestContext))
+ drv.do_setup,
+ IsA(context.RequestContext))
def test_setup_should_throw_error_if_used_ratio_less_than_zero(self):
"""do_setup should throw error if nfs_used_ratio is less than 0."""
drv = self._driver
self.configuration.nfs_used_ratio = -1
self.assertRaises(exception.NfsException,
- drv.do_setup, IsA(context.RequestContext))
+ drv.do_setup,
+ IsA(context.RequestContext))
def test_setup_should_throw_error_if_used_ratio_greater_than_one(self):
"""do_setup should throw error if nfs_used_ratio is greater than 1."""
drv = self._driver
self.configuration.nfs_used_ratio = 2
self.assertRaises(exception.NfsException,
- drv.do_setup, IsA(context.RequestContext))
+ drv.do_setup,
+ IsA(context.RequestContext))
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
"""do_setup should throw error if nfs client is not installed."""
def _set_brain(self, default_rule):
brain = cinder.openstack.common.policy.Brain(self.rules,
- default_rule)
+ default_rule)
cinder.openstack.common.policy.set_brain(brain)
def tearDown(self):
'auth_enabled': False,
'auth_username': None,
'secret_type': 'ceph',
- 'secret_uuid': None,
- }
+ 'secret_uuid': None, }
}
actual = self.driver.initialize_connection(dict(name=name), None)
self.assertDictMatch(expected, actual)
def _state_transition(self, function, fcmap):
if (function == 'wait' and
- 'wait' not in self._transitions[fcmap['status']]):
+ 'wait' not in self._transitions[fcmap['status']]):
return ('', '')
if fcmap['status'] == 'copying' and function == 'wait':
self.USESIM = True
if self.USESIM:
self.driver = StorwizeSVCFakeDriver(
- configuration=conf.Configuration(None))
+ configuration=conf.Configuration(None))
self._def_flags = {'san_ip': 'hostname',
'san_login': 'user',
'san_password': 'pass',
self._host_name = 'storwize-svc-test'
self._host_ip = '1.234.56.78'
self._host_wwpns = [
- str(random.randint(0, 9999999999999999)).zfill(16),
- str(random.randint(0, 9999999999999999)).zfill(16)]
+ str(random.randint(0, 9999999999999999)).zfill(16),
+ str(random.randint(0, 9999999999999999)).zfill(16)]
self._iscsi_name = ('test.initiator.%s' %
str(random.randint(10000, 99999)))
self.sim = StorwizeSVCManagementSimulator('volpool')
self.driver.set_fake_storage(self.sim)
else:
self.driver = storwize_svc.StorwizeSVCDriver(
- configuration=conf.Configuration(None))
+ configuration=conf.Configuration(None))
self._def_flags = {'san_ip': '1.111.11.11',
'san_login': 'user',
'san_password': 'password',
for line in lines:
val = line.split('=')
if (len(val) == 2 and
- val[0].strip().replace(" ", "") == 'port_name'):
+ val[0].strip().replace(" ", "") == 'port_name'):
self._host_wwpns.append(val[1].strip()[3:-1])
self.assertNotEqual(len(self._host_wwpns), 0)
'Unexpected user id')
self.assertEquals(volume['id'], response['volume_id'],
- 'Unexpected volume id in response.')
+ 'Unexpected volume id in response.')
self.assertEquals(transfer['id'], response['id'],
- 'Unexpected transfer id in response.')
+ 'Unexpected transfer id in response.')
def test_transfer_get(self):
tx_api = transfer_api.API()
import cinder.openstack.common.importutils
API = cinder.openstack.common.importutils.import_class(
- cinder.flags.FLAGS.transfer_api_class)
+ cinder.flags.FLAGS.transfer_api_class)
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from cinder.volume import <foo>' elsewhere.
-import cinder.flags
-import cinder.openstack.common.importutils
+import cinder.flags as flags
+import cinder.openstack.common.importutils as import_utils
-API = cinder.openstack.common.importutils.import_class(
- cinder.flags.FLAGS.volume_api_class)
+API = import_utils.import_class(flags.FLAGS.volume_api_class)
run_as_root=True)
for target in out.splitlines():
if (self.configuration.iscsi_ip_address in target
- and volume_name in target):
+ and volume_name in target):
return target
return None
'storage_system': storage_system})
configservice = self._find_storage_configuration_service(
- storage_system)
+ storage_system)
if configservice is None:
exception_message = (_("Error Create Volume: %(volumename)s. "
"Storage Configuration Service not found for "
'size': volumesize})
rc, job = self.conn.InvokeMethod(
- 'CreateOrModifyElementFromStoragePool',
- configservice, ElementName=volumename, InPool=pool,
- ElementType=self._getnum(5, '16'),
- Size=self._getnum(volumesize, '64'))
+ 'CreateOrModifyElementFromStoragePool',
+ configservice, ElementName=volumename, InPool=pool,
+ ElementType=self._getnum(5, '16'),
+ Size=self._getnum(volumesize, '64'))
LOG.debug(_('Create Volume: %(volumename)s Return code: %(rc)lu')
% {'volumename': volumename,
# Create a Clone from snapshot
rc, job = self.conn.InvokeMethod(
- 'CreateElementReplica', repservice,
- ElementName=volumename,
- SyncType=self._getnum(8, '16'),
- SourceElement=snapshot_instance.path)
+ 'CreateElementReplica', repservice,
+ ElementName=volumename,
+ SyncType=self._getnum(8, '16'),
+ SourceElement=snapshot_instance.path)
if rc != 0L:
rc, errordesc = self._wait_for_job_complete(job)
'sync_name': str(sync_name)})
rc, job = self.conn.InvokeMethod(
- 'ModifyReplicaSynchronization',
- repservice,
- Operation=self._getnum(8, '16'),
- Synchronization=sync_name)
+ 'ModifyReplicaSynchronization',
+ repservice,
+ Operation=self._getnum(8, '16'),
+ Synchronization=sync_name)
LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s '
- 'Snapshot: %(snapshotname)s Return code: %(rc)lu')
+ 'Snapshot: %(snapshotname)s Return code: %(rc)lu')
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc})
# Create a Clone from source volume
rc, job = self.conn.InvokeMethod(
- 'CreateElementReplica', repservice,
- ElementName=volumename,
- SyncType=self._getnum(8, '16'),
- SourceElement=src_instance.path)
+ 'CreateElementReplica', repservice,
+ ElementName=volumename,
+ SyncType=self._getnum(8, '16'),
+ SourceElement=src_instance.path)
if rc != 0L:
rc, errordesc = self._wait_for_job_complete(job)
'sync_name': str(sync_name)})
rc, job = self.conn.InvokeMethod(
- 'ModifyReplicaSynchronization',
- repservice,
- Operation=self._getnum(8, '16'),
- Synchronization=sync_name)
+ 'ModifyReplicaSynchronization',
+ repservice,
+ Operation=self._getnum(8, '16'),
+ Synchronization=sync_name)
LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s '
'Source Volume: %(srcname)s Return code: %(rc)lu')
storage_system = vol_instance['SystemName']
- configservice = self._find_storage_configuration_service(
- storage_system)
+ configservice =\
+ self._find_storage_configuration_service(storage_system)
if configservice is None:
exception_message = (_("Error Delete Volume: %(volumename)s. "
"Storage Configuration Service not found.")
'name': volumename,
'vol_instance': str(vol_instance.path)})
- rc, job = self.conn.InvokeMethod(
- 'EMCReturnToStoragePool',
- configservice, TheElements=[vol_instance.path])
+ rc, job =\
+ self.conn.InvokeMethod('EMCReturnToStoragePool',
+ configservice,
+ TheElements=[vol_instance.path])
if rc != 0L:
rc, errordesc = self._wait_for_job_complete(job)
'elementname': snapshotname,
'sourceelement': str(vol_instance.path)})
- rc, job = self.conn.InvokeMethod(
- 'CreateElementReplica', repservice,
- ElementName=snapshotname,
- SyncType=self._getnum(7, '16'),
- SourceElement=vol_instance.path)
+ rc, job =\
+ self.conn.InvokeMethod('CreateElementReplica', repservice,
+ ElementName=snapshotname,
+ SyncType=self._getnum(7, '16'),
+ SourceElement=vol_instance.path)
LOG.debug(_('Create Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Return code: %(rc)lu')
% {'snapshot': snapshotname,
'volume': volumename})
- sync_name, storage_system = self._find_storage_sync_sv_sv(
- snapshotname, volumename, False)
+ sync_name, storage_system =\
+ self._find_storage_sync_sv_sv(snapshotname, volumename, False)
if sync_name is None:
LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s '
'not found on the array. No snapshot to delete.')
'service': str(repservice),
'sync_name': str(sync_name)})
- rc, job = self.conn.InvokeMethod(
- 'ModifyReplicaSynchronization',
- repservice,
- Operation=self._getnum(19, '16'),
- Synchronization=sync_name)
+ rc, job =\
+ self.conn.InvokeMethod('ModifyReplicaSynchronization',
+ repservice,
+ Operation=self._getnum(19, '16'),
+ Synchronization=sync_name)
LOG.debug(_('Delete Snapshot: Volume: %(volumename)s Snapshot: '
'%(snapshotname)s Return code: %(rc)lu')
'initiator': initiators})
if lunmask_ctrl is None:
- rc, controller = self.conn.InvokeMethod(
- 'ExposePaths',
- configservice, LUNames=[lun_name],
- InitiatorPortIDs=initiators,
- DeviceAccesses=[self._getnum(2, '16')])
+ rc, controller =\
+ self.conn.InvokeMethod('ExposePaths',
+ configservice, LUNames=[lun_name],
+ InitiatorPortIDs=initiators,
+ DeviceAccesses=[self._getnum(2, '16')])
else:
LOG.debug(_('ExposePaths parameter '
'LunMaskingSCSIProtocolController: '
'%(lunmasking)s')
% {'lunmasking': str(lunmask_ctrl)})
- rc, controller = self.conn.InvokeMethod(
- 'ExposePaths',
- configservice, LUNames=[lun_name],
- DeviceAccesses=[self._getnum(2, '16')],
- ProtocolControllers=[lunmask_ctrl])
+ rc, controller =\
+ self.conn.InvokeMethod('ExposePaths',
+ configservice, LUNames=[lun_name],
+ DeviceAccesses=[self._getnum(2, '16')],
+ ProtocolControllers=[lunmask_ctrl])
if rc != 0L:
msg = (_('Error mapping volume %s.') % volumename)
'masking_group': str(masking_group),
'vol': str(vol_instance.path)})
- rc, job = self.conn.InvokeMethod(
- 'AddMembers', configservice,
- MaskingGroup=masking_group, Members=[vol_instance.path])
+ rc, job =\
+ self.conn.InvokeMethod('AddMembers',
+ configservice,
+ MaskingGroup=masking_group,
+ Members=[vol_instance.path])
if rc != 0L:
rc, errordesc = self._wait_for_job_complete(job)
def _get_storage_type(self, filename=None):
"""Get the storage type from the config file."""
- if filename == None:
+ if filename is None:
filename = self.configuration.cinder_emc_config_file
file = open(filename, 'r')
raise exception.VolumeBackendAPIException(data=exception_message)
def _get_masking_view(self, filename=None):
- if filename == None:
+ if filename is None:
filename = self.configuration.cinder_emc_config_file
file = open(filename, 'r')
return None
def _get_ecom_cred(self, filename=None):
- if filename == None:
+ if filename is None:
filename = self.configuration.cinder_emc_config_file
file = open(filename, 'r')
return None
def _get_ecom_server(self, filename=None):
- if filename == None:
+ if filename is None:
filename = self.configuration.cinder_emc_config_file
file = open(filename, 'r')
for ctrl in controllers:
if storage_system != ctrl['SystemName']:
continue
- associators = self.conn.Associators(
- ctrl,
- resultClass='EMC_StorageHardwareID')
+ associators =\
+ self.conn.Associators(ctrl,
+ resultClass='EMC_StorageHardwareID')
for assoc in associators:
# if EMC_StorageHardwareID matches the initiator,
# we found the existing EMC_LunMaskingSCSIProtocolController
connector):
foundCtrl = None
initiators = self._find_initiator_names(connector)
- controllers = self.conn.AssociatorNames(
- vol_instance.path,
- resultClass='EMC_LunMaskingSCSIProtocolController')
+ controllers =\
+ self.conn.AssociatorNames(
+ vol_instance.path,
+ resultClass='EMC_LunMaskingSCSIProtocolController')
for ctrl in controllers:
- associators = self.conn.Associators(
- ctrl,
- resultClass='EMC_StorageHardwareID')
+ associators =\
+ self.conn.Associators(
+ ctrl,
+ resultClass='EMC_StorageHardwareID')
for assoc in associators:
# if EMC_StorageHardwareID matches the initiator,
# we found the existing EMC_LunMaskingSCSIProtocolController
pass
unitnames = self.conn.ReferenceNames(
- vol_instance.path,
- ResultClass='CIM_ProtocolControllerForUnit')
+ vol_instance.path,
+ ResultClass='CIM_ProtocolControllerForUnit')
for unitname in unitnames:
controller = unitname['Antecedent']
sp = idarray[2]
if (storage_system == storsystemname and
- owningsp == sp):
+ owningsp == sp):
foundSystem = system
LOG.debug(_("Found Storage Processor System: %s")
% (str(system)))
def __init__(self, *args, **kwargs):
super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
- self.common = emc_smis_common.EMCSMISCommon(
- 'iSCSI',
- configuration=self.configuration)
+ self.common =\
+ emc_smis_common.EMCSMISCommon('iSCSI',
+ configuration=self.configuration)
def check_for_setup_error(self):
pass
raise exception.NfsException(msg)
if ((not self.configuration.nfs_used_ratio > 0) and
- (self.configuration.nfs_used_ratio <= 1)):
+ (self.configuration.nfs_used_ratio <= 1)):
msg = _("NFS config 'nfs_used_ratio' invalid. Must be > 0 "
"and <= 1.0: %s") % self.configuration.nfs_used_ratio
LOG.error(msg)
raise exception.InvalidInput(reason=err)
if ('domain' not in cpg
- or cpg['domain'] != self.config.hp3par_domain):
+ or cpg['domain'] != self.config.hp3par_domain):
err = ("CPG's domain '%s' and config option hp3par_domain '%s'"
" must be the same" %
(cpg['domain'], self.config.hp3par_domain))
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise exception.ProcessExecutionError(
- exit_code=last_exception.exit_code,
- stdout=last_exception.stdout,
- stderr=last_exception.stderr,
- cmd=last_exception.cmd)
+ exit_code=last_exception.exit_code,
+ stdout=last_exception.stdout,
+ stderr=last_exception.stderr,
+ cmd=last_exception.cmd)
except AttributeError:
raise exception.ProcessExecutionError(
- exit_code=-1,
- stdout="",
- stderr="Error running SSH command",
- cmd=command)
+ exit_code=-1,
+ stdout="",
+ stderr="Error running SSH command",
+ cmd=command)
except Exception as e:
LOG.error(_("Error running SSH command: %s") % command)
for num in range(0, 128):
ch = str(chr(num))
if (not ch.isalnum() and ch != ' ' and ch != '.'
- and ch != '-' and ch != '_'):
+ and ch != '-' and ch != '_'):
invalid_ch_in_host = invalid_ch_in_host + ch
self._string_host_name_filter = string.maketrans(
invalid_ch_in_host, '-' * len(invalid_ch_in_host))
# If '!' not found, return the string and two empty strings
attr_name, foo, attr_val = attr_line.partition('!')
if (attr_name == 'iscsi_name' and
- 'initiator' in connector and
- attr_val == connector['initiator']):
- return host
+ 'initiator' in connector and
+ attr_val == connector['initiator']):
+ return host
elif (attr_name == 'WWPN' and
'wwpns' in connector and
attr_val.lower() in
if opts['protocol'] == 'iSCSI':
# Implemented in base iSCSI class
return super(StorwizeSVCDriver, self).copy_image_to_volume(
- context, volume, image_service, image_id)
+ context, volume, image_service, image_id)
else:
raise NotImplementedError()
if opts['protocol'] == 'iSCSI':
# Implemented in base iSCSI class
return super(StorwizeSVCDriver, self).copy_volume_to_image(
- context, volume, image_service, image_meta)
+ context, volume, image_service, image_meta)
else:
raise NotImplementedError()
def _check_extra_specs_match(vol_type, searchdict):
for k, v in searchdict.iteritems():
if (k not in vol_type['extra_specs'].keys()
- or vol_type['extra_specs'][k] != v):
+ or vol_type['extra_specs'][k] != v):
return False
return True
volume_type = get_volume_type(context.get_admin_context(),
volume_type_id)
if (volume_type.get('extra_specs') is None or
- volume_type['extra_specs'].get(key) != value):
+ volume_type['extra_specs'].get(key) != value):
return False
else:
return True
commands = bash tools/lintstack.sh
[flake8]
-ignore = E12,E711,E712,H302,H303,H304,H401,H402,H403,H404,F
+ignore = E711,E712,H302,H303,H304,H401,H402,H403,H404,F
builtins = _
exclude = .venv,.tox,dist,doc,openstack,*egg
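Note on the tox.ini hunk above: dropping E12 from the flake8 ignore list turns on the continuation-line indentation checks (the E12x family, e.g. E127/E128), which is what motivates every re-indentation hunk in this diff. The snippet below is a minimal illustrative sketch only; the helper and variable names are placeholders and do not come from this change.

    def some_call(a, b):
        """Hypothetical helper used only to illustrate continuation indents."""
        return a + b

    first_argument = 1
    second_argument = 2

    # Previously ignored; with E12 enforced, flake8 would typically flag a
    # continuation line that does not line up under the opening parenthesis
    # (e.g. E128, continuation line under-indented for visual indent):
    #     result = some_call(first_argument,
    #         second_argument)

    # Compliant visual indent: continuation lines align with the opening
    # parenthesis.
    result = some_call(first_argument,
                       second_argument)

    # Compliant hanging indent: nothing follows the opening parenthesis and
    # the wrapped arguments get their own indentation level.
    result = some_call(
        first_argument,
        second_argument)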