snap_count_total = 0
snap_sum_total = 0
resources = [{'resource': {'host': host, 'project': '(total)',
- 'volume_count': str(count),
- 'total_volume_gb': str(sum),
- 'snapshot_count': str(snap_count_total),
- 'total_snapshot_gb': str(snap_sum_total)}}]
+ 'volume_count': str(count),
+ 'total_volume_gb': str(sum),
+ 'snapshot_count': str(snap_count_total),
+ 'total_snapshot_gb': str(snap_sum_total)}}]
project_ids = [v['project_id'] for v in volume_refs]
project_ids = list(set(project_ids))
msg = _("Invalid request to attach volume to an "
"instance %(instance_uuid)s and a "
"host %(host_name)s simultaneously") % {
- 'instance_uuid': instance_uuid,
- 'host_name': host_name,
- }
+ 'instance_uuid': instance_uuid,
+ 'host_name': host_name,
+ }
raise webob.exc.HTTPBadRequest(explanation=msg)
elif instance_uuid is None and host_name is None:
msg = _("Invalid request to attach volume to an invalid target")
restore_cmd.append('-replace=yes') # suppress prompt
restore_cmd.extend(['-quiet',
- '-password=%s' % self.tsm_password,
- backup_path])
+ '-password=%s' % self.tsm_password,
+ backup_path])
if restore_path != backup_path:
restore_cmd.append(restore_path)
if actual_status != expected_status:
err = _('Create backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') % {
- 'expected_status': expected_status,
- 'actual_status': actual_status,
- }
+ 'expected_status': expected_status,
+ 'actual_status': actual_status,
+ }
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
raise exception.InvalidVolume(reason=err)
if actual_status != expected_status:
err = _('Create backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
- 'expected_status': expected_status,
- 'actual_status': actual_status,
- }
+ 'expected_status': expected_status,
+ 'actual_status': actual_status,
+ }
self.db.volume_update(context, volume_id, {'status': 'available'})
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
- 'configured_service': configured_service,
- 'backup_service': backup_service,
- }
+ 'configured_service': configured_service,
+ 'backup_service': backup_service,
+ }
self.db.backup_update(context, backup_id, {'status': 'available'})
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
- 'expected_status': expected_status,
- 'actual_status': actual_status,
- }
+ 'expected_status': expected_status,
+ 'actual_status': actual_status,
+ }
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
raise exception.InvalidBackup(reason=err)
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
- 'configured_service': configured_service,
- 'backup_service': backup_service,
- }
+ 'configured_service': configured_service,
+ 'backup_service': backup_service,
+ }
self.db.backup_update(context, backup_id,
{'status': 'error'})
raise exception.InvalidBackup(reason=err)
self.db.backup_update(context,
backup_id,
{'status': 'error',
- 'fail_reason': msg})
+ 'fail_reason': msg})
raise exception.InvalidBackup(reason=msg)
required_import_options = ['display_name',
#multipath installed, discovering other targets if available
target_portal = connection_properties['target_portal']
out = self._run_iscsiadm_bare(['-m',
- 'discovery',
- '-t',
- 'sendtargets',
- '-p',
- target_portal],
+ 'discovery',
+ '-t',
+ 'sendtargets',
+ '-p',
+ target_portal],
check_exit_code=[0, 255])[0] \
or ""
# Targets for multiple paths for the same multipath device
# may not be the same.
out = self._run_iscsiadm_bare(['-m',
- 'discovery',
- '-t',
- 'sendtargets',
- '-p',
+ 'discovery',
+ '-t',
+ 'sendtargets',
+ '-p',
connection_properties['target_portal']],
check_exit_code=[0, 255])[0] \
or ""
for wwn in wwns:
target_wwn = "0x%s" % wwn.lower()
host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
- (pci_num,
- target_wwn,
- connection_properties.get('target_lun', 0)))
+ (pci_num,
+ target_wwn,
+ connection_properties.get(
+ 'target_lun', 0)))
host_devices.append(host_device)
if len(host_devices) == 0:
LOG = logging.getLogger(__name__)
image_helper_opt = [cfg.StrOpt('image_conversion_dir',
- default='$state_path/conversion',
- help='Directory used for temporary storage '
- 'during image conversion'), ]
+ default='$state_path/conversion',
+ help='Directory used for temporary storage '
+ 'during image conversion'), ]
CONF = cfg.CONF
CONF.register_opts(image_helper_opt)
'volume_id': volume_id,
'last_host': last_host,
'exc': exc,
- }
+ }
LOG.error(msg)
def _populate_retry(self, filter_properties, properties):
"volume %(volume_id)s") % {
'max_attempts': max_attempts,
'volume_id': volume_id,
- }
+ }
raise exception.NoValidHost(reason=msg)
def _get_weighted_candidates(self, context, request_spec,
free = math.floor(free_space * (1 - reserved))
if free < volume_size:
LOG.warning(_("Insufficient free space for volume creation "
- "(requested / avail): "
- "%(requested)s/%(available)s")
+ "(requested / avail): "
+ "%(requested)s/%(available)s")
% {'requested': volume_size,
'available': free})
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
- 'host': 'host1', 'zone': 'cinder',
- 'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
- {'binary': 'cinder-volume',
- 'host': 'host1', 'zone': 'cinder',
- 'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
- {'binary': 'cinder-scheduler', 'host': 'host2',
- 'zone': 'cinder',
- 'status': 'enabled', 'state': 'down',
- 'updated_at': datetime(2012, 9, 19, 6, 55, 34)},
- {'binary': 'cinder-volume', 'host': 'host2',
- 'zone': 'cinder',
- 'status': 'disabled', 'state': 'down',
- 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]}
+ 'host': 'host1', 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(
+ 2012, 10, 29, 13, 42, 2)},
+ {'binary': 'cinder-volume',
+ 'host': 'host1', 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(
+ 2012, 10, 29, 13, 42, 5)},
+ {'binary': 'cinder-scheduler',
+ 'host': 'host2',
+ 'zone': 'cinder',
+ 'status': 'enabled', 'state': 'down',
+ 'updated_at': datetime(
+ 2012, 9, 19, 6, 55, 34)},
+ {'binary': 'cinder-volume',
+ 'host': 'host2',
+ 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'down',
+ 'updated_at': datetime(
+ 2012, 9, 18, 8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_detail(self):
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
- 'host': 'host1', 'zone': 'cinder',
- 'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 2),
- 'disabled_reason': 'test1'},
- {'binary': 'cinder-volume',
- 'host': 'host1', 'zone': 'cinder',
- 'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29, 13, 42, 5),
- 'disabled_reason': 'test2'},
- {'binary': 'cinder-scheduler', 'host': 'host2',
- 'zone': 'cinder',
- 'status': 'enabled', 'state': 'down',
- 'updated_at': datetime(2012, 9, 19, 6, 55, 34),
- 'disabled_reason': ''},
- {'binary': 'cinder-volume', 'host': 'host2',
- 'zone': 'cinder',
- 'status': 'disabled', 'state': 'down',
- 'updated_at': datetime(2012, 9, 18, 8, 3, 38),
- 'disabled_reason': 'test4'}]}
+ 'host': 'host1', 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(
+ 2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'cinder-volume',
+ 'host': 'host1', 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(
+ 2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'cinder-scheduler',
+ 'host': 'host2',
+ 'zone': 'cinder',
+ 'status': 'enabled', 'state': 'down',
+ 'updated_at': datetime(
+ 2012, 9, 19, 6, 55, 34),
+ 'disabled_reason': ''},
+ {'binary': 'cinder-volume',
+ 'host': 'host2',
+ 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'down',
+ 'updated_at': datetime(
+ 2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host(self):
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10,
29, 13, 42, 2)},
- {'binary': 'cinder-volume', 'host': 'host1',
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29,
'updated_at': datetime(2012, 10,
29, 13, 42, 2),
'disabled_reason': 'test1'},
- {'binary': 'cinder-volume', 'host': 'host1',
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29,
body = {"os-volume_upload_image": vol}
req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id)
res_dict = self.controller._volume_upload_image(req, id, body)
- expected = {'os-volume_upload_image': {'id': id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'status': 'uploading',
- 'display_description': 'displaydesc',
- 'size': 1,
- 'volume_type': {'name': 'vol_type_name'},
- 'image_id': 1,
- 'container_format': 'bare',
- 'disk_format': 'raw',
- 'image_name': 'image_name'}}
+ expected = {'os-volume_upload_image':
+ {'id': id,
+ 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+ 'status': 'uploading',
+ 'display_description': 'displaydesc',
+ 'size': 1,
+ 'volume_type': {'name': 'vol_type_name'},
+ 'image_id': 1,
+ 'container_format': 'bare',
+ 'disk_format': 'raw',
+ 'image_name': 'image_name'}}
self.assertDictMatch(res_dict, expected)
def test_copy_volume_to_image_volumenotfound(self):
"availability_zone": "nova",
"imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
ex = {'volume': {'attachments': [{'device': '/',
- 'host_name': None,
- 'id': '1',
- 'server_id': 'fakeuuid',
- 'volume_id': '1'}],
+ 'host_name': None,
+ 'id': '1',
+ 'server_id': 'fakeuuid',
+ 'volume_id': '1'}],
'availability_zone': 'nova',
'bootable': 'false',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
def create_lu(self, cmd, ip0, user, pw, hdp, size, name):
vol_id = name
_out = ("LUN: %d HDP: fs2 size: %s MB, is successfully created" %
- (self.start_lun, size))
+ (self.start_lun, size))
self.createVolume(name, vol_id, size, "create-lu")
self.start_lun += 1
return _out
def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
_out = ("LUN: %s HDP: 9 size: %s MB, is successfully created" %
- (self.start_lun, size))
+ (self.start_lun, size))
id = name
LOG.info("HNAS Create_Dup: %d" % self.start_lun)
if LUN_INFO['ID'] is None:
out = 'command operates successfully, but no information.'
elif CLONED_LUN_INFO['ID'] is None:
- out = """/>showlun
+ msg = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
---------------------------------------------------------------------------
%s %s -- Normal %s %s %s 64 THICK
===========================================================================
-""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
- str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'])
+"""
+ out = msg % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'],
+ LUN_INFO['Owner Controller'],
+ str(int(LUN_INFO['Size']) * 1024),
+ LUN_INFO['Name'])
else:
- out = """/>showlun
+ msg = """/>showlun
============================================================================
LUN Information
----------------------------------------------------------------------------
%s %s -- Normal %s %s %s 64 THICK
%s %s -- Normal %s %s %s 64 THICK
============================================================================
-""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
- str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'],
- CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'],
- CLONED_LUN_INFO['Owner Controller'],
- str(int(CLONED_LUN_INFO['Size']) * 1024),
- CLONED_LUN_INFO['Name'])
+"""
+ out = msg % (
+ LUN_INFO['ID'], LUN_INFO['RAID Group ID'],
+ LUN_INFO['Owner Controller'],
+ str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'],
+ CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'],
+ CLONED_LUN_INFO['Owner Controller'],
+ str(int(CLONED_LUN_INFO['Size']) * 1024),
+ CLONED_LUN_INFO['Name'])
elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values():
- out = """/>showlun
+ msg = """/>showlun
================================================
LUN Information
------------------------------------------------
SnapShot ID | %s
LunCopy ID | %s
================================================
-""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'],
- LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
- LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
- LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
- if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol'] else
- (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
- CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'],
- CLONED_LUN_INFO['Owner Controller'],
- CLONED_LUN_INFO['Worker Controller'],
- CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
- CLONED_LUN_INFO['LunCopy ID']))
+"""
+ out = msg % (
+ (LUN_INFO['ID'], LUN_INFO['Name'],
+ LUN_INFO['Visible Capacity'],
+ LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
+ LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
+ LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
+ if (params[params.index('-lun') + 1] ==
+ VOLUME_SNAP_ID['vol']) else
+ (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
+ CLONED_LUN_INFO['Visible Capacity'],
+ CLONED_LUN_INFO['RAID Group ID'],
+ CLONED_LUN_INFO['Owner Controller'],
+ CLONED_LUN_INFO['Worker Controller'],
+ CLONED_LUN_INFO['Lun Type'],
+ CLONED_LUN_INFO['SnapShot ID'],
+ CLONED_LUN_INFO['LunCopy ID']))
else:
out = 'ERROR: The object does not exist.'
return out
return out
def cli_showrg(self, params):
- out = """/>showrg
+ msg = """/>showrg
=====================================================================
RAID Group Information
---------------------------------------------------------------------
0 RAID6 Normal 1024 0,0;0,2; RAID003
%s %s %s %s %s %s
=====================================================================
--""" % (POOL_SETTING['ID'], POOL_SETTING['Level'],
- POOL_SETTING['Status'], POOL_SETTING['Free Capacity'],
- POOL_SETTING['Disk List'], POOL_SETTING['Name'])
+-"""
+ out = msg % (POOL_SETTING['ID'], POOL_SETTING['Level'],
+ POOL_SETTING['Status'], POOL_SETTING['Free Capacity'],
+ POOL_SETTING['Disk List'], POOL_SETTING['Name'])
return out
def cli_showpool(self, params):
LUNCOPY_INFO['State'] = 'Copying'
elif LUNCOPY_INFO['State'] == 'Copying':
LUNCOPY_INFO['State'] = 'Complete'
- out = """/>showluncopy
+ msg = """/>showluncopy
============================================================================
LUN Copy Information
----------------------------------------------------------------------------
----------------------------------------------------------------------------
%s %s %s %s %s
============================================================================
-""" % (LUNCOPY_INFO['Name'], LUNCOPY_INFO['ID'], LUNCOPY_INFO['Type'],
- LUNCOPY_INFO['State'], LUNCOPY_INFO['Status'])
+"""
+ out = msg % (LUNCOPY_INFO['Name'], LUNCOPY_INFO['ID'],
+ LUNCOPY_INFO['Type'], LUNCOPY_INFO['State'],
+ LUNCOPY_INFO['Status'])
return out
def cli_delluncopy(self, params):
return out
def cli_showrespool(self, params):
- out = """/>showrespool
+ msg = """/>showrespool
===========================================================================
Resource Pool Information
---------------------------------------------------------------------------
A %s 0.0 %s 80
B %s 0.0 %s 80
===========================================================================
--""" % (RESPOOL_A_SIM['Size'], RESPOOL_A_SIM['Valid Size'],
- RESPOOL_B_SIM['Size'], RESPOOL_B_SIM['Valid Size'])
+-"""
+ out = msg % (RESPOOL_A_SIM['Size'], RESPOOL_A_SIM['Valid Size'],
+ RESPOOL_B_SIM['Size'], RESPOOL_B_SIM['Valid Size'])
return out
def cli_showiscsitgtname(self, params):
if MAP_INFO['INI Port ID'] is None:
out = 'command operates successfully, but no information.'
else:
- out = """/>showhostport
+ msg = """/>showhostport
============================================================================
Host Port Information
----------------------------------------------------------------------------
----------------------------------------------------------------------------
%s %s %s %s %s Unconnected Default
============================================================================
-""" % (MAP_INFO['INI Port ID'], MAP_INFO['INI Port Name'],
- MAP_INFO['INI Port Info'], MAP_INFO['INI Port Type'],
- MAP_INFO['Host ID'])
+"""
+ out = msg % (MAP_INFO['INI Port ID'], MAP_INFO['INI Port Name'],
+ MAP_INFO['INI Port Info'], MAP_INFO['INI Port Type'],
+ MAP_INFO['Host ID'])
return out
def cli_addhostport(self, params):
if MAP_INFO['DEV LUN ID'] is None:
out = 'command operates successfully, but no information.'
else:
- out = """/>showhostmap
+ msg = """/>showhostmap
===========================================================================
Map Information
---------------------------------------------------------------------------
----------------------------------------------------------------------------
2147483649 %s %s %s %s Host: %s %s %s HOST No --
============================================================================
-""" % (LUN_INFO['Worker Controller'], LUN_INFO['ID'], LUN_INFO['LUN WWN'],
- MAP_INFO['Host LUN ID'], MAP_INFO['Host ID'], LUN_INFO['RAID Group ID'],
- str(int(LUN_INFO['Size']) * 1024))
+"""
+ out = msg % (LUN_INFO['Worker Controller'], LUN_INFO['ID'],
+ LUN_INFO['LUN WWN'], MAP_INFO['Host LUN ID'],
+ MAP_INFO['Host ID'], LUN_INFO['RAID Group ID'],
+ str(int(LUN_INFO['Size']) * 1024))
return out
def cli_addhostmap(self, params):
if LUN_INFO['ID'] is None:
out = 'command operates successfully, but no information.'
elif CLONED_LUN_INFO['ID'] is None:
- out = """/>showlun
+ msg = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
---------------------------------------------------------------------------
%s %s Normal %s %s %s 64 THICK
===========================================================================
-""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'],
- LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024),
- LUN_INFO['Name'])
+"""
+ out = msg % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'],
+ LUN_INFO['Owner Controller'],
+ str(int(LUN_INFO['Size']) * 1024),
+ LUN_INFO['Name'])
else:
- out = """/>showlun
+ msg = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
%s %s Normal %s %s %s 64 THICK
%s %s Normal %s %s %s 64 THICK
===========================================================================
-""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
- str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'],
- CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'],
- CLONED_LUN_INFO['Owner Controller'],
- str(int(CLONED_LUN_INFO['Size']) * 1024),
- CLONED_LUN_INFO['Name'])
+"""
+ out = msg % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'],
+ LUN_INFO['Owner Controller'],
+ str(int(LUN_INFO['Size']) * 1024),
+ LUN_INFO['Name'], CLONED_LUN_INFO['ID'],
+ CLONED_LUN_INFO['RAID Group ID'],
+ CLONED_LUN_INFO['Owner Controller'],
+ str(int(CLONED_LUN_INFO['Size']) * 1024),
+ CLONED_LUN_INFO['Name'])
elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values():
- out = """/>showlun
+ msg = """/>showlun
================================================
LUN Information
------------------------------------------------
SnapShot ID | %s
LunCopy ID | %s
================================================
-""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'],
- LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
- LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
- LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
- if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol'] else
- (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
- CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'],
- CLONED_LUN_INFO['Owner Controller'],
- CLONED_LUN_INFO['Worker Controller'],
- CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
- CLONED_LUN_INFO['LunCopy ID']))
+"""
+ out = msg % (
+ (LUN_INFO['ID'], LUN_INFO['Name'],
+ LUN_INFO['Visible Capacity'],
+ LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
+ LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
+ LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
+ if (params[params.index('-lun') + 1] ==
+ VOLUME_SNAP_ID['vol']) else
+ (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
+ CLONED_LUN_INFO['Visible Capacity'],
+ CLONED_LUN_INFO['RAID Group ID'],
+ CLONED_LUN_INFO['Owner Controller'],
+ CLONED_LUN_INFO['Worker Controller'],
+ CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
+ CLONED_LUN_INFO['LunCopy ID']))
else:
out = 'ERROR: The object does not exist.'
return out
if LUN_INFO['ID'] is None:
out = 'command operates successfully, but no information.'
elif CLONED_LUN_INFO['ID'] is None:
- out = """/>showlun
+ msg = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
---------------------------------------------------------------------------
%s Normal %s %s %s THICK
===========================================================================
-""" % (LUN_INFO['ID'], LUN_INFO['Owner Controller'],
- str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'])
+"""
+ out = msg % (LUN_INFO['ID'], LUN_INFO['Owner Controller'],
+ str(int(LUN_INFO['Size']) * 1024),
+ LUN_INFO['Name'])
else:
- out = """/>showlun
+ msg = """/>showlun
===========================================================================
LUN Information
---------------------------------------------------------------------------
%s Normal %s %s %s THICK
%s Normal %s %s %s THICK
===========================================================================
-""" % (LUN_INFO['ID'], LUN_INFO['Owner Controller'],
- str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'],
- CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Owner Controller'],
- str(int(CLONED_LUN_INFO['Size']) * 1024), CLONED_LUN_INFO['Name'])
+"""
+ out = msg % (LUN_INFO['ID'], LUN_INFO['Owner Controller'],
+ str(int(LUN_INFO['Size']) * 1024),
+ LUN_INFO['Name'],
+ CLONED_LUN_INFO['ID'],
+ CLONED_LUN_INFO['Owner Controller'],
+ str(int(CLONED_LUN_INFO['Size']) * 1024),
+ CLONED_LUN_INFO['Name'])
elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values():
- out = """/>showlun
+ msg = """/>showlun
================================================
LUN Information
------------------------------------------------
SnapShot ID | %s
LunCopy ID | %s
================================================
-""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'],
- LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
- LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
- LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
- if params[params.index('-lun')] == VOLUME_SNAP_ID['vol'] else
- (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
- CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'],
- CLONED_LUN_INFO['Owner Controller'],
- CLONED_LUN_INFO['Worker Controller'],
- CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
- CLONED_LUN_INFO['LunCopy ID']))
+"""
+ out = msg % (
+ (LUN_INFO['ID'], LUN_INFO['Name'],
+ LUN_INFO['Visible Capacity'],
+ LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
+ LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
+ LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
+ if params[params.index('-lun')] == VOLUME_SNAP_ID['vol'] else
+ (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
+ CLONED_LUN_INFO['Visible Capacity'],
+ CLONED_LUN_INFO['RAID Group ID'],
+ CLONED_LUN_INFO['Owner Controller'],
+ CLONED_LUN_INFO['Worker Controller'],
+ CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
+ CLONED_LUN_INFO['LunCopy ID']))
else:
out = 'ERROR: The object does not exist.'
utils.execute(
'env', 'LC_ALL=C', 'qemu-img', 'info',
TEST_PATH, run_as_root=True).AndReturn(
- (TEST_RETURN, 'ignored')
- )
+ (TEST_RETURN, 'ignored'))
mox.ReplayAll()
utils.execute(
'env', 'LC_ALL=C', 'qemu-img', 'info',
self.TEST_DEV_PATH, run_as_root=True).AndReturn(
- (src_inf, 'ignored')
- )
+ (src_inf, 'ignored'))
if has_qemu and dest_inf:
if bps_limit:
utils.execute(
'env', 'LC_ALL=C', 'qemu-img', 'info',
self.TEST_DEV_PATH, run_as_root=True).AndReturn(
- (dest_inf, 'ignored')
- )
+ (dest_inf, 'ignored'))
self._mox.ReplayAll()
utils.execute(
'env', 'LC_ALL=C', 'qemu-img', 'info',
self.TEST_DEV_PATH, run_as_root=True).AndReturn(
- (qemu_info, 'ignored')
- )
+ (qemu_info, 'ignored'))
self._mox.ReplayAll()
self.assertRaises(exception.ImageUnacceptable,
utils.execute(
'env', 'LC_ALL=C', 'qemu-img', 'info',
mox.IgnoreArg(), run_as_root=True).AndReturn(
- (TEST_RET, 'ignored')
- )
+ (TEST_RET, 'ignored'))
m.ReplayAll()
utils.execute(
'env', 'LC_ALL=C', 'qemu-img', 'info',
mox.IgnoreArg(), run_as_root=True).AndReturn(
- (TEST_RET, 'ignored')
- )
+ (TEST_RET, 'ignored'))
m.ReplayAll()
utils.execute(
'tar', '-xzf', 'archive.tgz', '-C', 'targetpath').AndReturn(
- ('ignored', 'ignored')
- )
+ ('ignored', 'ignored'))
mox.ReplayAll()
utils.execute(
'vhd-util', 'modify', '-n', 'child', '-p', 'parent').AndReturn(
- ('ignored', 'ignored')
- )
+ ('ignored', 'ignored'))
mox.ReplayAll()
utils.execute(
'vhd-util', 'query', '-n', 'vhdfile', '-v').AndReturn(
- ('1024', 'ignored')
- )
+ ('1024', 'ignored'))
mox.ReplayAll()
LOG.info('Called Fake GetClusterCapacity...')
data = {'result':
{'clusterCapacity': {'maxProvisionedSpace': 107374182400,
- 'usedSpace': 1073741824,
- 'compressionPercent': 100,
- 'deDuplicationPercent': 100,
- 'thinProvisioningPercent': 100}}}
+ 'usedSpace': 1073741824,
+ 'compressionPercent': 100,
+ 'deDuplicationPercent': 100,
+ 'thinProvisioningPercent': 100}}}
return data
elif method == 'GetClusterInfo' and version == '1.0':
for host_info in host_infos:
for wwpn in host_info['wwpns']:
rows.append([wwpn, '123456', host_info['id'], 'nodeN',
- 'AABBCCDDEEFF0011', '1', '0123ABC', 'active',
- host_info['host_name'], '', 'host'])
+ 'AABBCCDDEEFF0011', '1', '0123ABC', 'active',
+ host_info['host_name'], '', 'host'])
if self._next_cmd_error['lsfabric'] == 'header_mismatch':
rows[0].pop(0)
else:
cap = vol['capacity']
rows.append([str(vol['id']), vol['name'], vol['IO_group_id'],
- vol['IO_group_name'], 'online', '0',
- self._flags['storwize_svc_volpool_name'],
- cap, 'striped',
- fcmap_info['fc_id'], fcmap_info['fc_name'],
- '', '', vol['uid'],
- fcmap_info['fc_map_count'], '1', 'empty',
- '1', 'no'])
+ vol['IO_group_name'], 'online', '0',
+ self._flags['storwize_svc_volpool_name'],
+ cap, 'striped',
+ fcmap_info['fc_id'], fcmap_info['fc_name'],
+ '', '', vol['uid'],
+ fcmap_info['fc_map_count'], '1', 'empty',
+ '1', 'no'])
if 'obj' not in kwargs:
return self._print_info_cmd(rows=rows, **kwargs)
to_delete.append(k)
else:
rows.append([v['id'], v['name'], source['id'],
- source['name'], target['id'], target['name'],
- '', '', v['status'], v['progress'],
- v['copyrate'], '100', 'off', '', '', 'no', '',
- 'no'])
+ source['name'], target['id'], target['name'],
+ '', '', v['status'], v['progress'],
+ v['copyrate'], '100', 'off', '', '', 'no', '',
+ 'no'])
for d in to_delete:
del self._fcmappings_list[d]
'k2': 'v2',
'k3': 'v3'})
type_ref = volume_types.create(self.ctxt, "type1", {"key2": "val2",
- "key3": "val3"})
+ "key3": "val3"})
res = volume_types.get_volume_type_qos_specs(type_ref['id'])
self.assertIsNone(res['qos_specs'])
qos_specs.associate_qos_with_type(self.ctxt,
volume_image_metadata = self.get_volume_image_metadata(context,
volume)
custom_property_set = (set(volume_image_metadata).difference
- (set(glance_core_properties)))
+ (set(glance_core_properties)))
if custom_property_set:
metadata.update(dict(properties=dict((custom_property,
volume_image_metadata
storage_system)
if configservice is None:
exception_message = (_("Error Create Volume: %(volumename)s. "
- "Storage Configuration Service not found for "
- "pool %(storage_type)s.")
+ "Storage Configuration Service not found "
+ "for pool %(storage_type)s.")
% {'volumename': volumename,
'storage_type': storage_type})
LOG.error(exception_message)
isVMAX = storage_system.find('SYMMETRIX')
if isVMAX > -1:
exception_message = (_('Error Create Volume from Snapshot: '
- 'Volume: %(volumename)s Snapshot: '
- '%(snapshotname)s. Create Volume '
- 'from Snapshot is NOT supported on VMAX.')
+ 'Volume: %(volumename)s Snapshot: '
+ '%(snapshotname)s. Create Volume '
+ 'from Snapshot is NOT supported on VMAX.')
% {'volumename': volumename,
'snapshotname': snapshotname})
LOG.error(exception_message)
repservice = self._find_replication_service(storage_system)
if repservice is None:
exception_message = (_('Error Create Volume from Snapshot: '
- 'Volume: %(volumename)s Snapshot: '
- '%(snapshotname)s. Cannot find Replication '
- 'Service to create volume from snapshot.')
+ 'Volume: %(volumename)s Snapshot: '
+ '%(snapshotname)s. Cannot find Replication '
+ 'Service to create volume from snapshot.')
% {'volumename': volumename,
'snapshotname': snapshotname})
LOG.error(exception_message)
rc, errordesc = self._wait_for_job_complete(job)
if rc != 0L:
exception_message = (_('Error Create Volume from Snapshot: '
- 'Volume: %(volumename)s Snapshot:'
- '%(snapshotname)s. Return code: %(rc)lu.'
- 'Error: %(error)s')
+ 'Volume: %(volumename)s Snapshot:'
+ '%(snapshotname)s. Return code: '
+ '%(rc)lu. Error: %(error)s')
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc,
rc, errordesc = self._wait_for_job_complete(job)
if rc != 0L:
exception_message = (_('Error Create Volume from Snapshot: '
- 'Volume: %(volumename)s '
- 'Snapshot: %(snapshotname)s. '
- 'Return code: %(rc)lu. Error: %(error)s')
+ 'Volume: %(volumename)s '
+ 'Snapshot: %(snapshotname)s. '
+ 'Return code: %(rc)lu. Error: '
+ '%(error)s')
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc,
repservice = self._find_replication_service(storage_system)
if repservice is None:
exception_message = (_('Error Create Cloned Volume: '
- 'Volume: %(volumename)s Source Volume: '
- '%(srcname)s. Cannot find Replication '
- 'Service to create cloned volume.')
+ 'Volume: %(volumename)s Source Volume: '
+ '%(srcname)s. Cannot find Replication '
+ 'Service to create cloned volume.')
% {'volumename': volumename,
'srcname': srcname})
LOG.error(exception_message)
rc, errordesc = self._wait_for_job_complete(job)
if rc != 0L:
exception_message = (_('Error Create Cloned Volume: '
- 'Volume: %(volumename)s Source Volume:'
- '%(srcname)s. Return code: %(rc)lu.'
- 'Error: %(error)s')
+ 'Volume: %(volumename)s Source Volume:'
+ '%(srcname)s. Return code: %(rc)lu.'
+ 'Error: %(error)s')
% {'volumename': volumename,
'srcname': srcname,
'rc': rc,
rc, errordesc = self._wait_for_job_complete(job)
if rc != 0L:
exception_message = (_('Error Create Cloned Volume: '
- 'Volume: %(volumename)s '
- 'Source Volume: %(srcname)s. '
- 'Return code: %(rc)lu. Error: %(error)s')
+ 'Volume: %(volumename)s '
+ 'Source Volume: %(srcname)s. '
+ 'Return code: %(rc)lu. Error: '
+ '%(error)s')
% {'volumename': volumename,
'srcname': srcname,
'rc': rc,
self._find_storage_configuration_service(storage_system)
if configservice is None:
exception_message = (_("Error Delete Volume: %(volumename)s. "
- "Storage Configuration Service not found.")
+ "Storage Configuration Service not found.")
% {'volumename': volumename})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
rc, errordesc = self._wait_for_job_complete(job)
if rc != 0L:
exception_message = (_('Error Delete Volume: %(volumename)s. '
- 'Return code: %(rc)lu. Error: %(error)s')
+ 'Return code: %(rc)lu. Error: '
+ '%(error)s')
% {'volumename': volumename,
'rc': rc,
'error': errordesc})
rc, errordesc = self._wait_for_job_complete(job)
if rc != 0L:
exception_message = (_('Error Create Snapshot: %(snapshot)s '
- 'Volume: %(volume)s Error: %(errordesc)s')
+ 'Volume: %(volume)s Error: '
+ '%(errordesc)s')
% {'snapshot': snapshotname, 'volume':
volumename, 'errordesc': errordesc})
LOG.error(exception_message)
rc, errordesc = self._wait_for_job_complete(job)
if rc != 0L:
exception_message = (_('Error Delete Snapshot: Volume: '
- '%(volumename)s Snapshot: '
- '%(snapshotname)s. Return code: %(rc)lu.'
- ' Error: %(error)s')
+ '%(volumename)s Snapshot: '
+ '%(snapshotname)s. Return code: '
+ '%(rc)lu. Error: %(error)s')
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc,
self._find_storage_sync_sv_sv(snapshot, volume, False)
if sync_name is None:
LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
- 'Snapshot is deleted.')
+ 'Snapshot is deleted.')
% {'snapshot': snapshotname,
'volume': volumename})
break
if ex.args[0] == 6:
# 6 means object not found, so snapshot is deleted cleanly
LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
- 'Snapshot is deleted.')
+ 'Snapshot is deleted.')
% {'snapshot': snapshotname,
'volume': volumename})
else:
'out': out})
if rc == 97:
msg = (_('The LUN cannot be expanded or shrunk because '
- 'it has snapshots. Command to extend the specified '
- 'volume failed.'))
+ 'it has snapshots. Command to extend the specified '
+ 'volume failed.'))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if rc != 0:
if len(host_lun_id_list) >= self.max_luns:
msg = (_('The storage group has reached the '
- 'maximum capacity of LUNs. '
- 'Command to add LUN for volume - %s '
- 'in storagegroup failed') % (volumename))
+ 'maximum capacity of LUNs. '
+ 'Command to add LUN for volume - %s '
+ 'in storagegroup failed') % (volumename))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if host_lun_id is None:
msg = (_('Unable to get new host lun id. Please '
- 'check if the storage group can accommodate '
- 'new LUN. '
- 'Command to add LUN for volume - %s '
- 'in storagegroup failed') % (volumename))
+ 'check if the storage group can accommodate '
+ 'new LUN. '
+ 'Command to add LUN for volume - %s '
+ 'in storagegroup failed') % (volumename))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
break
else:
LOG.warning(_('Unable to find a preferred node match '
- 'for node %(node)s in the list of '
- 'available WWPNs on %(host)s. '
- 'Using first available.') %
+ 'for node %(node)s in the list of '
+ 'available WWPNs on %(host)s. '
+ 'Using first available.') %
{'node': preferred_node,
'host': host_name})
properties['target_wwn'] = conn_wwpns[0]
'iscsi':
'cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver',
'nfs': 'cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver'
- }, 'ontap_7mode':
- {
- 'iscsi':
- 'cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver',
- 'nfs':
- 'cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver'
- }, 'eseries':
- {
- 'iscsi':
- 'cinder.volume.drivers.netapp.eseries.iscsi.Driver'
- },
+ },
+ 'ontap_7mode':
+ {
+ 'iscsi':
+ 'cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver',
+ 'nfs':
+ 'cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver'
+ },
+ 'eseries':
+ {
+ 'iscsi':
+ 'cinder.volume.drivers.netapp.eseries.iscsi.Driver'
+ },
}
#NOTE(singn): Holds family:protocol information.
# Due to hacking 0.9.2 the following checks are ignored on purpose for now
# E111,E112,E113,E121,E122,E123,E126,E128,E251,E265
# E713,F403,F841,H302,H305,H307,H402,H405,H803,H904
-ignore = E121,E122,E123,E126,E128,E251,E265,E711,E712,E713,F402,F841,H104,H302,H305,H307,H402,H405,H803,H904
+ignore = E251,E265,E711,E712,E713,F402,F841,H104,H302,H305,H307,H402,H405,H803,H904
builtins = _
exclude = .git,.venv,.tox,dist,tools,doc,common,*egg,build