# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
import os
import re
import cinder.volume.drivers.emc.emc_vnx_cli as emc_vnx_cli
from cinder.volume.drivers.emc.emc_vnx_cli import CommandLineHelper
from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCLICmdError
-from cinder.volume import volume_types
from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
SUCCEED = ("", 0)
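# Fake CLI results are (stdout, return_code) tuples; SUCCEED models an
# empty, successful invocation.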
'id': '1',
'provider_auth': None,
'project_id': 'project',
+ 'provider_location': 'system^FNM11111|type^lun|lun_id^1',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'display_description': 'test failed volume',
'volume_type_id': None}
+
+ test_volume1_in_sg = {
+ 'name': 'vol1_in_sg',
+ 'size': 1,
+ 'volume_name': 'vol1_in_sg',
+ 'id': '4',
+ 'provider_auth': None,
+ 'project_id': 'project',
+ 'display_name': 'failed_vol',
+ 'display_description': 'Volume 1 in SG',
+ 'volume_type_id': None,
+ 'provider_location': 'system^fakesn|type^lun|id^4'}
+
+ test_volume2_in_sg = {
+ 'name': 'vol2_in_sg',
+ 'size': 1,
+ 'volume_name': 'vol2_in_sg',
+ 'id': '5',
+ 'provider_auth': None,
+ 'project_id': 'project',
+ 'display_name': 'failed_vol',
+ 'display_description': 'Volume 2 in SG',
+ 'volume_type_id': None,
+ 'provider_location': 'system^fakesn|type^lun|id^3'}
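+
+ # provider_location packs '|'-separated 'key^value' fields: the array
+ # serial ('system'), the resource type, and the LUN id on the array.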
+
test_snapshot = {
'name': 'snapshot1',
'size': 1,
'id': '4444',
'volume_name': 'vol1',
+ 'volume': test_volume,
'volume_size': 1,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'size': 1,
'id': '5555',
'volume_name': 'vol-vol1',
+ 'volume': test_volume,
'volume_size': 1,
'project_id': 'project'}
test_clone = {
'attach_status': 'detached',
'volume_type': [],
'attached_host': None,
+ 'provider_location': 'system^FNM11111|type^lun|lun_id^1',
'_name_id': None, 'volume_metadata': []}
test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
'-allowReadWrite', 'yes',
'-allowAutoDelete', 'no')
+ def SNAP_LIST_CMD(self, res_id=1, poll=True):
+ cmd = ('snap', '-list', '-res', res_id)
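+ # A leading '-np' tells NaviSecCLI not to poll the array for fresh
+ # state before answering (assumed CLI semantics, mirrored below).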
+ if not poll:
+ cmd = ('-np',) + cmd
+ return cmd
+
def LUN_DELETE_CMD(self, name):
return ('lun', '-destroy', '-name', name, '-forceDetach', '-o')
- def LUN_CREATE_CMD(self, name, isthin=False):
- return ('lun', '-create', '-type', 'Thin' if isthin else 'NonThin',
- '-capacity', 1, '-sq', 'gb', '-poolName',
- 'unit_test_pool', '-name', name)
-
def LUN_EXTEND_CMD(self, name, newsize):
return ('lun', '-expand', '-name', name, '-capacity', newsize,
'-sq', 'gb', '-o', '-ignoreThresholds')
'-attachedSnapshot')
def MIGRATION_CMD(self, src_id=1, dest_id=1):
- return ("migrate", "-start", "-source", src_id, "-dest", dest_id,
- "-rate", "high", "-o")
+ cmd = ("migrate", "-start", "-source", src_id, "-dest", dest_id,
+ "-rate", "high", "-o")
+ return cmd
def MIGRATION_VERIFY_CMD(self, src_id):
return ("migrate", "-list", "-source", src_id)
def PINGNODE_CMD(self, sp, portid, vportid, ip):
return ("connection", "-pingnode", "-sp", sp, '-portid', portid,
- "-vportid", vportid, "-address", ip)
+ "-vportid", vportid, "-address", ip, '-count', '1')
def GETFCPORT_CMD(self):
return ('port', '-list', '-sp')
return ('compression', '-on',
'-l', lun_id, '-ignoreThresholds', '-o')
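+
+ # Without '-gname' the listing covers every storage group on the array;
+ # the force-delete tests below rely on that to find all LUN mappings.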
+ def STORAGEGROUP_LIST_CMD(self, gname=None):
+ if gname:
+ return ('storagegroup', '-list', '-gname', gname)
+ else:
+ return ('storagegroup', '-list')
+
+ def STORAGEGROUP_REMOVEHLU_CMD(self, gname, hlu):
+ return ('storagegroup', '-removehlu',
+ '-hlu', hlu, '-gname', gname, '-o')
+
provisioning_values = {
'thin': ['-type', 'Thin'],
'thick': ['-type', 'NonThin'],
'-initialTier', 'optimizePool',
'-tieringPolicy', 'noMovement']}
- def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering):
+ def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering,
+ poll=True):
initial = ['lun', '-create',
'-capacity', size,
'-sq', 'gb',
'-poolName', pool,
'-name', name]
+ if not poll:
+ initial = ['-np'] + initial
if provisioning:
initial.extend(self.provisioning_values[provisioning])
else:
return tuple(initial)
def CHECK_FASTCACHE_CMD(self, storage_pool):
- return ('-np', 'storagepool', '-list', '-name',
+ return ('storagepool', '-list', '-name',
storage_pool, '-fastcache')
def CREATE_CONSISTENCYGROUP_CMD(self, cg_name):
POOL_PROPERTY = ("""\
Pool Name: unit_test_pool
Pool ID: 1
-User Capacity (Blocks): 5769501696
-User Capacity (GBs): 10000.5
-Available Capacity (Blocks): 5676521472
-Available Capacity (GBs): 1000.6
- """, 0)
+User Capacity (Blocks): 6881061888
+User Capacity (GBs): 3281.146
+Available Capacity (Blocks): 6832207872
+Available Capacity (GBs): 3257.851
+
+""", 0)
ALL_PORTS = ("SP: A\n" +
"Port ID: 4\n" +
'target_discovered': True,
'target_iqn':
'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
- 'target_lun': 1,
+ 'target_lun': 2,
'target_portal': '10.244.214.118:3260'},
'driver_volume_type': 'iscsi'}
'target_discovered': True,
'target_iqn':
'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
- 'target_lun': 1,
+ 'target_lun': 2,
'target_portal': '10.244.214.118:3260'},
'driver_volume_type': 'iscsi'}
1 1
Shareable: YES""" % sgname, 0)
+ def STORAGE_GROUP_HAS_MAP_2(self, sgname):
+
+ return ("""\
+ Storage Group Name: %s
+ Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
+ HBA/SP Pairs:
+
+ HBA UID SP Name SPPort
+ ------- ------- ------
+ iqn.1993-08.org.debian:01:222 SP A 4
+
+ HLU/ALU Pairs:
+
+ HLU Number ALU Number
+ ---------- ----------
+ 1 1
+ 2 3
+ Shareable: YES""" % sgname, 0)
+
+ def POOL_FEATURE_INFO_POOL_LUNS_CMD(self):
+ cmd = ('storagepool', '-feature', '-info',
+ '-maxPoolLUNs', '-numPoolLUNs')
+ return cmd
+
+ def POOL_FEATURE_INFO_POOL_LUNS(self, max, total):
+ return (('Max. Pool LUNs: %s\n' % max) +
+ ('Total Number of Pool LUNs: %s\n' % total), 0)
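+
+ # Sample rendering of POOL_FEATURE_INFO_POOL_LUNS(1000, 1000):
+ #   Max. Pool LUNs: 1000
+ #   Total Number of Pool LUNs: 1000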
+
+ def STORAGE_GROUPS_HAS_MAP(self, sgname1, sgname2):
+
+ return ("""
+
+ Storage Group Name: irrelative
+ Storage Group UID: 9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
+ HBA/SP Pairs:
+
+ HBA UID SP Name SPPort
+ ------- ------- ------
+ iqn.1993-08.org.debian:01:5741c6307e60 SP A 6
+
+ Storage Group Name: %(sgname1)s
+ Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
+ HBA/SP Pairs:
+
+ HBA UID SP Name SPPort
+ ------- ------- ------
+ iqn.1993-08.org.debian:01:222 SP A 4
+
+ HLU/ALU Pairs:
+
+ HLU Number ALU Number
+ ---------- ----------
+ 31 3
+ 41 4
+ Shareable: YES
+
+ Storage Group Name: %(sgname2)s
+ Storage Group UID: 9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
+ HBA/SP Pairs:
+
+ HBA UID SP Name SPPort
+ ------- ------- ------
+ iqn.1993-08.org.debian:01:5741c6307e60 SP A 6
+
+ HLU/ALU Pairs:
+
+ HLU Number ALU Number
+ ---------- ----------
+ 32 3
+ 42 4
+ Shareable: YES""" % {'sgname1': sgname1,
+ 'sgname2': sgname2}, 0)
+
+ def LUN_DELETE_IN_SG_ERROR(self, up_to_date=True):
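+ # Newer (up-to-date) CLI versions fail with return code 156 and an
+ # explicit message; older ones return code 0 and only report that the
+ # host LUN mapping still exists, so callers must match on the output.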
+ if up_to_date:
+ return ("Cannot unbind LUN "
+ "because it's contained in a Storage Group",
+ 156)
+ else:
+ return ("SP B: Request failed. "
+ "Host LUN/LUN mapping still exists.",
+ 0)
+
class EMCVNXCLIDriverISCSITestCase(test.TestCase):
super(EMCVNXCLIDriverISCSITestCase, self).setUp()
self.stubs.Set(CommandLineHelper, 'command_execute',
- self.succeed_fake_command_execute)
+ self.fake_setup_command_execute)
self.stubs.Set(CommandLineHelper, 'get_array_serial',
mock.Mock(return_value={'array_serial':
'fakeSerial'}))
# set the timeout to 0.0002 * 60 = 0.012s (12ms)
self.configuration.default_timeout = 0.0002
self.configuration.initiator_auto_registration = True
+ self.configuration.check_max_pool_luns_threshold = False
self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
self.testData = EMCVNXCLIDriverTestData()
self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
'-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
- def tearDown(self):
- super(EMCVNXCLIDriverISCSITestCase, self).tearDown()
-
def driverSetup(self, commands=tuple(), results=tuple()):
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
fake_command_execute = self.get_command_execute_simulator(
def get_command_execute_simulator(self, commands=tuple(),
results=tuple()):
-
assert(len(commands) == len(results))
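# A result may itself be a list: successive invocations of the same
# command consume successive entries, letting a test model state that
# changes between polls (e.g. a migration finishing).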
def fake_command_execute(*args, **kwargv):
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1', 1,
'unit_test_pool',
- 'thick', None)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+ 'thick', None, False)),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=False),
mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
def test_create_volume_compressed(self):
- extra_specs = {'storagetype:provisioning': 'compressed'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+ self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'compressed', None)),
+ 'compressed', None, False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol_with_type')),
+ 'vol_with_type'), poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol_with_type')),
+ 'vol_with_type'), poll=True),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
1))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'compressed',
+ 'storagetype:tiering': 'HighestAvailable'}))
def test_create_volume_compressed_tiering_highestavailable(self):
- extra_specs = {'storagetype:provisioning': 'compressed',
- 'storagetype:tiering': 'HighestAvailable'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+ self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'compressed', 'highestavailable')),
+ 'compressed', 'highestavailable', False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol_with_type')),
+ 'vol_with_type'), poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol_with_type')),
+ 'vol_with_type'), poll=True),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
1))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
def test_create_volume_deduplicated(self):
- extra_specs = {'storagetype:provisioning': 'deduplicated'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+ self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'deduplicated', None))]
+ 'deduplicated', None, False))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
def test_create_volume_tiering_auto(self):
- extra_specs = {'storagetype:tiering': 'Auto'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+ self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- None, 'auto'))]
+ None, 'auto', False))]
fake_cli.assert_has_calls(expect_cmd)
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:tiering': 'Auto',
+ 'storagetype:provisioning': 'Deduplicated'}))
def test_create_volume_deduplicated_tiering_auto(self):
- extra_specs = {'storagetype:tiering': 'Auto',
- 'storagetype:provisioning': 'Deduplicated'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
re.match(r".*deduplicated and auto tiering can't be both enabled",
ex.msg))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'Compressed'}))
def test_create_volume_compressed_no_enabler(self):
- extra_specs = {'storagetype:provisioning': 'Compressed'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
+ 'storagetype:pool': 'unit_test_pool'}))
def test_create_compression_volume_on_array_backend(self):
"""Unit test for create a compression volume on array
backend.
self.driver = EMCCLIISCSIDriver(configuration=config)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
- extra_specs = {'storagetype:provisioning': 'Compressed',
- 'storagetype:pool': 'unit_test_pool'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+ self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_command_execute = self.get_command_execute_simulator(
commands, results)
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
- 'compressed', None)),
+ 'compressed', None, False)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol_with_type')),
+ 'vol_with_type'), poll=False),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol_with_type')),
+ 'vol_with_type'), poll=True),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
1))]
fake_cli.assert_has_calls(expect_cmd)
def test_get_volume_stats(self):
- #expect_result = [POOL_PROPERTY]
self.driverSetup()
stats = self.driver.get_volume_stats(True)
self.assertTrue(stats['driver_version'] is not None,
- "dirver_version is not returned")
+ "driver_version is not returned")
self.assertTrue(
- stats['free_capacity_gb'] == 1000.6,
+ stats['free_capacity_gb'] == 3257.851,
"free_capacity_gb is not correct")
self.assertTrue(
- stats['reserved_percentage'] == 0,
+ stats['reserved_percentage'] == 3,
"reserved_percentage is not correct")
self.assertTrue(
stats['storage_protocol'] == 'iSCSI',
"storage_protocol is not correct")
self.assertTrue(
- stats['total_capacity_gb'] == 10000.5,
+ stats['total_capacity_gb'] == 3281.146,
"total_capacity_gb is not correct")
self.assertTrue(
stats['vendor_name'] == "EMC",
"volume backend name is not correct")
self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
self.assertTrue(
- stats['driver_version'] == "04.01.00",
+ stats['driver_version'] == "05.00.00",
"driver version is incorrect.")
+ def test_get_volume_stats_too_many_luns(self):
+ commands = [self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
+ results = [self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
+ fake_cli = self.driverSetup(commands, results)
+
+ self.driver.cli.check_max_pool_luns_threshold = True
+ stats = self.driver.get_volume_stats(True)
+ self.assertTrue(
+ stats['free_capacity_gb'] == 0,
+ "free_capacity_gb is not correct")
+ expect_cmd = [
+ mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
+ poll=False)]
+ fake_cli.assert_has_calls(expect_cmd)
+
+ self.driver.cli.check_max_pool_luns_threshold = False
+ stats = self.driver.get_volume_stats(True)
+ self.assertTrue(stats['driver_version'] is not None,
+ "driver_version is not returned")
+ self.assertTrue(
+ stats['free_capacity_gb'] == 3257.851,
+ "free_capacity_gb is not correct")
+
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
- mock.Mock(return_value=True))
+ mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
Time Remaining: 0 second(s)
"""
results = [(FAKE_ERROR_MSG, 255),
- [SUCCEED,
- (FAKE_MIGRATE_PROPERTY, 0),
- ('The specified source LUN is not currently migrating',
- 23)]]
+ [(FAKE_MIGRATE_PROPERTY, 0),
+ (FAKE_MIGRATE_PROPERTY, 0),
+ ('The specified source LUN is not currently migrating',
+ 23)]]
fake_cli = self.driverSetup(commands, results)
fakehost = {'capabilities': {'location_info':
"unit_test_pool2|fakeSerial",
self.assertTrue(ret)
#verification
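# The verify sequence polls while the migration session still reports
# progress, then ends with a no-poll query that returns "not currently
# migrating".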
expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
- retry_disable=True),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=False)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
mock.Mock(
- return_value=True))
+ return_value={'lun_id': 1}))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
Percent Complete: 100
Time Remaining: 0 second(s)
"""
- results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0),
- ('The specified source LUN is not '
- 'currently migrating',
- 23)]]
+ results = [SUCCEED,
+ [(FAKE_MIGRATE_PROPERTY, 0),
+ ('The specified source LUN is not '
+ 'currently migrating', 23)]]
fake_cli = self.driverSetup(commands, results)
- fakehost = {'capabilities': {'location_info':
- "unit_test_pool2|fakeSerial",
- 'storage_protocol': 'iSCSI'}}
+ fake_host = {'capabilities': {'location_info':
+ "unit_test_pool2|fakeSerial",
+ 'storage_protocol': 'iSCSI'}}
ret = self.driver.migrate_volume(None, self.testData.test_volume,
- fakehost)[0]
+ fake_host)[0]
self.assertTrue(ret)
#verification
expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
- retry_disable=True),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=False)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
mock.Mock(
- return_value=True))
+ return_value={'lun_id': 5}))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
"get_lun_id_by_name",
mock.Mock(return_value=5))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
def test_volume_migration_02(self):
commands = [self.testData.MIGRATION_CMD(5, 5),
Percent Complete: 100
Time Remaining: 0 second(s)
"""
- results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0),
- ('The specified source LUN is not '
- 'currently migrating',
- 23)]]
+ results = [SUCCEED,
+ [(FAKE_MIGRATE_PROPERTY, 0),
+ ('The specified source LUN is not currently migrating',
+ 23)]]
fake_cli = self.driverSetup(commands, results)
fakehost = {'capabilities': {'location_info':
"unit_test_pool2|fakeSerial",
self.assertTrue(ret)
#verification
expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5),
- retry_disable=True),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(5)),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(5))]
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
+ poll=False)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
mock.Mock(
- return_value=True))
+ return_value={'lun_id': 1}))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
self.assertFalse(ret)
#verification
expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
- retry_disable=True)]
+ retry_disable=True,
+ poll=True)]
fake_cli.assert_has_calls(expect_cmd)
def test_create_destroy_volume_snapshot(self):
self.driver.delete_snapshot(self.testData.test_snapshot)
#verification
- expect_cmd = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1')),
- mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'))]
+ expect_cmd = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
+ poll=False),
+ mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'),
+ poll=True)]
fake_cli.assert_has_calls(expect_cmd)
mock.Mock(
return_value=(
"fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
- @mock.patch("random.shuffle", mock.Mock())
+ @mock.patch('random.randint',
+ mock.Mock(return_value=0))
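+ # Pinning random.randint makes the HLU picked for '-addhlu'
+ # deterministic, so the expected calls can assert a fixed '-hlu' value.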
def test_initialize_connection(self):
# Test for auto registration
self.configuration.initiator_auto_registration = True
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
- self.testData.GETPORT_CMD(),
self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
results = [[("No group", 83),
- self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
- self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
- self.testData.ALL_PORTS,
self.testData.PING_OK]
fake_cli = self.driverSetup(commands, results)
+
connection_info = self.driver.initialize_connection(
self.testData.test_volume,
self.testData.connector)
self.assertEqual(connection_info,
self.testData.iscsi_connection_info_ro)
- expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+ expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=False),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
- mock.call('storagegroup', '-list'),
- mock.call(*self.testData.GETPORT_CMD()),
mock.call('storagegroup', '-gname', 'fakehost', '-setpath',
'-hbauid', 'iqn.1993-08.org.debian:01:222',
'-sp', 'A', '-spport', 4, '-spvport', 0,
'-ip', '10.0.0.2', '-host', 'fakehost', '-o'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
- '-gname', 'fakehost'),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call(*self.testData.GETPORT_CMD()),
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
+ mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
+ '-gname', 'fakehost',
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=False),
mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
'10.0.0.2'))]
fake_cli.assert_has_calls(expected)
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
- self.testData.GETPORT_CMD(),
self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
- results = [[("No group", 83),
- self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
- self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
- self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
- ('', 0),
- self.testData.ALL_PORTS,
- self.testData.PING_OK]
+ results = [
+ [("No group", 83),
+ self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
+ ('', 0),
+ self.testData.PING_OK
+ ]
fake_cli = self.driverSetup(commands, results)
+ test_volume_rw = self.testData.test_volume_rw.copy()
+ test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
connection_info = self.driver.initialize_connection(
- self.testData.test_volume_rw,
+ test_volume_rw,
self.testData.connector)
self.assertEqual(connection_info,
self.testData.iscsi_connection_info_rw)
- expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+ expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=False),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
mock.call('storagegroup', '-connecthost',
'-host', 'fakehost', '-gname', 'fakehost', '-o'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
- '-gname', 'fakehost'),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call('connection', '-getport', '-address', '-vlanid')]
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
+ mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
+ '-gname', 'fakehost', poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=False),
+ mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
+ '10.0.0.2'))]
+ fake_cli.assert_has_calls(expected)
+
+ @mock.patch(
+ "oslo_concurrency.processutils.execute",
+ mock.Mock(
+ return_value=(
+ "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
+ @mock.patch(
+ "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+ mock.Mock(
+ return_value=3))
+ @mock.patch('random.randint',
+ mock.Mock(return_value=0))
+ def test_initialize_connection_exist(self):
+ """A LUN is added to the SG right before the attach,
+ it may not exists in the first SG query
+ """
+ # Test for auto registration
+ self.configuration.initiator_auto_registration = True
+ self.configuration.max_luns_per_storage_group = 2
+ commands = [('storagegroup', '-list', '-gname', 'fakehost'),
+ ('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
+ '-gname', 'fakehost'),
+ self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
+ results = [[self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
+ self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')],
+ ("fakeerror", 23),
+ self.testData.PING_OK]
+
+ fake_cli = self.driverSetup(commands, results)
+
+ iscsi_data = self.driver.initialize_connection(
+ self.testData.test_volume,
+ self.testData.connector
+ )
+ self.assertTrue(iscsi_data['data']['target_lun'] == 2,
+ "iSCSI initialize connection returned wrong HLU")
+ expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=False),
+ mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
+ '-gname', 'fakehost',
+ poll=False),
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=False),
+ mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
+ '10.0.0.2'))]
+ fake_cli.assert_has_calls(expected)
+
+ @mock.patch(
+ "oslo_concurrency.processutils.execute",
+ mock.Mock(
+ return_value=(
+ "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
+ @mock.patch(
+ "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+ mock.Mock(
+ return_value=4))
+ @mock.patch('random.randint',
+ mock.Mock(return_value=0))
+ def test_initialize_connection_no_hlu_left_1(self):
+ """There is no hlu per the first SG query
+ But there are hlu left after the full poll
+ """
+ # Test for auto registration
+ self.configuration.initiator_auto_registration = True
+ self.configuration.max_luns_per_storage_group = 2
+ commands = [('storagegroup', '-list', '-gname', 'fakehost'),
+ ('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
+ '-gname', 'fakehost'),
+ self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
+ results = [[self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
+ self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
+ ("", 0),
+ self.testData.PING_OK]
+
+ fake_cli = self.driverSetup(commands, results)
+
+ iscsi_data = self.driver.initialize_connection(
+ self.testData.test_volume,
+ self.testData.connector)
+ self.assertTrue(iscsi_data['data']['target_lun'] == 2,
+ "iSCSI initialize connection returned wrong HLU")
+ expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=False),
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
+ mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
+ '-gname', 'fakehost',
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=False),
+ mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
+ u'10.0.0.2'))]
+ fake_cli.assert_has_calls(expected)
+
+ @mock.patch(
+ "oslo_concurrency.processutils.execute",
+ mock.Mock(
+ return_value=(
+ "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
+ @mock.patch(
+ "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+ mock.Mock(
+ return_value=4))
+ @mock.patch('random.randint',
+ mock.Mock(return_value=0))
+ def test_initialize_connection_no_hlu_left_2(self):
+ """There is no usable hlu for the SG
+ """
+ # Test for auto registration
+ self.configuration.initiator_auto_registration = True
+ self.configuration.max_luns_per_storage_group = 2
+ commands = [('storagegroup', '-list', '-gname', 'fakehost')]
+ results = [
+ [self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
+ self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')]
+ ]
+
+ fake_cli = self.driverSetup(commands, results)
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.initialize_connection,
+ self.testData.test_volume,
+ self.testData.connector)
+ expected = [
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=False),
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
+ ]
fake_cli.assert_has_calls(expected)
def test_terminate_connection(self):
def test_create_volume_cli_failed(self):
commands = [self.testData.LUN_CREATION_CMD(
- 'failed_vol1', 1, 'unit_test_pool', None, None)]
+ 'failed_vol1', 1, 'unit_test_pool', None, None, False)]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.driver.create_volume,
self.testData.test_failed_volume)
expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
- 'failed_vol1', 1, 'unit_test_pool', None, None))]
+ 'failed_vol1', 1, 'unit_test_pool', None, None, False))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_volume_snapshot_failed(self):
#verification
expect_cmd = [
mock.call(
- *self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol-vol1')),
- mock.call(
- *self.testData.SNAP_CREATE_CMD(
- 'failed_snapshot'))]
+ *self.testData.SNAP_CREATE_CMD('failed_snapshot'),
+ poll=False)]
fake_cli.assert_has_calls(expect_cmd)
def test_create_volume_from_snapshot(self):
#set up
- cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
- output_smp = ("""LOGICAL UNIT NUMBER 1
- Name: vol2
- Attached Snapshot: N/A""", 0)
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+ cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
output_dest = self.testData.LUN_PROPERTY("vol2_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
- commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
- results = [output_smp, output_dest, output_migrate,
+ commands = [cmd_dest, cmd_dest_np, cmd_migrate,
+ cmd_migrate_verify]
+ results = [output_dest, output_dest, output_migrate,
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
expect_cmd = [
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
- name='vol2', source='vol1')),
+ name='vol2', source='vol1'),
+ poll=False),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='snapshot1')),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol2_dest', 1, 'unit_test_pool', None, None)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+ poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
- retry_disable=True),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
-
- mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot')]
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
new=ZeroIntervalLoopingCall)
def test_create_volume_from_snapshot_sync_failed(self):
- output_smp = ("""LOGICAL UNIT NUMBER 1
- Name: vol1
- Attached Snapshot: fakesnap""", 0)
- cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+ cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
output_dest = self.testData.LUN_PROPERTY("vol2_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
+ cmd_detach_lun = ('lun', '-detach', '-name', 'vol2')
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
- output_migrate_verify = (r'The specified source LUN '
- 'is not currently migrating', 23)
- commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
- results = [output_smp, output_dest, output_migrate,
- output_migrate_verify]
+
+ commands = [cmd_dest, cmd_dest_np, cmd_migrate,
+ cmd_migrate_verify]
+ results = [output_dest, output_dest, output_migrate,
+ FAKE_ERROR_RETURN]
+ fake_cli = self.driverSetup(commands, results)
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.create_volume_from_snapshot,
+ self.testData.test_volume2,
+ self.testData.test_snapshot)
+ expect_cmd = [
+ mock.call(
+ *self.testData.SNAP_MP_CREATE_CMD(
+ name='vol2', source='vol1'),
+ poll=False),
+ mock.call(
+ *self.testData.SNAP_ATTACH_CMD(
+ name='vol2', snapName='snapshot1')),
+ mock.call(*self.testData.LUN_CREATION_CMD(
+ 'vol2_dest', 1, 'unit_test_pool', None, None)),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+ poll=True),
+ mock.call(*self.testData.MIGRATION_CMD(1, 1),
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True),
+ mock.call(*self.testData.LUN_DELETE_CMD('vol2_dest')),
+ mock.call(*cmd_detach_lun),
+ mock.call(*self.testData.LUN_DELETE_CMD('vol2'))]
+ fake_cli.assert_has_calls(expect_cmd)
+
+ def test_create_vol_from_snap_failed_in_migrate_lun(self):
+ cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+ output_dest = self.testData.LUN_PROPERTY("vol2_dest")
+ cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
+ cmd_detach_lun = ('lun', '-detach', '-name', 'vol2')
+ commands = [cmd_dest, cmd_migrate]
+ results = [output_dest, FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.VolumeBackendAPIException,
expect_cmd = [
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
- name='vol2', source='vol1')),
+ name='vol2', source='vol1'), poll=False),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='snapshot1')),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol2_dest', 1, 'unit_test_pool', None, None)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'), poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
+ poll=True,
retry_disable=True),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+ mock.call(*self.testData.LUN_DELETE_CMD('vol2_dest')),
+ mock.call(*cmd_detach_lun),
+ mock.call(*self.testData.LUN_DELETE_CMD('vol2'))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_cloned_volume(self):
- cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot')
- output_smp = ("""LOGICAL UNIT NUMBER 1
- Name: vol1
- Attached Snapshot: N/A""", 0)
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
+ cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
output_dest = self.testData.LUN_PROPERTY("vol1_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
- commands = [cmd_smp, cmd_dest, cmd_migrate,
+ commands = [cmd_dest, cmd_dest_p, cmd_migrate,
cmd_migrate_verify,
self.testData.NDU_LIST_CMD]
- results = [output_smp, output_dest, output_migrate,
+ results = [output_dest, output_dest, output_migrate,
output_migrate_verify,
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.testData.test_snapshot)
tmp_snap = 'tmp-snap-' + self.testData.test_volume['id']
expect_cmd = [
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('snapshot1')),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
+ poll=True),
mock.call(
- *self.testData.SNAP_CREATE_CMD(tmp_snap)),
- mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
- source='snapshot1')),
+ *self.testData.SNAP_CREATE_CMD(tmp_snap), poll=False),
+ mock.call(*self.testData.SNAP_MP_CREATE_CMD(
+ name='vol1',
+ source='snapshot1'), poll=False),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol1', snapName=tmp_snap)),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1_dest', 1, 'unit_test_pool', None, None)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
+ poll=True,
retry_disable=True),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
- mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap))]
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True),
+ mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
+ poll=True)]
fake_cli.assert_has_calls(expect_cmd)
def test_delete_volume_failed(self):
expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))]
fake_cli.assert_has_calls(expected)
+ def test_delete_volume_in_sg_failed(self):
+ commands = [self.testData.LUN_DELETE_CMD('vol1_in_sg'),
+ self.testData.LUN_DELETE_CMD('vol2_in_sg')]
+ results = [self.testData.LUN_DELETE_IN_SG_ERROR(),
+ self.testData.LUN_DELETE_IN_SG_ERROR(False)]
+ self.driverSetup(commands, results)
+ self.assertRaises(EMCVnxCLICmdError,
+ self.driver.delete_volume,
+ self.testData.test_volume1_in_sg)
+ self.assertRaises(EMCVnxCLICmdError,
+ self.driver.delete_volume,
+ self.testData.test_volume2_in_sg)
+
+ def test_delete_volume_in_sg_force(self):
+ commands = [self.testData.LUN_DELETE_CMD('vol1_in_sg'),
+ self.testData.STORAGEGROUP_LIST_CMD(),
+ self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
+ '41'),
+ self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
+ '42'),
+ self.testData.LUN_DELETE_CMD('vol2_in_sg'),
+ self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
+ '31'),
+ self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
+ '32')]
+ results = [[self.testData.LUN_DELETE_IN_SG_ERROR(),
+ SUCCEED],
+ self.testData.STORAGE_GROUPS_HAS_MAP('fakehost1',
+ 'fakehost2'),
+ SUCCEED,
+ SUCCEED,
+ [self.testData.LUN_DELETE_IN_SG_ERROR(False),
+ SUCCEED],
+ SUCCEED,
+ SUCCEED]
+ fake_cli = self.driverSetup(commands, results)
+ self.driver.cli.force_delete_lun_in_sg = True
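+ # Force mode: when '-destroy' fails because the LUN is still in a
+ # storage group, the driver scans all groups, removes the stale HLU
+ # mappings, then retries the delete (see the expected calls below).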
+ self.driver.delete_volume(self.testData.test_volume1_in_sg)
+ self.driver.delete_volume(self.testData.test_volume2_in_sg)
+ expected = [mock.call(*self.testData.LUN_DELETE_CMD('vol1_in_sg')),
+ mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
+ poll=True),
+ mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
+ 'fakehost1', '41'), poll=False),
+ mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
+ 'fakehost2', '42'), poll=False),
+ mock.call(*self.testData.LUN_DELETE_CMD('vol1_in_sg')),
+ mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg')),
+ mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
+ poll=True),
+ mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
+ 'fakehost1', '31'), poll=False),
+ mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
+ 'fakehost2', '32'), poll=False),
+ mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg'))]
+ fake_cli.assert_has_calls(expected)
+
def test_extend_volume(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
results = [self.testData.LUN_PROPERTY('vol1', size=2)]
# case
self.driver.extend_volume(self.testData.test_volume, 2)
- expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
- 'vol1'))]
+ expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=False)]
fake_cli.assert_has_calls(expected)
def test_extend_volume_has_snapshot(self):
self.driver.extend_volume,
self.testData.test_failed_volume,
2)
- expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2))]
+ expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2),
+ poll=False)]
fake_cli.assert_has_calls(expected)
@mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
3)
expected = [
mock.call(
- *self.testData.LUN_EXTEND_CMD('failed_vol1', 3)),
+ *self.testData.LUN_EXTEND_CMD('failed_vol1', 3),
+ poll=False),
mock.call(
- *self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'))]
- fake_cli.assert_has_calls(expected)
-
- def test_create_remove_export(self):
- fake_cli = self.driverSetup()
-
- self.driver.create_export(None, self.testData.test_volume)
- self.driver.remove_export(None, self.testData.test_volume)
- expected = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'))]
+ *self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'),
+ poll=False)]
fake_cli.assert_has_calls(expected)
def test_manage_existing(self):
- """Unit test for the manage_existing function
- of driver
- """
- get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
- '-state', '-userCap', '-owner',
- '-attachedSnapshot', '-poolName')
lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
'-newName', 'vol_with_type', '-o')
- commands = [get_lun_cmd, lun_rename_cmd]
+ commands = [lun_rename_cmd]
- results = [self.testData.LUN_PROPERTY('lun_name'), SUCCEED]
+ results = [SUCCEED]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
self.driver.manage_existing(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
- expected = [mock.call(*get_lun_cmd),
- mock.call(*lun_rename_cmd)]
+ expected = [mock.call(*lun_rename_cmd, poll=False)]
fake_cli.assert_has_calls(expected)
def test_manage_existing_lun_in_another_pool(self):
- """Unit test for the manage_existing function
- of driver with a invalid pool backend.
- An exception would occur in this case
- """
get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
'-state', '-userCap', '-owner',
'-attachedSnapshot', '-poolName')
self.driver.cli._client.command_execute = fake_cli
ex = self.assertRaises(
exception.ManageExistingInvalidReference,
- self.driver.manage_existing,
+ self.driver.manage_existing_get_size,
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
self.assertTrue(
re.match(r'.*not in a manageable pool backend by cinder',
ex.msg))
- expected = [mock.call(*get_lun_cmd)]
+ expected = [mock.call(*get_lun_cmd, poll=True)]
fake_cli.assert_has_calls(expected)
- def test_manage_existing_get_size(self):
- """Unit test for the manage_existing_get_size
- function of driver.
- """
+ def test_manage_existing_get_size_pool_backend(self):
get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
- '-state', '-status', '-opDetails', '-userCap', '-owner',
- '-attachedSnapshot')
+ '-state', '-userCap', '-owner',
+ '-attachedSnapshot', '-poolName')
test_size = 2
commands = [get_lun_cmd]
results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
get_size = self.driver.manage_existing_get_size(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
- expected = [mock.call(*get_lun_cmd)]
+ expected = [mock.call(*get_lun_cmd, poll=True)]
assert get_size == test_size
fake_cli.assert_has_calls(expected)
#Test the function with invalid reference.
self.testData.test_volume_with_type,
invaild_ref)
+ def test_manage_existing_get_size_array_backend(self):
+ get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+ '-state', '-status', '-opDetails', '-userCap', '-owner',
+ '-attachedSnapshot',)
+ test_size = 2
+ commands = [get_lun_cmd]
+ results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
+
+ self.configuration.safe_get = mock.Mock(return_value=None)
+ self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+ assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
+
+ # Mock the command executor
+ fake_command_execute = self.get_command_execute_simulator(
+ commands, results)
+ fake_cli = mock.MagicMock(side_effect=fake_command_execute)
+ self.driver.cli._client.command_execute = fake_cli
+
+ get_size = self.driver.manage_existing_get_size(
+ self.testData.test_volume_with_type,
+ self.testData.test_existing_ref)
+ expected = [mock.call(*get_lun_cmd, poll=True)]
+ assert get_size == test_size
+ fake_cli.assert_has_calls(expected)
+ self.configuration.safe_get = self.fake_safe_get
+
def test_manage_existing_with_array_backend(self):
"""Unit test for the manage_existing with the
array backend which is not support the manage
self.driver.manage_existing(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
- expected = [mock.call(*lun_rename_cmd)]
+ expected = [mock.call(*lun_rename_cmd, poll=False)]
fake_cli.assert_has_calls(expected)
@mock.patch(
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
def test_retype_compressed_to_deduplicated(self):
"""Unit test for retype compressed to deduplicated."""
diff_data = {'encryption': {}, 'qos_specs': {},
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
+ cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
+ output_migrate_verify = (r'The specified source LUN '
+ 'is not currently migrating', 23)
commands = [self.testData.NDU_LIST_CMD,
- ('snap', '-list', '-res', 1)]
+ self.testData.SNAP_LIST_CMD(),
+ cmd_migrate_verify]
results = [self.testData.NDU_LIST_RESULT,
- ('No snap', 1023)]
+ ('No snap', 1023),
+ output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
- extra_specs = {'storagetype:provisioning': 'compressed'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
- mock.call('snap', '-list', '-res', 1),
+ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456')),
- mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456'),
+ poll=False),
+ mock.call(*self.testData.MIGRATION_CMD(1, None),
+ retry_disable=True,
+ poll=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'thin'}))
def test_retype_thin_to_compressed_auto(self):
"""Unit test for retype thin to compressed and auto tiering."""
diff_data = {'encryption': {}, 'qos_specs': {},
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
-
+ cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
+ output_migrate_verify = (r'The specified source LUN '
+ 'is not currently migrating', 23)
commands = [self.testData.NDU_LIST_CMD,
- ('snap', '-list', '-res', 1)]
+ self.testData.SNAP_LIST_CMD(),
+ cmd_migrate_verify]
results = [self.testData.NDU_LIST_RESULT,
- ('No snap', 1023)]
+ ('No snap', 1023),
+ output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
- extra_specs = {'storagetype:provisioning': 'thin'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
- mock.call('snap', '-list', '-res', 1),
+ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool', 'compressed', 'auto')),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
- mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+ mock.call(*self.testData.MIGRATION_CMD(),
+ retry_disable=True,
+ poll=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'deduplicated',
+ 'storagetype:pool': 'unit_test_pool'}))
def test_retype_pool_changed_dedup_to_compressed_auto(self):
"""Unit test for retype dedup to compressed and auto tiering
and pool changed
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
- ('snap', '-list', '-res', 1)]
+ self.testData.SNAP_LIST_CMD(),
+ self.testData.MIGRATION_VERIFY_CMD(1)]
results = [self.testData.NDU_LIST_RESULT,
- ('No snap', 1023)]
+ ('No snap', 1023),
+ ('The specified source LUN is not currently migrating', 23)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
- extra_specs = {'storagetype:provisioning': 'deduplicated',
- 'storagetype:pool': 'unit_test_pool'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
- mock.call('snap', '-list', '-res', 1),
+ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool2', 'compressed', 'auto')),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
- mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+ mock.call(*self.testData.MIGRATION_CMD(),
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'compressed',
+ 'storagetype:pool': 'unit_test_pool',
+ 'storagetype:tiering': 'auto'}))
def test_retype_compressed_auto_to_compressed_nomovement(self):
"""Unit test for retype only tiering changed."""
diff_data = {'encryption': {}, 'qos_specs': {},
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
- ('snap', '-list', '-res', 1)]
+ self.testData.SNAP_LIST_CMD(poll=False)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
fake_cli = self.driverSetup(commands, results)
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
- extra_specs = {'storagetype:provisioning': 'compressed',
- 'storagetype:pool': 'unit_test_pool',
- 'storagetype:tiering': 'auto'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'thin',
+ 'storagetype:pool': 'unit_test_pool'}))
def test_retype_compressed_to_thin_cross_array(self):
"""Unit test for retype cross array."""
diff_data = {'encryption': {}, 'qos_specs': {},
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
- ('snap', '-list', '-res', 1)]
+ self.testData.SNAP_LIST_CMD(poll=False)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
self.driverSetup(commands, results)
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
- extra_specs = {'storagetype:provisioning': 'thin',
- 'storagetype:pool': 'unit_test_pool'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
retyped = self.driver.retype(None, self.testData.test_volume3,
new_type_data, diff_data,
host_test_data)
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'thin',
+ 'storagetype:tiering': 'auto',
+ 'storagetype:pool': 'unit_test_pool'}))
def test_retype_thin_auto_to_dedup_diff_procotol(self):
"""Unit test for retype different procotol."""
diff_data = {'encryption': {}, 'qos_specs': {},
'storage_protocol': 'FC'}}
commands = [self.testData.NDU_LIST_CMD,
- ('snap', '-list', '-res', 1)]
+ self.testData.SNAP_LIST_CMD(),
+ self.testData.MIGRATION_VERIFY_CMD(1)]
results = [self.testData.NDU_LIST_RESULT,
- ('No snap', 1023)]
+ ('No snap', 1023),
+ ('The specified source LUN is not currently migrating', 23)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
- extra_specs = {'storagetype:provisioning': 'thin',
- 'storagetype:tiering': 'auto',
- 'storagetype:pool': 'unit_test_pool'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
- mock.call('snap', '-list', '-res', 1),
+ mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
- mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+ mock.call(*self.testData.MIGRATION_CMD(),
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'thin',
+ 'storagetype:tiering': 'auto',
+ 'storagetype:pool': 'unit_test_pool'}))
def test_retype_thin_auto_has_snap_to_thick_highestavailable(self):
"""Unit test for retype volume has snap when need migration."""
diff_data = {'encryption': {}, 'qos_specs': {},
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
- ('snap', '-list', '-res', 1)]
+ self.testData.SNAP_LIST_CMD(poll=False)]
results = [self.testData.NDU_LIST_RESULT,
('Has snap', 0)]
self.driverSetup(commands, results)
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
- extra_specs = {'storagetype:provisioning': 'thin',
- 'storagetype:tiering': 'auto',
- 'storagetype:pool': 'unit_test_pool'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
retyped = self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'storagetype:provisioning': 'thin',
+ 'storagetype:tiering': 'auto',
+ 'storagetype:pool': 'unit_test_pool'}))
def test_retype_thin_auto_to_thin_auto(self):
"""Unit test for retype volume which has no change."""
diff_data = {'encryption': {}, 'qos_specs': {},
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
- extra_specs = {'storagetype:provisioning': 'thin',
- 'storagetype:tiering': 'auto',
- 'storagetype:pool': 'unit_test_pool'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
+ @mock.patch(
+ "cinder.volume.volume_types."
+ "get_volume_type_extra_specs",
+ mock.Mock(return_value={'fast_cache_enabled': 'True'}))
def test_create_volume_with_fastcache(self):
        """Enable FAST Cache when creating a volume."""
- extra_specs = {'fast_cache_enabled': 'True'}
- volume_types.get_volume_type_extra_specs = \
- mock.Mock(return_value=extra_specs)
-
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+ self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD,
self.testData.CHECK_FASTCACHE_CMD(
self.testData.test_pool_name)]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+ self.testData.LUN_PROPERTY('vol_with_type', True),
SUCCEED,
('FAST Cache: Enabled', 0)]
fake_cli = self.driverSetup(commands, results)
cli_helper.command_execute = fake_cli
cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
+ cli_helper.get_pool = mock.Mock(return_value={'lun_nums': 1000,
+ 'total_capacity_gb': 10,
+ 'free_capacity_gb': 5})
self.driver.update_volume_stats()
self.driver.create_volume(self.testData.test_volume_with_type)
self.assertEqual(self.driver.cli.stats['fast_cache_enabled'], 'True')
expect_cmd = [
+ mock.call('connection', '-getport', '-address', '-vlanid',
+ poll=False),
mock.call('storagepool', '-list', '-name',
- 'Pool_02_SASFLASH', '-userCap', '-availableCap'),
- mock.call('-np', 'storagepool', '-list', '-name',
- 'Pool_02_SASFLASH', '-fastcache'),
- mock.call('lun', '-create', '-capacity',
+ 'Pool_02_SASFLASH', '-fastcache', poll=False),
+ mock.call('-np', 'lun', '-create', '-capacity',
1, '-sq', 'gb', '-poolName', 'Pool_02_SASFLASH',
'-name', 'vol_with_type', '-type', 'NonThin')
]
mock.call(
*self.testData.DELETE_CONSISTENCYGROUP_CMD(
cg_name)),
- mock.call(
- *self.testData.LUN_DELETE_CMD('vol1')),
- mock.call(
- *self.testData.LUN_DELETE_CMD('vol1'))]
+ mock.call(*self.testData.LUN_DELETE_CMD('vol1')),
+ mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_cgsnapshot(self):
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1', 1,
'unit_test_pool',
- None, None)),
- mock.call('lun', '-list', '-name', 'vol1',
- '-state', '-status', '-opDetails',
- '-userCap', '-owner', '-attachedSnapshot'),
+ None, None, False)),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+ poll=False),
mock.call(*self.testData.ADD_LUN_TO_CG_CMD(
'cg_id', 1))]
fake_cli.assert_has_calls(expect_cmd)
    def test_create_cloned_volume_from_consistency_group(self):
- cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot')
- output_smp = ("""LOGICAL UNIT NUMBER 1
- Name: vol1
- Attached Snapshot: N/A""", 0)
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
+ cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
output_dest = self.testData.LUN_PROPERTY("vol1_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
'is not currently migrating', 23)
cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
- commands = [cmd_smp, cmd_dest, cmd_migrate,
+ commands = [cmd_dest, cmd_dest_p, cmd_migrate,
cmd_migrate_verify]
- results = [output_smp, output_dest, output_migrate,
+ results = [output_dest, output_dest, output_migrate,
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
mock.call(
*self.testData.CREATE_CG_SNAPSHOT(cg_name, tmp_cgsnapshot)),
mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
- source='clone1')),
+ source='clone1'),
+ poll=False),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol1', snapName=tmp_cgsnapshot)),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1_dest', 1, 'unit_test_pool', None, None)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'), poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
- retry_disable=True),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
- mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True),
mock.call(*self.testData.DELETE_CG_SNAPSHOT(tmp_cgsnapshot))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_volume_from_cgsnapshot(self):
- cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
- output_smp = ("""LOGICAL UNIT NUMBER 1
- Name: vol2
- Attached Snapshot: N/A""", 0)
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+ cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
output_dest = self.testData.LUN_PROPERTY("vol2_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
- commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
- results = [output_smp, output_dest, output_migrate,
+ commands = [cmd_dest, cmd_dest_np, cmd_migrate,
+ cmd_migrate_verify]
+ results = [output_dest, output_dest, output_migrate,
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
expect_cmd = [
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
- name='vol2', source='vol1')),
+ name='vol2', source='vol1'),
+ poll=False),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='cgsnapshot_id')),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol2_dest', 1, 'unit_test_pool', None, None)),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+ poll=False),
+ mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+ poll=True),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
- retry_disable=True),
- mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
- mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'))]
+ retry_disable=True,
+ poll=True),
+ mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+ poll=True)]
+ fake_cli.assert_has_calls(expect_cmd)
+
+ def test_deregister_initiator(self):
+ fake_cli = self.driverSetup()
+ self.driver.cli.destroy_empty_sg = True
+ self.driver.cli.itor_auto_dereg = True
+ cli_helper = self.driver.cli._client
+ data = {'storage_group_name': "fakehost",
+ 'storage_group_uid': "2F:D4:00:00:00:00:00:"
+ "00:00:00:FF:E5:3A:03:FD:6D",
+ 'lunmap': {1: 16}}
+ cli_helper.get_storage_group = mock.Mock(
+ return_value=data)
+ lun_info = {'lun_name': "unit_test_lun",
+ 'lun_id': 1,
+ 'pool': "unit_test_pool",
+ 'attached_snapshot': "N/A",
+ 'owner': "A",
+ 'total_capacity_gb': 1.0,
+ 'state': "Ready"}
+ cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
+ cli_helper.remove_hlu_from_storagegroup = mock.Mock()
+ cli_helper.disconnect_host_from_storage_group = mock.Mock()
+ cli_helper.delete_storage_group = mock.Mock()
+ self.driver.terminate_connection(self.testData.test_volume,
+ self.testData.connector)
+ expect_cmd = [
+ mock.call('port', '-removeHBA', '-hbauid',
+ self.testData.connector['initiator'],
+ '-o')]
fake_cli.assert_has_calls(expect_cmd)
def succeed_fake_command_execute(self, *command, **kwargv):
return SUCCEED
+ def fake_setup_command_execute(self, *command, **kwargv):
+ return self.testData.ALL_PORTS
+
def fake_get_pool_properties(self, filter_option, properties=None):
pool_info = {'pool_name': "unit_test_pool0",
'total_capacity_gb': 1000.0,
super(EMCVNXCLIDriverFCTestCase, self).setUp()
self.stubs.Set(CommandLineHelper, 'command_execute',
- self.succeed_fake_command_execute)
+ self.fake_setup_command_execute)
self.stubs.Set(CommandLineHelper, 'get_array_serial',
mock.Mock(return_value={'array_serial':
"fakeSerial"}))
        # Set the timeout to 0.0002 * 60 = 0.012s (12ms)
self.configuration.default_timeout = 0.0002
self.configuration.initiator_auto_registration = True
+ self.configuration.check_max_pool_luns_threshold = False
self.configuration.zoning_mode = None
+ self.configuration.max_luns_per_storage_pool = 4000
self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
self.testData = EMCVNXCLIDriverTestData()
self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
'-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
- def tearDown(self):
- super(EMCVNXCLIDriverFCTestCase, self).tearDown()
-
def driverSetup(self, commands=tuple(), results=tuple()):
self.driver = EMCCLIFCDriver(configuration=self.configuration)
fake_command_execute = self.get_command_execute_simulator(
return standard_default
- def succeed_fake_command_execute(self, *command, **kwargv):
- return SUCCEED
+ def fake_setup_command_execute(self, *command, **kwargv):
+ return self.testData.ALL_PORTS
def fake_get_pool_properties(self, filter_option, properties=None):
pool_info = {'pool_name': "unit_test_pool0",
mock.Mock(
return_value=(
"fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
- @mock.patch("random.shuffle", mock.Mock())
+ @mock.patch('random.randint',
+ mock.Mock(return_value=0))
def test_initialize_connection_fc_auto_reg(self):
# Test for auto registration
+ test_volume = self.testData.test_volume.copy()
+ test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
self.configuration.initiator_auto_registration = True
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
- ('storagegroup', '-list'),
self.testData.GETFCPORT_CMD(),
('port', '-list', '-gname', 'fakehost')]
results = [[("No group", 83),
- self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
- self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
self.testData.FC_PORTS,
self.testData.FAKEHOST_PORTS]
fake_cli = self.driverSetup(commands, results)
- data = self.driver.initialize_connection(
- self.testData.test_volume,
+ self.driver.initialize_connection(
+ test_volume,
self.testData.connector)
- self.assertEqual(data['data']['access_mode'], 'ro')
-
- expected = [
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call('storagegroup', '-create', '-gname', 'fakehost'),
- mock.call('storagegroup', '-list'),
- mock.call('port', '-list', '-sp'),
- mock.call('storagegroup', '-gname', 'fakehost',
- '-setpath', '-hbauid',
- '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56',
- '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
- '-host', 'fakehost', '-o'),
- mock.call('port', '-list', '-sp'),
- mock.call('storagegroup', '-gname', 'fakehost',
- '-setpath', '-hbauid',
- '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16',
- '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
- '-host', 'fakehost', '-o'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
- '-gname', 'fakehost'),
- mock.call('port', '-list', '-gname', 'fakehost'),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call('port', '-list', '-sp')]
+ expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=False),
+ mock.call('storagegroup', '-create', '-gname', 'fakehost'),
+ mock.call('port', '-list', '-sp'),
+ mock.call('storagegroup', '-gname', 'fakehost',
+ '-setpath', '-hbauid',
+ '22:34:56:78:90:12:34:56:12:34:56:78:'
+ '90:12:34:56',
+ '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
+ '-host', 'fakehost', '-o'),
+ mock.call('storagegroup', '-gname', 'fakehost',
+ '-setpath', '-hbauid',
+ '22:34:56:78:90:54:32:16:12:34:56:78:'
+ '90:54:32:16',
+ '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
+ '-host', 'fakehost', '-o'),
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
+ mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
+ '-gname', 'fakehost',
+ poll=False),
+ mock.call('port', '-list', '-gname', 'fakehost')
+ ]
fake_cli.assert_has_calls(expected)
        # Test for manual registration
self.configuration.initiator_auto_registration = False
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
- ('storagegroup', '-list'),
self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
self.testData.GETFCPORT_CMD(),
('port', '-list', '-gname', 'fakehost')]
results = [[("No group", 83),
- self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
- self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
- self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
+ self.testData.STORAGE_GROUP_NO_MAP('fakehost')],
('', 0),
self.testData.FC_PORTS,
self.testData.FAKEHOST_PORTS]
fake_cli = self.driverSetup(commands, results)
- data = self.driver.initialize_connection(
- self.testData.test_volume_rw,
+ self.driver.initialize_connection(
+ test_volume,
self.testData.connector)
- self.assertEqual(data['data']['access_mode'], 'rw')
-
- expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+ expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=False),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
mock.call('storagegroup', '-connecthost',
'-host', 'fakehost', '-gname', 'fakehost', '-o'),
- mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
- '-gname', 'fakehost'),
- mock.call('port', '-list', '-gname', 'fakehost'),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
- mock.call('port', '-list', '-sp')]
+ '-gname', 'fakehost', poll=False),
+ mock.call('port', '-list', '-gname', 'fakehost')
+ ]
fake_cli.assert_has_calls(expected)
@mock.patch(
"cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
- @mock.patch("random.shuffle", mock.Mock())
+ @mock.patch('random.randint',
+ mock.Mock(return_value=0))
def test_initialize_connection_fc_auto_zoning(self):
# Test for auto zoning
self.configuration.zoning_mode = 'fabric'
self.configuration.initiator_auto_registration = False
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
- ('storagegroup', '-list'),
self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
- self.testData.GETFCPORT_CMD(),
- ('port', '-list', '-gname', 'fakehost')]
+ self.testData.GETFCPORT_CMD()]
results = [[("No group", 83),
self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
- self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
('', 0),
- self.testData.FC_PORTS,
- self.testData.FAKEHOST_PORTS]
+ self.testData.FC_PORTS]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
EMCVNXCLIDriverTestData.i_t_map)
self.assertEqual(conn_info['data']['target_wwn'],
['1122334455667777'])
- expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+ expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=False),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
mock.call('storagegroup', '-connecthost',
'-host', 'fakehost', '-gname', 'fakehost', '-o'),
- mock.call('lun', '-list', '-name', 'vol1',
- '-state', '-status', '-opDetails',
- '-userCap', '-owner', '-attachedSnapshot'),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
- '-gname', 'fakehost'),
- mock.call('port', '-list', '-gname', 'fakehost'),
- mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+ '-gname', 'fakehost',
+ poll=False),
+ mock.call('storagegroup', '-list', '-gname', 'fakehost',
+ poll=True),
mock.call('port', '-list', '-sp')]
fake_cli.assert_has_calls(expected)
'lunmap': {1: 16, 2: 88, 3: 47}}
cli_helper.get_storage_group = mock.Mock(
return_value=data)
- lun_info = {'lun_name': "unit_test_lun",
- 'lun_id': 1,
- 'pool': "unit_test_pool",
- 'attached_snapshot': "N/A",
- 'owner': "A",
- 'total_capacity_gb': 1.0,
- 'state': "Ready"}
- cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
connection_info = self.driver.terminate_connection(
self.testData.test_volume,
self.testData.connector)
- self.assertFalse('initiator_target_map' in connection_info['data'],
- 'initiator_target_map should not appear.')
+        self.assertFalse(connection_info['data'],
+                         'connection_info data should be empty.')
cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
16, self.testData.connector["host"])
'lunmap': {}}
cli_helper.get_storage_group = mock.Mock(
return_value=data)
- lun_info = {'lun_name': "unit_test_lun",
- 'lun_id': 1,
- 'pool': "unit_test_pool",
- 'attached_snapshot': "N/A",
- 'owner': "A",
- 'total_capacity_gb': 1.0,
- 'state': "Ready"}
- cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
EMCVNXCLIDriverTestData.i_t_map)
def test_get_volume_stats(self):
- #expect_result = [POOL_PROPERTY]
self.driverSetup()
stats = self.driver.get_volume_stats(True)
self.assertTrue(stats['driver_version'] is not None,
- "dirver_version is not returned")
+ "driver_version is not returned")
self.assertTrue(
- stats['free_capacity_gb'] == 1000.6,
+ stats['free_capacity_gb'] == 3257.851,
"free_capacity_gb is not correct")
self.assertTrue(
- stats['reserved_percentage'] == 0,
+ stats['reserved_percentage'] == 3,
"reserved_percentage is not correct")
self.assertTrue(
stats['storage_protocol'] == 'FC',
"storage_protocol is not correct")
self.assertTrue(
- stats['total_capacity_gb'] == 10000.5,
+ stats['total_capacity_gb'] == 3281.146,
"total_capacity_gb is not correct")
self.assertTrue(
stats['vendor_name'] == "EMC",
"volume backend name is not correct")
self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
self.assertTrue(
- stats['driver_version'] == "04.01.00",
+ stats['driver_version'] == "05.00.00",
"driver version is incorrect.")
+ def test_get_volume_stats_too_many_luns(self):
+ commands = [self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
+ results = [self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
+ fake_cli = self.driverSetup(commands, results)
+
+ self.driver.cli.check_max_pool_luns_threshold = True
+ stats = self.driver.get_volume_stats(True)
+ self.assertTrue(
+ stats['free_capacity_gb'] == 0,
+ "free_capacity_gb is not correct")
+ expect_cmd = [
+ mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
+ poll=False)]
+ fake_cli.assert_has_calls(expect_cmd)
+
+ self.driver.cli.check_max_pool_luns_threshold = False
+ stats = self.driver.get_volume_stats(True)
+ self.assertTrue(stats['driver_version'] is not None,
+ "driver_version is not returned")
+ self.assertTrue(
+ stats['free_capacity_gb'] == 3257.851,
+ "free_capacity_gb is not correct")
+
+ def test_deregister_initiator(self):
+ fake_cli = self.driverSetup()
+ self.driver.cli.destroy_empty_sg = True
+ self.driver.cli.itor_auto_dereg = True
+ cli_helper = self.driver.cli._client
+ data = {'storage_group_name': "fakehost",
+ 'storage_group_uid': "2F:D4:00:00:00:00:00:"
+ "00:00:00:FF:E5:3A:03:FD:6D",
+ 'lunmap': {1: 16}}
+ cli_helper.get_storage_group = mock.Mock(
+ return_value=data)
+ lun_info = {'lun_name': "unit_test_lun",
+ 'lun_id': 1,
+ 'pool': "unit_test_pool",
+ 'attached_snapshot': "N/A",
+ 'owner': "A",
+ 'total_capacity_gb': 1.0,
+ 'state': "Ready"}
+ cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
+ cli_helper.remove_hlu_from_storagegroup = mock.Mock()
+ cli_helper.disconnect_host_from_storage_group = mock.Mock()
+ cli_helper.delete_storage_group = mock.Mock()
+ self.driver.terminate_connection(self.testData.test_volume,
+ self.testData.connector)
+ fc_itor_1 = '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56'
+ fc_itor_2 = '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16'
+ expect_cmd = [
+ mock.call('port', '-removeHBA', '-hbauid', fc_itor_1, '-o'),
+ mock.call('port', '-removeHBA', '-hbauid', fc_itor_2, '-o')]
+ fake_cli.assert_has_calls(expect_cmd)
+
class EMCVNXCLIToggleSPTestData():
def FAKE_COMMAND_PREFIX(self, sp_address):
configuration=self.configuration)
self.test_data = EMCVNXCLIToggleSPTestData()
- def tearDown(self):
- super(EMCVNXCLIToggleSPTestCase, self).tearDown()
-
def test_no_sp_toggle(self):
self.cli_client.active_storage_ip = '10.10.10.10'
FAKE_SUCCESS_RETURN = ('success', 0)
FAKE_COMMAND = ('list', 'pool')
- SIDE_EFFECTS = [FAKE_SUCCESS_RETURN, FAKE_SUCCESS_RETURN]
+ SIDE_EFFECTS = [FAKE_SUCCESS_RETURN]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.10")
- expected = [mock.call(*('ping', '-c', 1, '10.10.10.10'),
- check_exit_code=True),
- mock.call(
- *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
- + FAKE_COMMAND),
- check_exit_code=True)]
+ expected = [
+ mock.call(*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+ + FAKE_COMMAND), check_exit_code=True)]
mock_utils.assert_has_calls(expected)
    def test_toggle_sp_with_server_unavailable(self):
Message : HTTP/1.1 503 Service Unavailable"""
FAKE_SUCCESS_RETURN = ('success', 0)
FAKE_COMMAND = ('list', 'pool')
- SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
- processutils.ProcessExecutionError(
- exit_code=255, stdout=FAKE_ERROR_MSG),
- FAKE_SUCCESS_RETURN]
+ SIDE_EFFECTS = [processutils.ProcessExecutionError(
+ exit_code=255, stdout=FAKE_ERROR_MSG),
+ FAKE_SUCCESS_RETURN]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
Message : End of data stream"""
FAKE_SUCCESS_RETURN = ('success', 0)
FAKE_COMMAND = ('list', 'pool')
- SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
- processutils.ProcessExecutionError(
- exit_code=255, stdout=FAKE_ERROR_MSG),
- FAKE_SUCCESS_RETURN]
+ SIDE_EFFECTS = [processutils.ProcessExecutionError(
+ exit_code=255, stdout=FAKE_ERROR_MSG),
+ FAKE_SUCCESS_RETURN]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
"""
FAKE_SUCCESS_RETURN = ('success', 0)
FAKE_COMMAND = ('list', 'pool')
- SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
- processutils.ProcessExecutionError(
- exit_code=255, stdout=FAKE_ERROR_MSG),
- FAKE_SUCCESS_RETURN]
+ SIDE_EFFECTS = [processutils.ProcessExecutionError(
+ exit_code=255, stdout=FAKE_ERROR_MSG),
+ FAKE_SUCCESS_RETURN]
+
+ with mock.patch('cinder.utils.execute') as mock_utils:
+ mock_utils.side_effect = SIDE_EFFECTS
+ self.cli_client.command_execute(*FAKE_COMMAND)
+ self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+ expected = [
+ mock.call(
+ *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+ + FAKE_COMMAND),
+ check_exit_code=True),
+ mock.call(
+ *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
+ + FAKE_COMMAND),
+ check_exit_code=True)]
+ mock_utils.assert_has_calls(expected)
+
+ def test_toggle_sp_with_connection_error(self):
+ self.cli_client.active_storage_ip = '10.10.10.10'
+ FAKE_ERROR_MSG = """\
+A network error occurred while trying to connect: '192.168.1.56'.
+Message : Error occurred because of time out"""
+ FAKE_SUCCESS_RETURN = ('success', 0)
+ FAKE_COMMAND = ('list', 'pool')
+ SIDE_EFFECTS = [processutils.ProcessExecutionError(
+ exit_code=255, stdout=FAKE_ERROR_MSG),
+ FAKE_SUCCESS_RETURN]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
"""
VNX CLI
"""
-
+import math
import os
import random
import re
import time
+import types
+import eventlet
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import timeutils
import six
+import taskflow.engines
+from taskflow.patterns import linear_flow
+from taskflow import task
+from taskflow.types import failure
from cinder import exception
from cinder.exception import EMCVnxCLICmdError
LOG = logging.getLogger(__name__)
INTERVAL_5_SEC = 5
+INTERVAL_20_SEC = 20
INTERVAL_30_SEC = 30
INTERVAL_60_SEC = 60
-NO_POLL = True
+ENABLE_TRACE = False
loc_opts = [
cfg.StrOpt('storage_vnx_authentication_type',
default=False,
help='Automatically register initiators. '
'By default, the value is False.'),
+ cfg.BoolOpt('initiator_auto_deregistration',
+ default=False,
+ help='Automatically deregister initiators after the related '
+ 'storage group is destroyed. '
+ 'By default, the value is False.'),
+ cfg.BoolOpt('check_max_pool_luns_threshold',
+ default=False,
+ help='Report free_capacity_gb as 0 when the limit to '
+ 'maximum number of pool LUNs is reached. '
+ 'By default, the value is False.'),
+ cfg.BoolOpt('force_delete_lun_in_storagegroup',
+ default=False,
+ help='Delete a LUN even if it is in Storage Groups.')
]
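+
+# A minimal illustrative cinder.conf backend section exercising the new
+# options above (section name and values are assumptions, not defaults):
+#
+#   [vnx_backend_1]
+#   initiator_auto_deregistration = True
+#   check_max_pool_luns_threshold = True
+#   force_delete_lun_in_storagegroup = False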
CONF.register_opts(loc_opts)
+def decorate_all_methods(method_decorator):
+ """Applies decorator on the methods of a class.
+
+ This is a class decorator, which will apply method decorator referred
+ by method_decorator to all the public methods (without underscore as
+ the prefix) in a class.
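+
+    A minimal illustrative usage (FakeHelper is an assumed name, not part
+    of this module)::
+
+        @decorate_all_methods(log_enter_exit)
+        class FakeHelper(object):
+            def do_work(self):
+                # wrapped by log_enter_exit when ENABLE_TRACE is True
+                pass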
+ """
+ if not ENABLE_TRACE:
+ return lambda cls: cls
+
+ def _decorate_all_methods(cls):
+ for attr_name, attr_val in cls.__dict__.items():
+ if (isinstance(attr_val, types.FunctionType) and
+ not attr_name.startswith("_")):
+ setattr(cls, attr_name, method_decorator(attr_val))
+ return cls
+
+ return _decorate_all_methods
+
+
def log_enter_exit(func):
+ if not CONF.debug:
+ return func
+
def inner(self, *args, **kwargs):
- LOG.debug("Entering %(cls)s.%(method)s" %
+ LOG.debug("Entering %(cls)s.%(method)s",
{'cls': self.__class__.__name__,
'method': func.__name__})
start = timeutils.utcnow()
end = timeutils.utcnow()
LOG.debug("Exiting %(cls)s.%(method)s. "
"Spent %(duration)s sec. "
- "Return %(return)s" %
+ "Return %(return)s",
{'cls': self.__class__.__name__,
'duration': timeutils.delta_seconds(start, end),
'method': func.__name__,
self.converter = converter
+@decorate_all_methods(log_enter_exit)
class CommandLineHelper(object):
LUN_STATE = PropertyDescriptor(
POOL_ALL = [POOL_TOTAL_CAPACITY, POOL_FREE_CAPACITY]
+ MAX_POOL_LUNS = PropertyDescriptor(
+ '-maxPoolLUNs',
+ 'Max. Pool LUNs:\s*(.*)\s*',
+ 'max_pool_luns',
+ int)
+ TOTAL_POOL_LUNS = PropertyDescriptor(
+ '-numPoolLUNs',
+ 'Total Number of Pool LUNs:\s*(.*)\s*',
+ 'total_pool_luns',
+ int)
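+    # Illustrative 'storagepool -feature -info' output lines that the two
+    # descriptors above are intended to match (values are examples only):
+    #   Max. Pool LUNs:  2100
+    #   Total Number of Pool LUNs:  1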
+
+ POOL_FEATURE_DEFAULT = (MAX_POOL_LUNS, TOTAL_POOL_LUNS)
+
CLI_RESP_PATTERN_CG_NOT_FOUND = 'Cannot find'
CLI_RESP_PATTERN_SNAP_NOT_FOUND = 'The specified snapshot does not exist'
+ CLI_RESP_PATTERN_LUN_NOT_EXIST = 'The (pool lun) may not exist'
+ CLI_RESP_PATTERN_SMP_NOT_ATTACHED = ('The specified Snapshot mount point '
+ 'is not currently attached.')
+ CLI_RESP_PATTERN_SG_NAME_IN_USE = "Storage Group name already in use"
+ CLI_RESP_PATTERN_LUN_IN_SG_1 = "contained in a Storage Group"
+ CLI_RESP_PATTERN_LUN_IN_SG_2 = "Host LUN/LUN mapping still exists"
def __init__(self, configuration):
configuration.append_config_values(san.san_opts)
self.primary_storage_ip = self.active_storage_ip
self.secondary_storage_ip = configuration.san_secondary_ip
if self.secondary_storage_ip == self.primary_storage_ip:
- LOG.warn(_LE("san_secondary_ip is configured as "
- "the same value as san_ip."))
+ LOG.warning(_LE("san_secondary_ip is configured as "
+ "the same value as san_ip."))
self.secondary_storage_ip = None
if not configuration.san_ip:
err_msg = _('san_ip: Mandatory field configuration. '
# if there is security file path provided, use this security file
if storage_vnx_security_file:
self.credentials = ('-secfilepath', storage_vnx_security_file)
- LOG.info(_LI("Using security file in %s for authentication") %
+ LOG.info(_LI("Using security file in %s for authentication"),
storage_vnx_security_file)
# if there is a username/password provided, use those in the cmd line
elif storage_username is not None and len(storage_username) > 0 and\
'-initialTier', 'optimizePool',
'-tieringPolicy', 'noMovement']}
- @log_enter_exit
+ def _raise_cli_error(self, cmd=None, rc=None, out='', **kwargs):
+ raise EMCVnxCLICmdError(cmd=cmd,
+ rc=rc,
+ out=out.split('\n'),
+ **kwargs)
+
def create_lun_with_advance_feature(self, pool, name, size,
provisioning, tiering,
- consistencygroup_id=None):
+ consistencygroup_id=None,
+ poll=True):
command_create_lun = ['lun', '-create',
'-capacity', size,
'-sq', 'gb',
'-poolName', pool,
'-name', name]
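+        # A '-np' (no-poll) prefix asks naviseccli to skip polling, so the
+        # command returns without waiting for a background object refresh.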
+ if not poll:
+ command_create_lun = ['-np'] + command_create_lun
# provisioning
if provisioning:
command_create_lun.extend(self.provisioning_values[provisioning])
except EMCVnxCLICmdError as ex:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
- LOG.error(_LE("Error on enable compression on lun %s.")
- % six.text_type(ex))
+ LOG.error(_LE("Error on enable compression on lun %s."),
+ six.text_type(ex))
# handle consistency group
try:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on adding lun to consistency"
- " group. %s") % six.text_type(ex))
+ " group. %s"), six.text_type(ex))
return data
- @log_enter_exit
def create_lun_by_cmd(self, cmd, name):
out, rc = self.command_execute(*cmd)
if rc != 0:
# Ignore the error that due to retry
if rc == 4 and out.find('(0x712d8d04)') >= 0:
- LOG.warn(_LW('LUN already exists, LUN name %(name)s. '
- 'Message: %(msg)s') %
- {'name': name, 'msg': out})
+ LOG.warning(_LW('LUN already exists, LUN name %(name)s. '
+ 'Message: %(msg)s'),
+ {'name': name, 'msg': out})
else:
- raise EMCVnxCLICmdError(cmd, rc, out)
+ self._raise_cli_error(cmd, rc, out)
def lun_is_ready():
- data = self.get_lun_by_name(name)
- return data[self.LUN_STATE.key] == 'Ready' and \
- data[self.LUN_STATUS.key] == 'OK(0x0)' and \
- data[self.LUN_OPERATION.key] == 'None'
+ try:
+ data = self.get_lun_by_name(name, self.LUN_ALL, False)
+ return (data[self.LUN_STATE.key] == 'Ready' and
+ data[self.LUN_STATUS.key] == 'OK(0x0)' and
+ data[self.LUN_OPERATION.key] == 'None')
+ except EMCVnxCLICmdError as ex:
+ orig_out = "\n".join(ex.kwargs["out"])
+ if orig_out.find(
+ self.CLI_RESP_PATTERN_LUN_NOT_EXIST) >= 0:
+ return False
+ else:
+ raise ex
- self._wait_for_a_condition(lun_is_ready)
- lun = self.get_lun_by_name(name)
+ self._wait_for_a_condition(lun_is_ready,
+ None,
+ INTERVAL_5_SEC)
+ lun = self.get_lun_by_name(name, self.LUN_ALL, False)
return lun
- @log_enter_exit
def delete_lun(self, name):
command_delete_lun = ['lun', '-destroy',
'-o']
# executing cli command to delete volume
out, rc = self.command_execute(*command_delete_lun)
- if rc != 0:
+ if rc != 0 or out.strip():
# Ignore the error that due to retry
- if rc == 9 and out.find("not exist") >= 0:
- LOG.warn(_LW("LUN is already deleted, LUN name %(name)s. "
- "Message: %(msg)s") %
- {'name': name, 'msg': out})
+ if rc == 9 and self.CLI_RESP_PATTERN_LUN_NOT_EXIST in out:
+ LOG.warning(_LW("LUN is already deleted, LUN name %(name)s. "
+ "Message: %(msg)s"),
+ {'name': name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_delete_lun, rc, out)
+ self._raise_cli_error(command_delete_lun, rc, out)
+
+ def get_hlus(self, lun_id, poll=True):
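+        """Returns the HLUs of a LUN as a list of (hlu, sg_name) tuples.
+
+        Parses 'storagegroup -list' output; the HLU/ALU table inside each
+        storage group section looks roughly like this (illustrative):
+
+            HLU/ALU Pairs:
+              HLU Number     ALU Number
+              ----------     ----------
+                 1               1
+        """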
+ hlus = list()
+ command_storage_group_list = ('storagegroup', '-list')
+ out, rc = self.command_execute(*command_storage_group_list,
+ poll=poll)
+ if rc != 0:
+ self._raise_cli_error(command_storage_group_list, rc, out)
+ sg_name_p = re.compile(r'^\s*(?P<sg_name>[^\n\r]+)')
+ hlu_alu_p = re.compile(r'HLU/ALU Pairs:'
+ r'\s*HLU Number\s*ALU Number'
+ r'\s*[-\s]*'
+ r'(\d|\s)*'
+ r'\s+(?P<hlu>\d+)( |\t)+%s' % lun_id)
+ for sg_info in out.split('Storage Group Name:'):
+ hlu_alu_m = hlu_alu_p.search(sg_info)
+ if hlu_alu_m is None:
+ continue
+ sg_name_m = sg_name_p.search(sg_info)
+ if sg_name_m:
+ hlus.append((hlu_alu_m.group('hlu'),
+ sg_name_m.group('sg_name')))
+ return hlus
def _wait_for_a_condition(self, testmethod, timeout=None,
interval=INTERVAL_5_SEC):
testValue = False
LOG.debug('CommandLineHelper.'
'_wait_for_condition: %(method_name)s '
- 'execution failed for %(exception)s'
- % {'method_name': testmethod.__name__,
- 'exception': ex.message})
+ 'execution failed for %(exception)s',
+ {'method_name': testmethod.__name__,
+ 'exception': six.text_type(ex)})
if testValue:
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_inner)
timer.start(interval=interval).wait()
- @log_enter_exit
- def expand_lun(self, name, new_size):
+ def expand_lun(self, name, new_size, poll=True):
command_expand_lun = ('lun', '-expand',
'-name', name,
'-sq', 'gb',
'-o',
'-ignoreThresholds')
- out, rc = self.command_execute(*command_expand_lun)
+ out, rc = self.command_execute(*command_expand_lun,
+ poll=poll)
if rc != 0:
# Ignore the error that due to retry
if rc == 4 and out.find("(0x712d8e04)") >= 0:
- LOG.warn(_LW("LUN %(name)s is already expanded. "
- "Message: %(msg)s") %
- {'name': name, 'msg': out})
+ LOG.warning(_LW("LUN %(name)s is already expanded. "
+ "Message: %(msg)s"),
+ {'name': name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_expand_lun, rc, out)
+ self._raise_cli_error(command_expand_lun, rc, out)
- @log_enter_exit
def expand_lun_and_wait(self, name, new_size):
- self.expand_lun(name, new_size)
+ self.expand_lun(name, new_size, poll=False)
def lun_is_extented():
- data = self.get_lun_by_name(name)
+ data = self.get_lun_by_name(name, poll=False)
return new_size == data[self.LUN_CAPACITY.key]
self._wait_for_a_condition(lun_is_extented)
- @log_enter_exit
- def lun_rename(self, lun_id, new_name):
+ def lun_rename(self, lun_id, new_name, poll=False):
"""This function used to rename a lun to match
the expected name for the volume.
"""
'-newName', new_name,
'-o')
- out, rc = self.command_execute(*command_lun_rename)
+ out, rc = self.command_execute(*command_lun_rename,
+ poll=poll)
if rc != 0:
- raise EMCVnxCLICmdError(command_lun_rename, rc, out)
+ self._raise_cli_error(command_lun_rename, rc, out)
- @log_enter_exit
def modify_lun_tiering(self, name, tiering):
"""This function used to modify a lun's tiering policy."""
command_modify_lun = ['lun', '-modify',
out, rc = self.command_execute(*command_modify_lun)
if rc != 0:
- raise EMCVnxCLICmdError(command_modify_lun, rc, out)
+ self._raise_cli_error(command_modify_lun, rc, out)
- @log_enter_exit
def create_consistencygroup(self, context, group):
"""create the consistency group."""
cg_name = group['id']
# Ignore the error if consistency group already exists
if (rc == 33 and
out.find("(0x716d8021)") >= 0):
- LOG.warn(_LW('Consistency group %(name)s already '
- 'exists. Message: %(msg)s') %
- {'name': cg_name, 'msg': out})
+ LOG.warning(_LW('Consistency group %(name)s already '
+ 'exists. Message: %(msg)s'),
+ {'name': cg_name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_create_cg, rc, out)
+ self._raise_cli_error(command_create_cg, rc, out)
- @log_enter_exit
def get_consistency_group_by_name(self, cg_name):
cmd = ('snap', '-group', '-list', '-id', cg_name)
data = {
luns_of_cg = m.groups()[3].split(',')
if luns_of_cg:
data['Luns'] = [lun.strip() for lun in luns_of_cg]
- LOG.debug("Found consistent group %s." % data['Name'])
+ LOG.debug("Found consistent group %s.", data['Name'])
return data
- @log_enter_exit
def add_lun_to_consistency_group(self, cg_name, lun_id):
add_lun_to_cg_cmd = ('-np', 'snap', '-group',
'-addmember', '-id',
"group %(cg_name)s.") % {'lun': lun_id,
'cg_name': cg_name})
LOG.error(msg)
- raise EMCVnxCLICmdError(add_lun_to_cg_cmd, rc, out)
+ self._raise_cli_error(add_lun_to_cg_cmd, rc, out)
def add_lun_to_consistency_success():
data = self.get_consistency_group_by_name(cg_name)
if str(lun_id) in data['Luns']:
LOG.debug(("Add lun %(lun)s to consistency "
- "group %(cg_name)s successfully.") %
+ "group %(cg_name)s successfully."),
{'lun': lun_id, 'cg_name': cg_name})
return True
else:
LOG.debug(("Adding lun %(lun)s to consistency "
- "group %(cg_name)s.") %
+ "group %(cg_name)s."),
{'lun': lun_id, 'cg_name': cg_name})
return False
self._wait_for_a_condition(add_lun_to_consistency_success,
interval=INTERVAL_30_SEC)
- @log_enter_exit
def delete_consistencygroup(self, cg_name):
delete_cg_cmd = ('-np', 'snap', '-group',
'-destroy', '-id', cg_name)
if rc != 0:
# Ignore the error if CG doesn't exist
if rc == 13 and out.find(self.CLI_RESP_PATTERN_CG_NOT_FOUND) >= 0:
- LOG.warn(_LW("CG %(cg_name)s does not exist. "
- "Message: %(msg)s") %
- {'cg_name': cg_name, 'msg': out})
+ LOG.warning(_LW("CG %(cg_name)s does not exist. "
+ "Message: %(msg)s"),
+ {'cg_name': cg_name, 'msg': out})
elif rc == 1 and out.find("0x712d8801") >= 0:
- LOG.warn(_LW("CG %(cg_name)s is deleting. "
- "Message: %(msg)s") %
- {'cg_name': cg_name, 'msg': out})
+ LOG.warning(_LW("CG %(cg_name)s is deleting. "
+ "Message: %(msg)s"),
+ {'cg_name': cg_name, 'msg': out})
else:
- raise EMCVnxCLICmdError(delete_cg_cmd, rc, out)
+ self._raise_cli_error(delete_cg_cmd, rc, out)
else:
LOG.info(_LI('Consistency group %s was deleted '
- 'successfully.') % cg_name)
+ 'successfully.'), cg_name)
- @log_enter_exit
def create_cgsnapshot(self, cgsnapshot):
"""Create a cgsnapshot (snap group)."""
cg_name = cgsnapshot['consistencygroup_id']
# Ignore the error if cgsnapshot already exists
if (rc == 5 and
out.find("(0x716d8005)") >= 0):
- LOG.warn(_LW('Cgsnapshot name %(name)s already '
- 'exists. Message: %(msg)s') %
- {'name': snap_name, 'msg': out})
+ LOG.warning(_LW('Cgsnapshot name %(name)s already '
+ 'exists. Message: %(msg)s'),
+ {'name': snap_name, 'msg': out})
else:
- raise EMCVnxCLICmdError(create_cg_snap_cmd, rc, out)
+ self._raise_cli_error(create_cg_snap_cmd, rc, out)
- @log_enter_exit
def delete_cgsnapshot(self, cgsnapshot):
"""Delete a cgsnapshot (snap group)."""
snap_name = cgsnapshot['id']
# Ignore the error if cgsnapshot does not exist.
if (rc == 5 and
out.find(self.CLI_RESP_PATTERN_SNAP_NOT_FOUND) >= 0):
- LOG.warn(_LW('Snapshot %(name)s for consistency group '
- 'does not exist. Message: %(msg)s') %
- {'name': snap_name, 'msg': out})
+ LOG.warning(_LW('Snapshot %(name)s for consistency group '
+ 'does not exist. Message: %(msg)s'),
+ {'name': snap_name, 'msg': out})
else:
- raise EMCVnxCLICmdError(delete_cg_snap_cmd, rc, out)
+ self._raise_cli_error(delete_cg_snap_cmd, rc, out)
- @log_enter_exit
- def create_snapshot(self, volume_name, name):
- data = self.get_lun_by_name(volume_name)
- if data[self.LUN_ID.key] is not None:
+ def create_snapshot(self, lun_id, name):
+ if lun_id is not None:
command_create_snapshot = ('snap', '-create',
- '-res', data[self.LUN_ID.key],
+ '-res', lun_id,
'-name', name,
'-allowReadWrite', 'yes',
'-allowAutoDelete', 'no')
- out, rc = self.command_execute(*command_create_snapshot)
+ out, rc = self.command_execute(*command_create_snapshot,
+ poll=False)
if rc != 0:
# Ignore the error that due to retry
if (rc == 5 and
out.find("(0x716d8005)") >= 0):
- LOG.warn(_LW('Snapshot %(name)s already exists. '
- 'Message: %(msg)s') %
- {'name': name, 'msg': out})
+ LOG.warning(_LW('Snapshot %(name)s already exists. '
+ 'Message: %(msg)s'),
+ {'name': name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_create_snapshot, rc, out)
+ self._raise_cli_error(command_create_snapshot, rc, out)
else:
- msg = _('Failed to get LUN ID for volume %s.') % volume_name
+ msg = _('Failed to create snapshot as no LUN ID is specified')
raise exception.VolumeBackendAPIException(data=msg)
- @log_enter_exit
def delete_snapshot(self, name):
def delete_snapshot_success():
command_delete_snapshot = ('snap', '-destroy',
'-id', name,
'-o')
- out, rc = self.command_execute(*command_delete_snapshot)
+ out, rc = self.command_execute(*command_delete_snapshot,
+ poll=True)
if rc != 0:
# Ignore the error that due to retry
if rc == 5 and out.find("not exist") >= 0:
- LOG.warn(_LW("Snapshot %(name)s may deleted already. "
- "Message: %(msg)s") %
- {'name': name, 'msg': out})
+ LOG.warning(_LW("Snapshot %(name)s may deleted already. "
+ "Message: %(msg)s"),
+ {'name': name, 'msg': out})
return True
# The snapshot cannot be destroyed because it is
# attached to a snapshot mount point. Wait
elif rc == 3 and out.find("(0x716d8003)") >= 0:
- LOG.warn(_LW("Snapshot %(name)s is in use, retry. "
- "Message: %(msg)s") %
- {'name': name, 'msg': out})
+ LOG.warning(_LW("Snapshot %(name)s is in use, retry. "
+ "Message: %(msg)s"),
+ {'name': name, 'msg': out})
return False
else:
- raise EMCVnxCLICmdError(command_delete_snapshot, rc, out)
+ self._raise_cli_error(command_delete_snapshot, rc, out)
else:
- LOG.info(_LI('Snapshot %s was deleted successfully.') %
+ LOG.info(_LI('Snapshot %s was deleted successfully.'),
name)
return True
interval=INTERVAL_30_SEC,
timeout=INTERVAL_30_SEC * 3)
- @log_enter_exit
def create_mount_point(self, primary_lun_name, name):
command_create_mount_point = ('lun', '-create',
'-primaryLunName', primary_lun_name,
'-name', name)
- out, rc = self.command_execute(*command_create_mount_point)
+ out, rc = self.command_execute(*command_create_mount_point,
+ poll=False)
if rc != 0:
# Ignore the error that due to retry
if rc == 4 and out.find("(0x712d8d04)") >= 0:
- LOG.warn(_LW("Mount point %(name)s already exists. "
- "Message: %(msg)s") %
- {'name': name, 'msg': out})
+ LOG.warning(_LW("Mount point %(name)s already exists. "
+ "Message: %(msg)s"),
+ {'name': name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_create_mount_point, rc, out)
+ self._raise_cli_error(command_create_mount_point, rc, out)
return rc
- @log_enter_exit
def attach_mount_point(self, name, snapshot_name):
command_attach_mount_point = ('lun', '-attach',
if rc != 0:
# Ignore the error that due to retry
if rc == 85 and out.find('(0x716d8055)') >= 0:
- LOG.warn(_LW("Snapshot %(snapname)s is attached to snapshot "
- "mount point %(mpname)s already. "
- "Message: %(msg)s") %
- {'snapname': snapshot_name,
- 'mpname': name,
- 'msg': out})
+ LOG.warning(_LW("Snapshot %(snapname)s is attached to "
+ "snapshot mount point %(mpname)s already. "
+ "Message: %(msg)s"),
+ {'snapname': snapshot_name,
+ 'mpname': name,
+ 'msg': out})
else:
- raise EMCVnxCLICmdError(command_attach_mount_point, rc, out)
+ self._raise_cli_error(command_attach_mount_point, rc, out)
return rc
- @log_enter_exit
- def check_smp_not_attached(self, smp_name):
- """Ensure a snap mount point with snap become a LUN."""
+ def detach_mount_point(self, smp_name):
- def _wait_for_sync_status():
- lun_list = ('lun', '-list', '-name', smp_name,
- '-attachedSnapshot')
- out, rc = self.command_execute(*lun_list)
- if rc == 0:
- vol_details = out.split('\n')
- snap_name = vol_details[2].split(':')[1].strip()
- if (snap_name == 'N/A'):
- return True
- return False
+ command_detach_mount_point = ('lun', '-detach',
+ '-name', smp_name)
+
+ out, rc = self.command_execute(*command_detach_mount_point)
+ if rc != 0:
+ # Ignore the error that due to retry
+ if (rc == 162 and
+ out.find(self.CLI_RESP_PATTERN_SMP_NOT_ATTACHED) >= 0):
+ LOG.warning(_LW("The specified Snapshot mount point %s is not "
+ "currently attached."), smp_name)
+ else:
+ self._raise_cli_error(command_detach_mount_point, rc, out)
- self._wait_for_a_condition(_wait_for_sync_status)
+ return rc
- @log_enter_exit
- def migrate_lun(self, src_id, dst_id, log_failure_as_error=True):
+ def migrate_lun(self, src_id, dst_id):
command_migrate_lun = ('migrate', '-start',
'-source', src_id,
'-dest', dst_id,
'-o')
# SP HA is not supported by LUN migration
out, rc = self.command_execute(*command_migrate_lun,
- retry_disable=True)
+ retry_disable=True,
+ poll=True)
if 0 != rc:
- raise EMCVnxCLICmdError(command_migrate_lun, rc, out,
- log_failure_as_error)
+ self._raise_cli_error(command_migrate_lun, rc, out)
return rc
- @log_enter_exit
def migrate_lun_with_verification(self, src_id,
dst_id=None,
dst_name=None):
try:
- self.migrate_lun(src_id, dst_id, log_failure_as_error=False)
+ self.migrate_lun(src_id, dst_id)
except EMCVnxCLICmdError as ex:
migration_succeed = False
- if self._is_sp_unavailable_error(ex.out):
- LOG.warn(_LW("Migration command may get network timeout. "
- "Double check whether migration in fact "
- "started successfully. Message: %(msg)s") %
- {'msg': ex.out})
+ orig_out = "\n".join(ex.kwargs["out"])
+ if self._is_sp_unavailable_error(orig_out):
+ LOG.warning(_LW("Migration command may get network timeout. "
+ "Double check whether migration in fact "
+ "started successfully. Message: %(msg)s"),
+ {'msg': ex.kwargs["out"]})
command_migrate_list = ('migrate', '-list',
'-source', src_id)
- rc = self.command_execute(*command_migrate_list)[1]
+ rc = self.command_execute(*command_migrate_list,
+ poll=True)[1]
if rc == 0:
migration_succeed = True
if not migration_succeed:
- LOG.warn(_LW("Start migration failed. Message: %s") %
- ex.out)
- LOG.debug("Delete temp LUN after migration "
- "start failed. LUN: %s" % dst_name)
- if(dst_name is not None):
+ LOG.warning(_LW("Start migration failed. Message: %s"),
+ ex.kwargs["out"])
+ if dst_name is not None:
+ LOG.warning(_LW("Delete temp LUN after migration "
+ "start failed. LUN: %s"), dst_name)
self.delete_lun(dst_name)
return False
# Set the proper interval to verify the migration status
- def migration_is_ready():
+ def migration_is_ready(poll=False):
mig_ready = False
- command_migrate_list = ('migrate', '-list',
- '-source', src_id)
- out, rc = self.command_execute(*command_migrate_list)
- LOG.debug("Migration output: %s" % out)
+ cmd_migrate_list = ('migrate', '-list', '-source', src_id)
+ out, rc = self.command_execute(*cmd_migrate_list,
+ poll=poll)
+ LOG.debug("Migration output: %s", out)
if rc == 0:
# parse the percentage
out = re.split(r'\n', out)
else:
if re.search(r'The specified source LUN '
'is not currently migrating', out):
- LOG.debug("Migration of LUN %s is finished." % src_id)
+ LOG.debug("Migration of LUN %s is finished.", src_id)
mig_ready = True
else:
reason = _("Querying migrating status error.")
{'reason': reason, 'output': out})
return mig_ready
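+
+        # Sleep first, then do one polled check (poll=True); the follow-up
+        # checks via _wait_for_a_condition use the default poll=False.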
+ eventlet.sleep(INTERVAL_30_SEC)
+ if migration_is_ready(True):
+ return True
self._wait_for_a_condition(migration_is_ready,
interval=INTERVAL_30_SEC)
return True
- @log_enter_exit
- def get_storage_group(self, name):
+ def get_storage_group(self, name, poll=True):
# ALU/HLU as key/value map
lun_map = {}
data = {'storage_group_name': name,
'storage_group_uid': None,
- 'lunmap': lun_map}
+ 'lunmap': lun_map,
+ 'raw_output': ''}
command_get_storage_group = ('storagegroup', '-list',
'-gname', name)
- out, rc = self.command_execute(*command_get_storage_group)
+ out, rc = self.command_execute(*command_get_storage_group,
+ poll=poll)
if rc != 0:
- raise EMCVnxCLICmdError(command_get_storage_group, rc, out)
+ self._raise_cli_error(command_get_storage_group, rc, out)
+ data['raw_output'] = out
        re_storage_group_id = 'Storage Group UID:\s*(.*)\s*'
        m = re.search(re_storage_group_id, out)
if m is not None:
return data
- @log_enter_exit
def create_storage_group(self, name):
command_create_storage_group = ('storagegroup', '-create',
out, rc = self.command_execute(*command_create_storage_group)
if rc != 0:
# Ignore the error that due to retry
- if rc == 66 and out.find("name already in use") >= 0:
- LOG.warn(_LW('Storage group %(name)s already exists. '
- 'Message: %(msg)s') %
- {'name': name, 'msg': out})
+            if rc == 66 and self.CLI_RESP_PATTERN_SG_NAME_IN_USE in out:
+ LOG.warning(_LW('Storage group %(name)s already exists. '
+ 'Message: %(msg)s'),
+ {'name': name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_create_storage_group, rc, out)
+ self._raise_cli_error(command_create_storage_group, rc, out)
- @log_enter_exit
def delete_storage_group(self, name):
command_delete_storage_group = ('storagegroup', '-destroy',
# Ignore the error that due to retry
if rc == 83 and out.find("group name or UID does not "
"match any storage groups") >= 0:
- LOG.warn(_LW("Storage group %(name)s doesn't exist, "
- "may have already been deleted. "
- "Message: %(msg)s") %
- {'name': name, 'msg': out})
+ LOG.warning(_LW("Storage group %(name)s doesn't exist, "
+ "may have already been deleted. "
+ "Message: %(msg)s"),
+ {'name': name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_delete_storage_group, rc, out)
+ self._raise_cli_error(command_delete_storage_group, rc, out)
- @log_enter_exit
def connect_host_to_storage_group(self, hostname, sg_name):
command_host_connect = ('storagegroup', '-connecthost',
out, rc = self.command_execute(*command_host_connect)
if rc != 0:
- raise EMCVnxCLICmdError(command_host_connect, rc, out)
+ self._raise_cli_error(command_host_connect, rc, out)
- @log_enter_exit
def disconnect_host_from_storage_group(self, hostname, sg_name):
command_host_disconnect = ('storagegroup', '-disconnecthost',
'-host', hostname,
if rc == 116 and \
re.search("host is not.*connected to.*storage group",
out) is not None:
- LOG.warn(_LW("Host %(host)s has already disconnected from "
- "storage group %(sgname)s. Message: %(msg)s") %
- {'host': hostname, 'sgname': sg_name, 'msg': out})
+ LOG.warning(_LW("Host %(host)s has already disconnected from "
+ "storage group %(sgname)s. Message: %(msg)s"),
+ {'host': hostname, 'sgname': sg_name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_host_disconnect, rc, out)
+ self._raise_cli_error(command_host_disconnect, rc, out)
- @log_enter_exit
def add_hlu_to_storage_group(self, hlu, alu, sg_name):
+ """Adds a lun into storage group as specified hlu number.
+
+ Return True if the hlu is as specified, otherwise False.
+ """
command_add_hlu = ('storagegroup', '-addhlu',
'-hlu', hlu,
'-alu', alu,
'-gname', sg_name)
- out, rc = self.command_execute(*command_add_hlu)
+ out, rc = self.command_execute(*command_add_hlu, poll=False)
if rc != 0:
- # Ignore the error that due to retry
- if rc == 66 and \
- re.search("LUN.*already.*added to.*Storage Group",
- out) is not None:
- LOG.warn(_LW("LUN %(lun)s has already added to "
- "Storage Group %(sgname)s. "
- "Message: %(msg)s") %
- {'lun': alu, 'sgname': sg_name, 'msg': out})
- else:
- raise EMCVnxCLICmdError(command_add_hlu, rc, out)
+            # No need to handle a retry for addhlu here; the retry is
+            # handled by the caller.
+ self._raise_cli_error(command_add_hlu, rc, out)
- @log_enter_exit
- def remove_hlu_from_storagegroup(self, hlu, sg_name):
+ return True
+
+ def remove_hlu_from_storagegroup(self, hlu, sg_name, poll=False):
command_remove_hlu = ('storagegroup', '-removehlu',
'-hlu', hlu,
'-gname', sg_name,
'-o')
- out, rc = self.command_execute(*command_remove_hlu)
+ out, rc = self.command_execute(*command_remove_hlu, poll=poll)
if rc != 0:
# Ignore the error that due to retry
if rc == 66 and\
out.find("No such Host LUN in this Storage Group") >= 0:
- LOG.warn(_LW("HLU %(hlu)s has already been removed from "
- "%(sgname)s. Message: %(msg)s") %
- {'hlu': hlu, 'sgname': sg_name, 'msg': out})
+ LOG.warning(_LW("HLU %(hlu)s has already been removed from "
+ "%(sgname)s. Message: %(msg)s"),
+ {'hlu': hlu, 'sgname': sg_name, 'msg': out})
else:
- raise EMCVnxCLICmdError(command_remove_hlu, rc, out)
+ self._raise_cli_error(command_remove_hlu, rc, out)
- @log_enter_exit
def get_iscsi_protocol_endpoints(self, device_sp):
command_get_port = ('connection', '-getport',
out, rc = self.command_execute(*command_get_port)
if rc != 0:
- raise EMCVnxCLICmdError(command_get_port, rc, out)
+ self._raise_cli_error(command_get_port, rc, out)
re_port_wwn = 'Port WWN:\s*(.*)\s*'
initiator_address = re.findall(re_port_wwn, out)
return initiator_address
- @log_enter_exit
- def get_pool_name_of_lun(self, lun_name):
+ def get_pool_name_of_lun(self, lun_name, poll=True):
data = self.get_lun_properties(
- ('-name', lun_name), self.LUN_WITH_POOL)
+ ('-name', lun_name), self.LUN_WITH_POOL, poll=poll)
return data.get('pool', '')
- @log_enter_exit
- def get_lun_by_name(self, name, properties=LUN_ALL):
- data = self.get_lun_properties(('-name', name), properties)
+ def get_lun_by_name(self, name, properties=LUN_ALL, poll=True):
+ data = self.get_lun_properties(('-name', name),
+ properties,
+ poll=poll)
return data
- @log_enter_exit
- def get_lun_by_id(self, lunid, properties=LUN_ALL):
- data = self.get_lun_properties(('-l', lunid), properties)
+ def get_lun_by_id(self, lunid, properties=LUN_ALL, poll=True):
+ data = self.get_lun_properties(('-l', lunid),
+ properties, poll=poll)
return data
- @log_enter_exit
- def get_pool(self, name):
- data = self.get_pool_properties(('-name', name))
+ def get_pool(self, name, poll=True):
+ data = self.get_pool_properties(('-name', name),
+ poll=poll)
return data
- @log_enter_exit
- def get_pool_properties(self, filter_option, properties=POOL_ALL):
+ def get_pool_properties(self, filter_option, properties=POOL_ALL,
+ poll=True):
module_list = ('storagepool', '-list')
- data = self._get_lun_or_pool_properties(
+ data = self._get_obj_properties(
module_list, filter_option,
base_properties=[self.POOL_NAME],
- adv_properties=properties)
+ adv_properties=properties,
+ poll=poll)
return data
- @log_enter_exit
- def get_lun_properties(self, filter_option, properties=LUN_ALL):
+ def get_lun_properties(self, filter_option, properties=LUN_ALL,
+ poll=True):
module_list = ('lun', '-list')
- data = self._get_lun_or_pool_properties(
+ data = self._get_obj_properties(
module_list, filter_option,
base_properties=[self.LUN_NAME, self.LUN_ID],
- adv_properties=properties)
+ adv_properties=properties,
+ poll=poll)
+ return data
+
+ def get_pool_feature_properties(self, properties=POOL_FEATURE_DEFAULT,
+ poll=True):
+ module_list = ("storagepool", '-feature', '-info')
+ data = self._get_obj_properties(
+ module_list, tuple(),
+ base_properties=[],
+ adv_properties=properties,
+ poll=poll)
return data
- def _get_lun_or_pool_properties(self, module_list,
- filter_option,
- base_properties=tuple(),
- adv_properties=tuple()):
+ def _get_obj_properties(self, module_list,
+ filter_option,
+ base_properties=tuple(),
+ adv_properties=tuple(),
+ poll=True):
-        # to do instance check
+        # TODO: add instance check
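+        # e.g. module_list=('lun', '-list') with filter_option=('-l', 5),
+        # plus each requested property's CLI option, forms the command below.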
- command_get_lun = module_list + filter_option
+ command_get = module_list + filter_option
for prop in adv_properties:
- command_get_lun += (prop.option, )
- out, rc = self.command_execute(*command_get_lun)
+ command_get += (prop.option, )
+ out, rc = self.command_execute(*command_get, poll=poll)
if rc != 0:
- raise EMCVnxCLICmdError(command_get_lun, rc, out)
+ self._raise_cli_error(command_get, rc, out)
data = {}
for baseprop in base_properties:
for prop in adv_properties:
data[prop.key] = self._get_property_value(out, prop)
- LOG.debug('Return LUN or Pool properties. Data: %s' % data)
+ LOG.debug('Return Object properties. Data: %s', data)
return data
def _get_property_value(self, out, propertyDescriptor):
return propertyDescriptor.converter(m.group(1))
except ValueError:
LOG.error(_LE("Invalid value for %(key)s, "
- "value is %(value)s.") %
+ "value is %(value)s."),
{'key': propertyDescriptor.key,
'value': m.group(1)})
return None
else:
return m.group(1)
else:
- LOG.debug('%s value is not found in the output.'
- % propertyDescriptor.label)
+ LOG.debug('%s value is not found in the output.',
+ propertyDescriptor.label)
return None
- @log_enter_exit
def check_lun_has_snap(self, lun_id):
cmd = ('snap', '-list', '-res', lun_id)
- rc = self.command_execute(*cmd)[1]
+ rc = self.command_execute(*cmd, poll=False)[1]
if rc == 0:
- LOG.debug("Find snapshots for %s." % lun_id)
+ LOG.debug("Find snapshots for %s.", lun_id)
return True
else:
return False
- # Return a pool list
- @log_enter_exit
- def get_pool_list(self, no_poll=False):
+ def get_pool_list(self, poll=True):
temp_cache = []
- cmd = ('-np', 'storagepool', '-list', '-availableCap', '-state') \
- if no_poll \
- else ('storagepool', '-list', '-availableCap', '-state')
- out, rc = self.command_execute(*cmd)
+ cmd = ('storagepool', '-list', '-availableCap', '-state')
+ out, rc = self.command_execute(*cmd, poll=poll)
if rc != 0:
- raise EMCVnxCLICmdError(cmd, rc, out)
+ self._raise_cli_error(cmd, rc, out)
try:
for pool in out.split('\n\n'):
pool, self.POOL_FREE_CAPACITY)
temp_cache.append(obj)
except Exception as ex:
- LOG.error(_LE("Error happened during storage pool querying, %s.")
- % ex)
+ LOG.error(_LE("Error happened during storage pool querying, %s."),
+ ex)
-        # NOTE: Do not want to continue raise the exception
-        # as the pools may temporarly unavailable
+        # NOTE: Do not want to continue raising the exception
+        # as the pools may be temporarily unavailable
pass
return temp_cache
- @log_enter_exit
- def get_array_serial(self, no_poll=False):
+ def get_array_serial(self, poll=False):
"""return array Serial No for pool backend."""
data = {'array_serial': 'unknown'}
- command_get_array_serial = ('-np', 'getagent', '-serial') \
- if no_poll else ('getagent', '-serial')
+ command_get_array_serial = ('getagent', '-serial')
# Set the property timeout to get array serial
- out, rc = self.command_execute(*command_get_array_serial)
+ out, rc = self.command_execute(*command_get_array_serial,
+ poll=poll)
if 0 == rc:
m = re.search(r'Serial No:\s+(\w+)', out)
if m:
data['array_serial'] = m.group(1)
else:
- LOG.warn(_LW("No array serial number returned, "
- "set as unknown."))
+ LOG.warning(_LW("No array serial number returned, "
+ "set as unknown."))
else:
- raise EMCVnxCLICmdError(command_get_array_serial, rc, out)
+ self._raise_cli_error(command_get_array_serial, rc, out)
return data
- @log_enter_exit
- def get_status_up_ports(self, storage_group_name):
+ def get_status_up_ports(self, storage_group_name, poll=True):
"""Function to get ports whose status are up."""
cmd_get_hba = ('storagegroup', '-list', '-gname', storage_group_name)
- out, rc = self.command_execute(*cmd_get_hba)
+ out, rc = self.command_execute(*cmd_get_hba, poll=poll)
wwns = []
if 0 == rc:
_re_hba_sp_pair = re.compile('((\w\w:){15}(\w\w)\s*' +
cmd_get_port = ('port', '-list', '-sp')
out, rc = self.command_execute(*cmd_get_port)
if 0 != rc:
- raise EMCVnxCLICmdError(cmd_get_port, rc, out)
+ self._raise_cli_error(cmd_get_port, rc, out)
for i, sp in enumerate(sps):
wwn = self.get_port_wwn(sp, portid[i], out)
if (wwn is not None) and (wwn not in wwns):
- LOG.debug('Add wwn:%(wwn)s for sg:%(sg)s.'
- % {'wwn': wwn,
- 'sg': storage_group_name})
+ LOG.debug('Add wwn:%(wwn)s for sg:%(sg)s.',
+ {'wwn': wwn,
+ 'sg': storage_group_name})
wwns.append(wwn)
+ elif 83 == rc:
+ LOG.warning(_LW("Storage Group %s is not found."),
+ storage_group_name)
else:
- raise EMCVnxCLICmdError(cmd_get_hba, rc, out)
+ self._raise_cli_error(cmd_get_hba, rc, out)
return wwns
- @log_enter_exit
def get_login_ports(self, storage_group_name, connector_wwpns):
cmd_list_hba = ('port', '-list', '-gname', storage_group_name)
if wwn:
wwns.append(wwn)
else:
- raise EMCVnxCLICmdError(cmd_list_hba, rc, out)
+ self._raise_cli_error(cmd_list_hba, rc, out)
return wwns
- @log_enter_exit
def get_port_wwn(self, sp, port_id, allports=None):
wwn = None
if allports is None:
cmd_get_port = ('port', '-list', '-sp')
out, rc = self.command_execute(*cmd_get_port)
if 0 != rc:
- raise EMCVnxCLICmdError(cmd_get_port, rc, out)
+ self._raise_cli_error(cmd_get_port, rc, out)
else:
allports = out
_re_port_wwn = re.compile('SP Name:\s*' + sp +
wwn = _obj_search.group(1).replace(':', '')[16:]
return wwn
- @log_enter_exit
def get_fc_targets(self):
fc_getport = ('port', '-list', '-sp')
out, rc = self.command_execute(*fc_getport)
if rc != 0:
- raise EMCVnxCLICmdError(fc_getport, rc, out)
+ self._raise_cli_error(fc_getport, rc, out)
fc_target_dict = {'A': [], 'B': []}
'Port ID': sp_port_id})
return fc_target_dict
- @log_enter_exit
- def get_iscsi_targets(self):
+ def get_iscsi_targets(self, poll=True):
cmd_getport = ('connection', '-getport', '-address', '-vlanid')
- out, rc = self.command_execute(*cmd_getport)
+ out, rc = self.command_execute(*cmd_getport, poll=poll)
if rc != 0:
- raise EMCVnxCLICmdError(cmd_getport, rc, out)
+ self._raise_cli_error(cmd_getport, rc, out)
iscsi_target_dict = {'A': [], 'B': []}
iscsi_spport_pat = r'(A|B)\s*' + \
return iscsi_target_dict
- @log_enter_exit
- def get_registered_spport_set(self, initiator_iqn, sgname):
- sg_list = ('storagegroup', '-list', '-gname', sgname)
- out, rc = self.command_execute(*sg_list)
+ def get_registered_spport_set(self, initiator_iqn, sgname, sg_raw_out):
spport_set = set()
- if rc == 0:
- for m_spport in re.finditer(r'\n\s+%s\s+SP\s(A|B)\s+(\d+)' %
- initiator_iqn,
- out,
- flags=re.IGNORECASE):
- spport_set.add((m_spport.group(1), int(m_spport.group(2))))
- LOG.debug('See path %(path)s in %(sg)s'
- % ({'path': m_spport.group(0),
- 'sg': sgname}))
- else:
- raise EMCVnxCLICmdError(sg_list, rc, out)
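+        # A matching line in sg_raw_out looks like (illustrative):
+        #     '<initiator_iqn>  SP A  2', which yields the pair ('A', 2).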
+ for m_spport in re.finditer(r'\n\s+%s\s+SP\s(A|B)\s+(\d+)' %
+ initiator_iqn,
+ sg_raw_out,
+ flags=re.IGNORECASE):
+ spport_set.add((m_spport.group(1), int(m_spport.group(2))))
+ LOG.debug('See path %(path)s in %(sg)s',
+ {'path': m_spport.group(0),
+ 'sg': sgname})
return spport_set
- @log_enter_exit
def ping_node(self, target_portal, initiator_ip):
connection_pingnode = ('connection', '-pingnode', '-sp',
target_portal['SP'], '-portid',
target_portal['Port ID'], '-vportid',
target_portal['Virtual Port ID'],
- '-address', initiator_ip)
+ '-address', initiator_ip,
+ '-count', '1')
out, rc = self.command_execute(*connection_pingnode)
if rc == 0:
ping_ok = re.compile(r'Reply from %s' % initiator_ip)
LOG.debug("See available iSCSI target: %s",
connection_pingnode)
return True
- LOG.warn(_LW("See unavailable iSCSI target: %s"), connection_pingnode)
+ LOG.warning(_LW("See unavailable iSCSI target: %s"),
+ connection_pingnode)
return False
- @log_enter_exit
def find_avaialable_iscsi_target_one(self, hostname,
preferred_sp,
- registered_spport_set):
+ registered_spport_set,
+ all_iscsi_targets):
if self.iscsi_initiator_map and hostname in self.iscsi_initiator_map:
iscsi_initiator_ips = list(self.iscsi_initiator_map[hostname])
random.shuffle(iscsi_initiator_ips)
else:
target_sps = ('B', 'A')
- iscsi_targets = self.get_iscsi_targets()
for target_sp in target_sps:
- target_portals = list(iscsi_targets[target_sp])
+ target_portals = list(all_iscsi_targets[target_sp])
random.shuffle(target_portals)
for target_portal in target_portals:
spport = (target_portal['SP'], target_portal['Port ID'])
if spport not in registered_spport_set:
LOG.debug("Skip SP Port %(port)s since "
- "no path from %(host)s is through it"
- % {'port': spport,
- 'host': hostname})
+ "no path from %(host)s is through it",
+ {'port': spport,
+ 'host': hostname})
continue
if iscsi_initiator_ips is not None:
for initiator_ip in iscsi_initiator_ips:
return target_portal
else:
LOG.debug("No iSCSI IP address of %(hostname)s is known. "
- "Return a random iSCSI target portal %(portal)s."
- %
- {'hostname': hostname, 'portal': target_portal})
+ "Return a random target portal %(portal)s.",
+ {'hostname': hostname,
+ 'portal': target_portal})
return target_portal
return None
def _is_sp_unavailable_error(self, out):
error_pattern = '(^Error.*Message.*End of data stream.*)|'\
'(.*Message.*connection refused.*)|'\
- '(^Error.*Message.*Service Unavailable.*)'
+ '(^Error.*Message.*Service Unavailable.*)|'\
+ '(^A network error occurred while trying to'\
+ ' connect.* )|'\
+ '(^Exception: Error occurred because of time out\s*)'
pattern = re.compile(error_pattern)
return pattern.match(out)
- @log_enter_exit
def command_execute(self, *command, **kwargv):
+ """Executes command against the VNX array.
+
+        When the named parameter poll=False is passed, the command will be
+        sent along with the -np option.
+ """
-        # NOTE: retry_disable need to be removed from kwargv
-        # before it pass to utils.execute, otherwise exception will thrown
+        # NOTE: retry_disable needs to be removed from kwargv before it
+        # is passed to utils.execute, otherwise an exception will be thrown
retry_disable = kwargv.pop('retry_disable', False)
- if self._is_sp_alive(self.active_storage_ip):
- out, rc = self._command_execute_on_active_ip(*command, **kwargv)
- if not retry_disable and self._is_sp_unavailable_error(out):
- # When active sp is unavailble, swith to another sp
- # and set it to active
- if self._toggle_sp():
- LOG.debug('EMC: Command Exception: %(rc) %(result)s. '
- 'Retry on another SP.' % {'rc': rc,
- 'result': out})
- out, rc = self._command_execute_on_active_ip(*command,
- **kwargv)
- elif self._toggle_sp() and not retry_disable:
- # If active ip is not accessible, toggled to another sp
- out, rc = self._command_execute_on_active_ip(*command, **kwargv)
- else:
- # Active IP is inaccessible, and cannot toggle to another SP,
- # return Error
- out, rc = "Server Unavailable", 255
-
- LOG.debug('EMC: Command: %(command)s.'
- % {'command': self.command + command})
- LOG.debug('EMC: Command Result: %(result)s.' %
- {'result': out.replace('\n', '\\n')})
+ out, rc = self._command_execute_on_active_ip(*command, **kwargv)
+ if not retry_disable and self._is_sp_unavailable_error(out):
+            # When the active sp is unavailable, switch to the other sp,
+            # set it to active, and force a poll
+ if self._toggle_sp():
+                LOG.debug('EMC: Command Exception: %(rc)s %(result)s. '
+ 'Retry on another SP.', {'rc': rc,
+ 'result': out})
+ kwargv['poll'] = True
+ out, rc = self._command_execute_on_active_ip(*command,
+ **kwargv)
return out, rc
kwargv["check_exit_code"] = True
rc = 0
out = ""
+ need_poll = kwargv.pop('poll', True)
+ if "-np" not in command and not need_poll:
+ command = ("-np",) + command
+
try:
active_ip = (self.active_storage_ip,)
out, err = utils.execute(
rc = pe.exit_code
out = pe.stdout
out = out.replace('\n', '\\n')
+
+ LOG.debug('EMC: Command: %(command)s. Result: %(result)s.',
+ {'command': self.command + active_ip + command,
+ 'result': out.replace('\n', '\\n')})
+
return out, rc
def _is_sp_alive(self, ipaddr):
out = pe.stdout
rc = pe.exit_code
if rc != 0:
- LOG.debug('%s is unavaialbe' % ipaddr)
+            LOG.debug('%s is unavailable', ipaddr)
return False
- LOG.debug('Ping SP %(spip)s Command Result: %(result)s.' %
+ LOG.debug('Ping SP %(spip)s Command Result: %(result)s.',
{'spip': self.active_storage_ip, 'result': out})
return True
self.primary_storage_ip
LOG.info(_LI('Toggle storage_vnx_ip_address from %(old)s to '
- '%(new)s.') %
+ '%(new)s.'),
{'old': old_ip,
- 'new': self.primary_storage_ip})
+ 'new': self.active_storage_ip})
return True
- @log_enter_exit
- def get_enablers_on_array(self, no_poll=False):
+ def get_enablers_on_array(self, poll=False):
"""The function would get all the enabler installed
on array.
"""
enablers = []
- cmd_list = ('-np', 'ndu', '-list') \
- if no_poll else ('ndu', '-list')
- out, rc = self.command_execute(*cmd_list)
+ cmd_list = ('ndu', '-list')
+ out, rc = self.command_execute(*cmd_list, poll=poll)
if rc != 0:
- raise EMCVnxCLICmdError(cmd_list, rc, out)
+ self._raise_cli_error(cmd_list, rc, out)
else:
enabler_pat = r'Name of the software package:\s*(\S+)\s*'
for m in re.finditer(enabler_pat, out):
enablers.append(m.groups()[0])
- LOG.debug('Enablers on array %s.' % enablers)
+ LOG.debug('Enablers on array %s.', enablers)
return enablers
- @log_enter_exit
def enable_or_disable_compression_on_lun(self, volumename, compression):
"""The function will enable or disable the compression
on lun
out, rc = self.command_execute(*command_compression_cmd)
if 0 != rc:
- raise EMCVnxCLICmdError(command_compression_cmd, rc, out)
+ self._raise_cli_error(command_compression_cmd, rc, out)
return rc, out
+ def deregister_initiator(self, initiator_uid):
+ """This function tries to deregister initiators on VNX."""
+ command_deregister = ('port', '-removeHBA',
+ '-hbauid', initiator_uid,
+ '-o')
+ out, rc = self.command_execute(*command_deregister)
+ return rc, out
+ def is_pool_fastcache_enabled(self, storage_pool, poll=False):
+ command_check_fastcache = ('storagepool', '-list', '-name',
+ storage_pool, '-fastcache')
+ out, rc = self.command_execute(*command_check_fastcache, poll=poll)
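+        # The CLI output is expected to contain a line like
+        # 'FAST Cache:  Enabled' (illustrative), parsed below.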
+
+ if 0 != rc:
+ self._raise_cli_error(command_check_fastcache, rc, out)
+ else:
+            re_fastcache = 'FAST Cache:\s*(.*)\s*'
+            m = re.search(re_fastcache, out)
+            result = False
+            if m is not None:
+                result = 'Enabled' == m.group(1)
+            else:
+                LOG.error(_LE("Error parsing output for FastCache Command."))
+            return result
+
+
+@decorate_all_methods(log_enter_exit)
class EMCVnxCliBase(object):
"""This class defines the functions to use the native CLI functionality."""
- VERSION = '04.01.00'
+ VERSION = '05.00.00'
stats = {'driver_version': VERSION,
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
def __init__(self, prtcl, configuration=None):
self.protocol = prtcl
self.configuration = configuration
- self.timeout = self.configuration.default_timeout * 60
self.max_luns_per_sg = self.configuration.max_luns_per_storage_group
self.destroy_empty_sg = self.configuration.destroy_empty_storage_group
self.itor_auto_reg = self.configuration.initiator_auto_registration
+ self.itor_auto_dereg = self.configuration.initiator_auto_deregistration
+ self.check_max_pool_luns_threshold = (
+ self.configuration.check_max_pool_luns_threshold)
# if zoning_mode is fabric, use lookup service to build itor_tgt_map
self.zonemanager_lookup_service = None
zm_conf = Configuration(manager.volume_manager_opts)
FCSanLookupService(configuration=configuration)
self.max_retries = 5
if self.destroy_empty_sg:
- LOG.warn(_LW("destroy_empty_storage_group: True. "
- "Empty storage group will be deleted "
- "after volume is detached."))
+ LOG.warning(_LW("destroy_empty_storage_group: True. "
+ "Empty storage group will be deleted "
+ "after volume is detached."))
if not self.itor_auto_reg:
LOG.info(_LI("initiator_auto_registration: False. "
"Initiator auto registration is not enabled. "
self.hlu_set = set(xrange(1, self.max_luns_per_sg + 1))
self._client = CommandLineHelper(self.configuration)
self.array_serial = None
+ if self.protocol == 'iSCSI':
+ self.iscsi_targets = self._client.get_iscsi_targets(poll=True)
+ self.hlu_cache = {}
+ self.force_delete_lun_in_sg = (
+ self.configuration.force_delete_lun_in_storagegroup)
+ if self.force_delete_lun_in_sg:
+ LOG.warning(_LW("force_delete_lun_in_storagegroup=True"))
def get_target_storagepool(self, volume, source_volume_name=None):
raise NotImplementedError
self.array_serial = self._client.get_array_serial()
return self.array_serial['array_serial']
- @log_enter_exit
+ def _construct_store_spec(self, volume, snapshot):
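+        # The keys of the returned dict are injected into the taskflow
+        # tasks' execute() arguments by name (e.g. 'client', 'snap_name').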
+ if snapshot['cgsnapshot_id']:
+ snapshot_name = snapshot['cgsnapshot_id']
+ else:
+ snapshot_name = snapshot['name']
+ source_volume_name = snapshot['volume_name']
+ volume_name = volume['name']
+ volume_size = snapshot['volume_size']
+ dest_volume_name = volume_name + '_dest'
+
+ pool_name = self.get_target_storagepool(volume, source_volume_name)
+ specs = self.get_volumetype_extraspecs(volume)
+ provisioning, tiering = self._get_extra_spec_value(specs)
+ store_spec = {
+ 'source_vol_name': source_volume_name,
+ 'volume': volume,
+ 'snap_name': snapshot_name,
+ 'dest_vol_name': dest_volume_name,
+ 'pool_name': pool_name,
+ 'provisioning': provisioning,
+ 'tiering': tiering,
+ 'volume_size': volume_size,
+ 'client': self._client
+ }
+ return store_spec
+
def create_volume(self, volume):
"""Creates a EMC volume."""
- volumesize = volume['size']
- volumename = volume['name']
+ volume_size = volume['size']
+ volume_name = volume['name']
self._volume_creation_check(volume)
# defining CLI command
specs = self.get_volumetype_extraspecs(volume)
pool = self.get_target_storagepool(volume)
- provisioning, tiering = self.get_extra_spec_value(specs)
+ provisioning, tiering = self._get_extra_spec_value(specs)
if not provisioning:
provisioning = 'thick'
LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
'pool: %(pool)s '
'provisioning: %(provisioning)s '
- 'tiering: %(tiering)s.')
- % {'volume': volumename,
- 'size': volumesize,
- 'pool': pool,
- 'provisioning': provisioning,
- 'tiering': tiering})
+ 'tiering: %(tiering)s.'),
+ {'volume': volume_name,
+ 'size': volume_size,
+ 'pool': pool,
+ 'provisioning': provisioning,
+ 'tiering': tiering})
data = self._client.create_lun_with_advance_feature(
- pool, volumename, volumesize,
- provisioning, tiering, volume['consistencygroup_id'])
+ pool, volume_name, volume_size,
+ provisioning, tiering, volume['consistencygroup_id'], False)
pl_dict = {'system': self.get_array_serial(),
'type': 'lun',
'id': str(data['lun_id'])}
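+        # dumps_provider_location() joins the dict into a string like
+        # 'system^<serial>|type^lun|id^<lun_id>' (illustrative format).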
"""
specs = self.get_volumetype_extraspecs(volume)
- provisioning, tiering = self.get_extra_spec_value(specs)
+ provisioning, tiering = self._get_extra_spec_value(specs)
# step 1: check extra spec value
if provisioning:
- self.check_extra_spec_value(
+ self._check_extra_spec_value(
provisioning,
self._client.provisioning_values.keys())
if tiering:
- self.check_extra_spec_value(
+ self._check_extra_spec_value(
tiering,
self._client.tiering_values.keys())
# step 2: check extra spec combination
- self.check_extra_spec_combination(specs)
+ self._check_extra_spec_combination(specs)
- def check_extra_spec_value(self, extra_spec, valid_values):
- """check whether an extra spec's value is valid."""
+ def _check_extra_spec_value(self, extra_spec, valid_values):
+ """Checks whether an extra spec's value is valid."""
if not extra_spec or not valid_values:
LOG.error(_LE('The given extra_spec or valid_values is None.'))
raise exception.VolumeBackendAPIException(data=msg)
return
- def get_extra_spec_value(self, extra_specs):
- """get EMC extra spec values."""
+ def _get_extra_spec_value(self, extra_specs):
+ """Gets EMC extra spec values."""
provisioning = 'thick'
tiering = None
return provisioning, tiering
- def check_extra_spec_combination(self, extra_specs):
- """check whether extra spec combination is valid."""
+ def _check_extra_spec_combination(self, extra_specs):
+ """Checks whether extra spec combination is valid."""
- provisioning, tiering = self.get_extra_spec_value(extra_specs)
+ provisioning, tiering = self._get_extra_spec_value(extra_specs)
enablers = self.enablers
# check provisioning and tiering
raise exception.VolumeBackendAPIException(data=msg)
return
- @log_enter_exit
def delete_volume(self, volume):
"""Deletes an EMC volume."""
- self._client.delete_lun(volume['name'])
+ try:
+ self._client.delete_lun(volume['name'])
+ except EMCVnxCLICmdError as ex:
+ orig_out = "\n".join(ex.kwargs["out"])
+ if (self.force_delete_lun_in_sg and
+ (self._client.CLI_RESP_PATTERN_LUN_IN_SG_1 in orig_out or
+ self._client.CLI_RESP_PATTERN_LUN_IN_SG_2 in orig_out)):
+ LOG.warning(_LW('LUN corresponding to %s is still '
+                                'in some Storage Groups. '
+ 'Try to bring the LUN out of Storage Groups '
+ 'and retry the deletion.'),
+ volume['name'])
+ lun_id = self.get_lun_id(volume)
+ for hlu, sg in self._client.get_hlus(lun_id):
+ self._client.remove_hlu_from_storagegroup(hlu, sg)
+ self._client.delete_lun(volume['name'])
+ else:
+ with excutils.save_and_reraise_exception():
+                    # Reraise the original exception
+ pass
- @log_enter_exit
def extend_volume(self, volume, new_size):
"""Extends an EMC volume."""
self._client.expand_lun_and_wait(volume['name'], new_size)
false_ret = (False, None)
if 'location_info' not in host['capabilities']:
- LOG.warn(_LW("Failed to get target_pool_name and "
- "target_array_serial. 'location_info' "
- "is not in host['capabilities']."))
+ LOG.warning(_LW("Failed to get target_pool_name and "
+ "target_array_serial. 'location_info' "
+ "is not in host['capabilities']."))
return false_ret
# mandatory info should be ok
info = host['capabilities']['location_info']
- LOG.debug("Host for migration is %s." % info)
+ LOG.debug("Host for migration is %s.", info)
try:
info_detail = info.split('|')
target_pool_name = info_detail[0]
target_array_serial = info_detail[1]
except AttributeError:
- LOG.warn(_LW("Error on parsing target_pool_name/"
- "target_array_serial."))
+ LOG.warning(_LW("Error on parsing target_pool_name/"
+ "target_array_serial."))
return false_ret
if len(target_pool_name) == 0:
"it doesn't support array backend .")
return false_ret
# source and destination should be on same array
- array_serial = self._client.get_array_serial()
- if target_array_serial != array_serial['array_serial']:
+ array_serial = self.get_array_serial()
+ if target_array_serial != array_serial:
LOG.debug('Skip storage-assisted migration because '
-                      'target and source backend are not managing'
+                      'target and source backend are not managing '
'the same array.')
and self._get_original_status(volume) == 'in-use':
LOG.debug('Skip storage-assisted migration because '
'in-use volume can not be '
- 'migrate between diff protocol.')
+                      'migrated between different protocols.')
return false_ret
return (True, target_pool_name)
- @log_enter_exit
def migrate_volume(self, ctxt, volume, host, new_type=None):
"""Leverage the VNX on-array migration functionality.
provisioning = 'thick'
tiering = None
if new_type:
- provisioning, tiering = self.get_extra_spec_value(
+ provisioning, tiering = self._get_extra_spec_value(
new_type['extra_specs'])
else:
- provisioning, tiering = self.get_extra_spec_value(
+ provisioning, tiering = self._get_extra_spec_value(
self.get_volumetype_extraspecs(volume))
- self._client.create_lun_with_advance_feature(
+ data = self._client.create_lun_with_advance_feature(
target_pool_name, new_volume_name, volume['size'],
provisioning, tiering)
- dst_id = self.get_lun_id_by_name(new_volume_name)
+ dst_id = data['lun_id']
moved = self._client.migrate_lun_with_verification(
src_id, dst_id, new_volume_name)
return moved, {}
- @log_enter_exit
def retype(self, ctxt, volume, new_type, diff, host):
new_specs = new_type['extra_specs']
- new_provisioning, new_tiering = self.get_extra_spec_value(
+ new_provisioning, new_tiering = self._get_extra_spec_value(
new_specs)
# validate new_type
if new_provisioning:
- self.check_extra_spec_value(
+ self._check_extra_spec_value(
new_provisioning,
self._client.provisioning_values.keys())
if new_tiering:
- self.check_extra_spec_value(
+ self._check_extra_spec_value(
new_tiering,
self._client.tiering_values.keys())
- self.check_extra_spec_combination(new_specs)
+ self._check_extra_spec_combination(new_specs)
# check what changes are needed
migration, tiering_change = self.determine_changes_when_retype(
volume, target_pool_name, new_type)[0]:
return True
else:
- LOG.warn(_LW('Storage-assisted migration failed during '
- 'retype.'))
+ LOG.warning(_LW('Storage-assisted migration failed during '
+ 'retype.'))
return False
else:
# migration is invalid
-            LOG.debug('Driver is not able to do retype due to '
-                      'storage-assisted migration is not valid '
+            LOG.debug('Driver is not able to do retype because '
+                      'storage-assisted migration is not valid '
- 'in this stuation.')
+ 'in this situation.')
return False
elif not migration and tiering_change:
# modify lun to change tiering policy
tiering_change = False
old_specs = self.get_volumetype_extraspecs(volume)
- old_provisioning, old_tiering = self.get_extra_spec_value(
+ old_provisioning, old_tiering = self._get_extra_spec_value(
old_specs)
old_pool = self.get_specific_extra_spec(
old_specs,
self._client.pool_spec)
new_specs = new_type['extra_specs']
- new_provisioning, new_tiering = self.get_extra_spec_value(
+ new_provisioning, new_tiering = self._get_extra_spec_value(
new_specs)
new_pool = self.get_specific_extra_spec(
new_specs,
return False
return True
- @log_enter_exit
def update_volume_stats(self):
"""Update the common status share with pool and
array backend.
"""
if not self.determine_all_enablers_exist(self.enablers):
- self.enablers = self._client.get_enablers_on_array(NO_POLL)
+ self.enablers = self._client.get_enablers_on_array()
if '-Compression' in self.enablers:
self.stats['compression_support'] = 'True'
else:
else:
self.stats['consistencygroup_support'] = 'False'
- return self.stats
-
- @log_enter_exit
- def create_export(self, context, volume):
- """Driver entry point to get the export info for a new volume."""
- volumename = volume['name']
-
- data = self._client.get_lun_by_name(volumename)
-
- device_id = data['lun_id']
-
- LOG.debug('Exiting EMCVnxCliBase.create_export: Volume: %(volume)s '
- 'Device ID: %(device_id)s'
- % {'volume': volumename,
- 'device_id': device_id})
+ if self.protocol == 'iSCSI':
+ self.iscsi_targets = self._client.get_iscsi_targets(poll=False)
- return {'provider_location': device_id}
+ return self.stats
- @log_enter_exit
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
- snapshotname = snapshot['name']
- volumename = snapshot['volume_name']
-
- LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s')
- % {'snapshot': snapshotname,
- 'volume': volumename})
-
- self._client.create_snapshot(volumename, snapshotname)
+ snapshot_name = snapshot['name']
+ volume_name = snapshot['volume_name']
+ volume = snapshot['volume']
+ LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s'),
+ {'snapshot': snapshot_name,
+ 'volume': volume_name})
+ lun_id = self.get_lun_id(volume)
+ self._client.create_snapshot(lun_id, snapshot_name)
- @log_enter_exit
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
- snapshotname = snapshot['name']
+ snapshot_name = snapshot['name']
- LOG.info(_LI('Delete Snapshot: %(snapshot)s')
- % {'snapshot': snapshotname})
+ LOG.info(_LI('Delete Snapshot: %(snapshot)s'),
+ {'snapshot': snapshot_name})
- self._client.delete_snapshot(snapshotname)
+ self._client.delete_snapshot(snapshot_name)
- @log_enter_exit
def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
- if snapshot['cgsnapshot_id']:
- snapshot_name = snapshot['cgsnapshot_id']
- else:
- snapshot_name = snapshot['name']
- source_volume_name = snapshot['volume_name']
- volume_name = volume['name']
- volume_size = snapshot['volume_size']
-
- # defining CLI command
- self._client.create_mount_point(source_volume_name, volume_name)
-
- # defining CLI command
- self._client.attach_mount_point(volume_name, snapshot_name)
-
- dest_volume_name = volume_name + '_dest'
-
- LOG.debug('Creating Temporary Volume: %s ' % dest_volume_name)
- pool_name = self.get_target_storagepool(volume, source_volume_name)
- try:
- self._volume_creation_check(volume)
- specs = self.get_volumetype_extraspecs(volume)
- provisioning, tiering = self.get_extra_spec_value(specs)
- self._client.create_lun_with_advance_feature(
- pool_name, dest_volume_name, volume_size,
- provisioning, tiering)
- except exception.VolumeBackendAPIException as ex:
- msg = (_('Command to create the temporary Volume %s failed')
- % dest_volume_name)
- LOG.error(msg)
- raise ex
+ """Constructs a work flow to create a volume from snapshot.
- source_vol_lun_id = self.get_lun_id(volume)
- temp_vol_lun_id = self.get_lun_id_by_name(dest_volume_name)
+
+        This flow will do the following:
- LOG.debug('Migrating Mount Point Volume: %s ' % volume_name)
- self._client.migrate_lun_with_verification(source_vol_lun_id,
- temp_vol_lun_id,
- dest_volume_name)
- self._client.check_smp_not_attached(volume_name)
- data = self._client.get_lun_by_name(volume_name)
- pl_dict = {'system': self.get_array_serial(),
+ 1. Create a snap mount point (SMP) for the snapshot.
+ 2. Attach the snapshot to the SMP created in the first step.
+        3. Create a temporary lun as the migration destination.
+ 4. Start a migration between the SMP and the temp lun.
+ """
+ self._volume_creation_check(volume)
+ array_serial = self.get_array_serial()
+ flow_name = 'create_volume_from_snapshot'
+ work_flow = linear_flow.Flow(flow_name)
+ store_spec = self._construct_store_spec(volume, snapshot)
+ work_flow.add(CreateSMPTask(),
+ AttachSnapTask(),
+ CreateDestLunTask(),
+ MigrateLunTask())
+ flow_engine = taskflow.engines.load(work_flow,
+ store=store_spec)
+ flow_engine.run()
+ new_lun_id = flow_engine.storage.fetch('new_lun_id')
+ pl_dict = {'system': array_serial,
'type': 'lun',
- 'id': str(data['lun_id'])}
+ 'id': str(new_lun_id)}
model_update = {'provider_location':
self.dumps_provider_location(pl_dict)}
volume['provider_location'] = model_update['provider_location']
return model_update
- @log_enter_exit
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
+ self._volume_creation_check(volume)
+ array_serial = self.get_array_serial()
source_volume_name = src_vref['name']
+ source_lun_id = self.get_lun_id(src_vref)
volume_size = src_vref['size']
consistencygroup_id = src_vref['consistencygroup_id']
snapshot_name = 'tmp-snap-%s' % volume['id']
'name': snapshot_name,
'volume_name': source_volume_name,
'volume_size': volume_size,
+ 'volume': src_vref,
'cgsnapshot_id': tmp_cgsnapshot_name,
'consistencygroup_id': consistencygroup_id,
'id': tmp_cgsnapshot_name
}
- # Create temp Snapshot
- if consistencygroup_id:
- self._client.create_cgsnapshot(snapshot)
- else:
- self.create_snapshot(snapshot)
-
- # Create volume
- model_update = self.create_volume_from_snapshot(volume, snapshot)
+ store_spec = self._construct_store_spec(volume, snapshot)
+ flow_name = 'create_cloned_volume'
+ work_flow = linear_flow.Flow(flow_name)
+ store_spec.update({'snapshot': snapshot})
+ store_spec.update({'source_lun_id': source_lun_id})
+ work_flow.add(CreateSnapshotTask(),
+ CreateSMPTask(),
+ AttachSnapTask(),
+ CreateDestLunTask(),
+ MigrateLunTask())
+ flow_engine = taskflow.engines.load(work_flow,
+ store=store_spec)
+ flow_engine.run()
+ new_lun_id = flow_engine.storage.fetch('new_lun_id')
# Delete temp Snapshot
if consistencygroup_id:
self._client.delete_cgsnapshot(snapshot)
else:
self.delete_snapshot(snapshot)
+
+ pl_dict = {'system': array_serial,
+ 'type': 'lun',
+ 'id': str(new_lun_id)}
+ model_update = {'provider_location':
+ self.dumps_provider_location(pl_dict)}
return model_update
- @log_enter_exit
def create_consistencygroup(self, context, group):
- """Create a consistency group."""
+ """Creates a consistency group."""
LOG.info(_LI('Start to create consistency group: %(group_name)s '
- 'id: %(id)s') %
+ 'id: %(id)s'),
{'group_name': group['name'], 'id': group['id']})
model_update = {'status': 'available'}
self._client.create_consistencygroup(context, group)
except Exception:
with excutils.save_and_reraise_exception():
- msg = (_('Create consistency group %s failed.')
- % group['id'])
- LOG.error(msg)
+ LOG.error(_LE('Create consistency group %s failed.'),
+ group['id'])
return model_update
- @log_enter_exit
def delete_consistencygroup(self, driver, context, group):
- """Delete a consistency group."""
+ """Deletes a consistency group."""
cg_name = group['id']
volumes = driver.db.volume_get_all_by_group(context, group['id'])
model_update = {}
model_update['status'] = group['status']
- LOG.info(_LI('Start to delete consistency group: %(cg_name)s')
- % {'cg_name': cg_name})
+ LOG.info(_LI('Start to delete consistency group: %(cg_name)s'),
+ {'cg_name': cg_name})
try:
self._client.delete_consistencygroup(cg_name)
except Exception:
return model_update, volumes
- @log_enter_exit
def create_cgsnapshot(self, driver, context, cgsnapshot):
- """Create a cgsnapshot (snap group)."""
+ """Creates a cgsnapshot (snap group)."""
cgsnapshot_id = cgsnapshot['id']
snapshots = driver.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot_id)
model_update = {}
LOG.info(_LI('Start to create cgsnapshot for consistency group'
- ': %(group_name)s') %
+ ': %(group_name)s'),
{'group_name': cgsnapshot['consistencygroup_id']})
try:
snapshot['status'] = 'available'
except Exception:
with excutils.save_and_reraise_exception():
- msg = (_('Create cg snapshot %s failed.')
- % cgsnapshot_id)
- LOG.error(msg)
+ LOG.error(_LE('Create cg snapshot %s failed.'),
+ cgsnapshot_id)
model_update['status'] = 'available'
return model_update, snapshots
- @log_enter_exit
def delete_cgsnapshot(self, driver, context, cgsnapshot):
- """delete a cgsnapshot (snap group)."""
+ """Deletes a cgsnapshot (snap group)."""
cgsnapshot_id = cgsnapshot['id']
snapshots = driver.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot_id)
model_update = {}
model_update['status'] = cgsnapshot['status']
LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
- '%(group_name)s') % {'snap_name': cgsnapshot['id'],
+ '%(group_name)s'), {'snap_name': cgsnapshot['id'],
'group_name': cgsnapshot['consistencygroup_id']})
try:
snapshot['status'] = 'deleted'
except Exception:
with excutils.save_and_reraise_exception():
- msg = (_('Delete cgsnapshot %s failed.')
- % cgsnapshot_id)
- LOG.error(msg)
+ LOG.error(_LE('Delete cgsnapshot %s failed.'),
+ cgsnapshot_id)
return model_update, snapshots
'query it.')
lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
except Exception as ex:
- LOG.debug('Exception when getting lun id: %s.' % (ex))
+ LOG.debug('Exception when getting lun id: %s.', six.text_type(ex))
lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
- LOG.debug('Get lun_id: %s.' % (lun_id))
+ LOG.debug('Get lun_id: %s.', lun_id)
return lun_id
def get_lun_map(self, storage_group):
return data['storage_group_uid']
def assure_storage_group(self, storage_group):
- try:
- self._client.create_storage_group(storage_group)
- except EMCVnxCLICmdError as ex:
- if ex.out.find("Storage Group name already in use") == -1:
- raise ex
+ self._client.create_storage_group(storage_group)
def assure_host_in_storage_group(self, hostname, storage_group):
try:
self._client.connect_host_to_storage_group(hostname, storage_group)
except EMCVnxCLICmdError as ex:
- if ex.rc == 83:
+ if ex.kwargs["rc"] == 83:
# SG was not created or was destroyed by another concurrent
# operation before connected.
# Create SG and try to connect again
- LOG.warn(_LW('Storage Group %s is not found. Create it.'),
- storage_group)
+ LOG.warning(_LW('Storage Group %s is not found. Create it.'),
+ storage_group)
self.assure_storage_group(storage_group)
self._client.connect_host_to_storage_group(
hostname, storage_group)
raise ex
return hostname
- def find_device_details(self, volume, storage_group):
- """Returns the Host Device number for the volume."""
-
- host_lun_id = -1
-
- data = self._client.get_storage_group(storage_group)
- lun_map = data['lunmap']
- data = self._client.get_lun_by_name(volume['name'])
- allocated_lun_id = data['lun_id']
+ def get_lun_owner(self, volume):
+ """Returns SP owner of the volume."""
+ data = self._client.get_lun_by_name(volume['name'],
+ poll=False)
owner_sp = data['owner']
-
- for lun in lun_map.iterkeys():
- if lun == int(allocated_lun_id):
- host_lun_id = lun_map[lun]
- LOG.debug('Host Lun Id : %s' % (host_lun_id))
- break
-
- LOG.debug('Owner SP : %s' % (owner_sp))
-
- device = {
- 'hostlunid': host_lun_id,
- 'ownersp': owner_sp,
- 'lunmap': lun_map,
- }
- return device
+ LOG.debug('Owner SP : %s', owner_sp)
+ return owner_sp
def filter_available_hlu_set(self, used_hlus):
used_hlu_set = set(used_hlus)
'-spport', port_id, '-spvport', vport_id,
'-ip', ip, '-host', host, '-o')
out, rc = self._client.command_execute(*cmd_iscsi_setpath)
- if rc != 0:
- raise EMCVnxCLICmdError(cmd_iscsi_setpath, rc, out)
else:
cmd_fc_setpath = ('storagegroup', '-gname', gname, '-setpath',
'-hbauid', initiator_uid, '-sp', sp,
'-spport', port_id,
'-ip', ip, '-host', host, '-o')
out, rc = self._client.command_execute(*cmd_fc_setpath)
- if rc != 0:
- raise EMCVnxCLICmdError(cmd_fc_setpath, rc, out)
+ if rc != 0:
+ LOG.warning(_LW("Failed to register %(itor)s to SP%(sp)s "
+ "port %(portid)s because: %(msg)s."),
+ {'itor': initiator_uid,
+ 'sp': sp,
+ 'portid': port_id,
+ 'msg': out})
def _register_iscsi_initiator(self, ip, host, initiator_uids):
+ iscsi_targets = self.iscsi_targets
for initiator_uid in initiator_uids:
- iscsi_targets = self._client.get_iscsi_targets()
LOG.info(_LI('Get ISCSI targets %(tg)s to register '
- 'initiator %(in)s.')
- % ({'tg': iscsi_targets,
- 'in': initiator_uid}))
+ 'initiator %(in)s.'),
+ {'tg': iscsi_targets,
+ 'in': initiator_uid})
target_portals_SPA = list(iscsi_targets['A'])
target_portals_SPB = list(iscsi_targets['B'])
ip, host, vport_id)
def _register_fc_initiator(self, ip, host, initiator_uids):
+ fc_targets = self._client.get_fc_targets()
for initiator_uid in initiator_uids:
- fc_targets = self._client.get_fc_targets()
- LOG.info(_LI('Get FC targets %(tg)s to register initiator %(in)s.')
- % ({'tg': fc_targets,
- 'in': initiator_uid}))
+ LOG.info(_LI('Get FC targets %(tg)s to register '
+ 'initiator %(in)s.'),
+ {'tg': fc_targets,
+ 'in': initiator_uid})
target_portals_SPA = list(fc_targets['A'])
target_portals_SPB = list(fc_targets['B'])
self._exec_command_setpath(initiator_uid, sp, port_id,
ip, host)
- def _filter_unregistered_initiators(self, initiator_uids=tuple()):
+ def _deregister_initiators(self, connector):
+ initiator_uids = []
+ try:
+ if self.protocol == 'iSCSI':
+ initiator_uids = self._extract_iscsi_uids(connector)
+ elif self.protocol == 'FC':
+ initiator_uids = self._extract_fc_uids(connector)
+ except exception.VolumeBackendAPIException:
+ LOG.warning(_LW("Failed to extract initiators of %s, so ignore "
+ "deregistration operation."),
+ connector['host'])
+ if initiator_uids:
+ for initiator_uid in initiator_uids:
+ rc, out = self._client.deregister_initiator(initiator_uid)
+ if rc != 0:
+ LOG.warning(_LW("Failed to deregister %(itor)s "
+ "because: %(msg)s."),
+ {'itor': initiator_uid,
+ 'msg': out})
+
+ def _filter_unregistered_initiators(self, initiator_uids, sgdata):
unregistered_initiators = []
if not initiator_uids:
return unregistered_initiators
- command_get_storage_group = ('storagegroup', '-list')
- out, rc = self._client.command_execute(*command_get_storage_group)
-
- if rc != 0:
- raise EMCVnxCLICmdError(command_get_storage_group, rc, out)
+ out = sgdata['raw_output']
for initiator_uid in initiator_uids:
m = re.search(initiator_uid, out)
unregistered_initiators.append(initiator_uid)
return unregistered_initiators
- def auto_register_initiator(self, connector):
- """Automatically register available initiators."""
+ def auto_register_initiator(self, connector, sgdata):
+ """Automatically registers available initiators.
+
+        Returns True if an initiator was registered, otherwise False.
+ """
initiator_uids = []
ip = connector['ip']
host = connector['host']
if self.protocol == 'iSCSI':
initiator_uids = self._extract_iscsi_uids(connector)
- itors_toReg = self._filter_unregistered_initiators(initiator_uids)
- LOG.debug('iSCSI Initiators %(in)s of %(ins)s need registration.'
- % ({'in': itors_toReg,
- 'ins': initiator_uids}))
- if not itors_toReg:
- LOG.debug('Initiators %s are already registered'
- % initiator_uids)
- return
+ if sgdata is not None:
+ itors_toReg = self._filter_unregistered_initiators(
+ initiator_uids,
+ sgdata)
+ else:
+ itors_toReg = initiator_uids
+
+ if len(itors_toReg) == 0:
+ return False
+
+ LOG.info(_LI('iSCSI Initiators %(in)s of %(ins)s '
+ 'need registration.'),
+ {'in': itors_toReg,
+ 'ins': initiator_uids})
self._register_iscsi_initiator(ip, host, itors_toReg)
+ return True
elif self.protocol == 'FC':
initiator_uids = self._extract_fc_uids(connector)
- itors_toReg = self._filter_unregistered_initiators(initiator_uids)
- LOG.debug('FC Initiators %(in)s of %(ins)s need registration.'
- % ({'in': itors_toReg,
- 'ins': initiator_uids}))
- if not itors_toReg:
- LOG.debug('Initiators %s are already registered.'
- % initiator_uids)
- return
+ if sgdata is not None:
+ itors_toReg = self._filter_unregistered_initiators(
+ initiator_uids,
+ sgdata)
+ else:
+ itors_toReg = initiator_uids
+
+ if len(itors_toReg) == 0:
+ return False
+
+            LOG.info(_LI('FC Initiators %(in)s of %(ins)s need registration.'),
+ {'in': itors_toReg,
+ 'ins': initiator_uids})
self._register_fc_initiator(ip, host, itors_toReg)
+ return True
- def assure_host_access(self, volumename, connector):
+ def assure_host_access(self, volume, connector):
hostname = connector['host']
+ volumename = volume['name']
auto_registration_done = False
try:
- self.get_storage_group_uid(hostname)
+ sgdata = self._client.get_storage_group(hostname,
+ poll=False)
except EMCVnxCLICmdError as ex:
- if ex.rc != 83:
+ if ex.kwargs["rc"] != 83:
raise ex
# Storage Group has not existed yet
self.assure_storage_group(hostname)
if self.itor_auto_reg:
- self.auto_register_initiator(connector)
+ self.auto_register_initiator(connector, None)
auto_registration_done = True
else:
self._client.connect_host_to_storage_group(hostname, hostname)
+ sgdata = self._client.get_storage_group(hostname,
+ poll=True)
+
if self.itor_auto_reg and not auto_registration_done:
- self.auto_register_initiator(connector)
- auto_registration_done = True
-
- lun_id = self.get_lun_id_by_name(volumename)
- lun_map = self.get_lun_map(hostname)
- if lun_id in lun_map:
- return lun_map[lun_id]
- used_hlus = lun_map.values()
- if len(used_hlus) >= self.max_luns_per_sg:
- msg = (_('Reach limitation set by configuration '
- 'option max_luns_per_storage_group. '
- 'Operation to add %(vol)s into '
- 'Storage Group %(sg)s is rejected.')
- % {'vol': volumename, 'sg': hostname})
- LOG.error(msg)
- raise exception.VolumeBackendAPIException(data=msg)
+            new_registered = self.auto_register_initiator(connector, sgdata)
+            if new_registered:
+ sgdata = self._client.get_storage_group(hostname,
+ poll=True)
+
+ lun_id = self.get_lun_id(volume)
+ tried = 0
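+        # Pick a random candidate HLU and try to add the LUN with it;
+        # on failure, refresh the storage group with a polled query and
+        # retry, since a concurrent request may have taken the HLU.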
+ while tried < self.max_retries:
+ tried += 1
+ lun_map = sgdata['lunmap']
+ used_hlus = lun_map.values()
+ candidate_hlus = self.filter_available_hlu_set(used_hlus)
+ candidate_hlus = list(candidate_hlus)
+
+ if len(candidate_hlus) != 0:
+                hlu = random.choice(candidate_hlus)
+ try:
+ self._client.add_hlu_to_storage_group(
+ hlu,
+ lun_id,
+ hostname)
+
+ if hostname not in self.hlu_cache:
+ self.hlu_cache[hostname] = {}
+ self.hlu_cache[hostname][lun_id] = hlu
+ return hlu, sgdata
+                except EMCVnxCLICmdError:
+                    LOG.debug("Failed to add HLU to storage group, "
+                              "retry %s", tried)
+ elif tried == 1:
+                # The first try may have used stale data,
+                # so a retry with up-to-date data is needed
+ LOG.debug("Did not find candidate HLUs, retry %s",
+ tried)
+ else:
+                msg = (_('Reached the limit set by configuration '
+ 'option max_luns_per_storage_group. '
+ 'Operation to add %(vol)s into '
+ 'Storage Group %(sg)s is rejected.')
+ % {'vol': volumename, 'sg': hostname})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
- candidate_hlus = self.filter_available_hlu_set(used_hlus)
- candidate_hlus = list(candidate_hlus)
- random.shuffle(candidate_hlus)
- for i, hlu in enumerate(candidate_hlus):
- if i >= self.max_retries:
- break
- try:
- self._client.add_hlu_to_storage_group(
- hlu,
- lun_id,
- hostname)
- return hlu
- except EMCVnxCLICmdError as ex:
- # Retry
- continue
+            # Need a full poll to get up-to-date data;
+            # query the storage group with poll=True for the retry
+ sgdata = self._client.get_storage_group(hostname, poll=True)
+ self.hlu_cache[hostname] = sgdata['lunmap']
+ if lun_id in sgdata['lunmap']:
+ hlu = sgdata['lunmap'][lun_id]
+ return hlu, sgdata
msg = _("Failed to add %(vol)s into %(sg)s "
"after %(retries)s tries.") % \
{'vol': volumename,
'sg': hostname,
- 'retries': min(self.max_retries, len(candidate_hlus))}
+ 'retries': tried}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
- def vnx_get_iscsi_properties(self, volume, connector):
+ def vnx_get_iscsi_properties(self, volume, connector, hlu, sg_raw_output):
storage_group = connector['host']
- device_info = self.find_device_details(volume, storage_group)
- owner_sp = device_info['ownersp']
+ owner_sp = self.get_lun_owner(volume)
registered_spports = self._client.get_registered_spport_set(
connector['initiator'],
- storage_group)
+ storage_group,
+ sg_raw_output)
target = self._client.find_avaialable_iscsi_target_one(
storage_group, owner_sp,
- registered_spports)
+ registered_spports,
+ self.iscsi_targets)
properties = {'target_discovered': True,
'target_iqn': 'unknown',
'target_portal': 'unknown',
properties = {'target_discovered': True,
'target_iqn': target['Port WWN'],
'target_portal': "%s:3260" % target['IP Address'],
- 'target_lun': device_info['hostlunid']}
+ 'target_lun': hlu}
LOG.debug("iSCSI Properties: %s", properties)
auth = volume['provider_auth']
if auth:
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
else:
- LOG.error(_LE('Failed to find an available '
- 'iSCSI targets for %s.'),
+            LOG.error(_LE('Failed to find an available iSCSI target '
+                          'for %s.'),
storage_group)
return properties
def vnx_get_fc_properties(self, connector, device_number):
- ports = self.get_login_ports(connector)
- return {'target_lun': device_number,
- 'target_discovered': True,
- 'target_wwn': ports}
+ fc_properties = {'target_lun': device_number,
+                         'target_discovered': True,
+ 'target_wwn': None}
+ if self.zonemanager_lookup_service is None:
+ fc_properties['target_wwn'] = self.get_login_ports(connector)
+ else:
+ target_wwns, itor_tgt_map = self.get_initiator_target_map(
+ connector['wwpns'],
+ self.get_status_up_ports(connector))
+ fc_properties['target_wwn'] = target_wwns
+ fc_properties['initiator_target_map'] = itor_tgt_map
+ return fc_properties
- @log_enter_exit
    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns connection info."""
volume_metadata = {}
for metadata in volume['volume_admin_metadata']:
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
- LOG.debug('Volume %(vol)s Access mode is: %(access)s.'
- % {'vol': volume['name'],
- 'access': access_mode})
+ LOG.debug('Volume %(vol)s Access mode is: %(access)s.',
+ {'vol': volume['name'],
+ 'access': access_mode})
"""Initializes the connection and returns connection info."""
@lockutils.synchronized('emc-connection-' + connector['host'],
"emc-connection-", True)
def do_initialize_connection():
- device_number = self.assure_host_access(
- volume['name'], connector)
- return device_number
+ return self.assure_host_access(
+ volume, connector)
if self.protocol == 'iSCSI':
- do_initialize_connection()
- iscsi_properties = self.vnx_get_iscsi_properties(volume,
- connector)
+ (device_number, sg_data) = do_initialize_connection()
+ iscsi_properties = self.vnx_get_iscsi_properties(
+ volume,
+ connector,
+ device_number,
+ sg_data['raw_output']
+ )
iscsi_properties['access_mode'] = access_mode
data = {'driver_volume_type': 'iscsi',
'data': iscsi_properties}
elif self.protocol == 'FC':
- device_number = do_initialize_connection()
+ (device_number, sg_data) = do_initialize_connection()
fc_properties = self.vnx_get_fc_properties(connector,
device_number)
fc_properties['volume_id'] = volume['id']
return data
- @log_enter_exit
def terminate_connection(self, volume, connector):
"""Disallow connection from connector."""
-
@lockutils.synchronized('emc-connection-' + connector['host'],
"emc-connection-", True)
def do_terminate_connection():
hostname = connector['host']
volume_name = volume['name']
- try:
- lun_map = self.get_lun_map(hostname)
- except EMCVnxCLICmdError as ex:
- if ex.rc == 83:
- LOG.warn(_LW("Storage Group %s is not found. "
- "terminate_connection() is unnecessary."),
- hostname)
- return True
- try:
- lun_id = self.get_lun_id(volume)
- except EMCVnxCLICmdError as ex:
- if ex.rc == 9:
- LOG.warn(_LW("Volume %s is not found. "
- "It has probably been removed in VNX.")
- % volume_name)
-
- if lun_id in lun_map:
- self._client.remove_hlu_from_storagegroup(
- lun_map[lun_id], hostname)
+ lun_id = self.get_lun_id(volume)
+ lun_map = None
+ conn_info = None
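+            # Fast path: reuse the cached HLU and skip querying the
+            # storage group when neither SG teardown nor a zone update
+            # is needed.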
+ if (hostname in self.hlu_cache and
+ lun_id in self.hlu_cache[hostname] and
+ not self.destroy_empty_sg and
+ not self.zonemanager_lookup_service):
+ hlu = self.hlu_cache[hostname][lun_id]
+ self._client.remove_hlu_from_storagegroup(hlu, hostname,
+ poll=True)
+ self.hlu_cache[hostname].pop(lun_id)
else:
- LOG.warn(_LW("Volume %(vol)s was not in Storage Group %(sg)s.")
- % {'vol': volume_name, 'sg': hostname})
- if self.destroy_empty_sg or self.zonemanager_lookup_service:
try:
lun_map = self.get_lun_map(hostname)
- if not lun_map:
- LOG.debug("Storage Group %s was empty.", hostname)
- if self.destroy_empty_sg:
- LOG.info(_LI("Storage Group %s was empty, "
- "destroy it."), hostname)
- self._client.disconnect_host_from_storage_group(
- hostname, hostname)
- self._client.delete_storage_group(hostname)
- return True
- else:
- LOG.debug("Storage Group %s not empty,", hostname)
- return False
+ self.hlu_cache[hostname] = lun_map
+ except EMCVnxCLICmdError as ex:
+ if ex.kwargs["rc"] == 83:
+ LOG.warning(_LW("Storage Group %s is not found. "
+ "terminate_connection() is "
+ "unnecessary."),
+ hostname)
+                if lun_map and lun_id in lun_map:
+ self._client.remove_hlu_from_storagegroup(
+ lun_map[lun_id], hostname)
+ lun_map.pop(lun_id)
+ else:
+ LOG.warning(_LW("Volume %(vol)s was not in Storage Group"
+ " %(sg)s."),
+ {'vol': volume_name, 'sg': hostname})
+
+ if self.protocol == 'FC':
+ conn_info = {'driver_volume_type': 'fibre_channel',
+ 'data': {}}
+ if self.zonemanager_lookup_service and not lun_map:
+ target_wwns, itor_tgt_map = self.get_initiator_target_map(
+ connector['wwpns'],
+ self.get_status_up_ports(connector))
+ conn_info['data']['initiator_target_map'] = itor_tgt_map
+
+ if self.destroy_empty_sg and not lun_map:
+ try:
+ LOG.info(_LI("Storage Group %s was empty."), hostname)
+ self._client.disconnect_host_from_storage_group(
+ hostname, hostname)
+ self._client.delete_storage_group(hostname)
+ if self.itor_auto_dereg:
+ self._deregister_initiators(connector)
except Exception:
- LOG.warn(_LW("Failed to destroy Storage Group %s."),
- hostname)
- else:
- return False
+ LOG.warning(_LW("Failed to destroy Storage Group %s."),
+ hostname)
+ try:
+ self._client.connect_host_to_storage_group(
+ hostname, hostname)
+ except Exception:
+ LOG.warning(_LW("Fail to connect host %(host)s "
+ "back to storage group %(sg)s."),
+ {'host': hostname, 'sg': hostname})
+ return conn_info
return do_terminate_connection()
- @log_enter_exit
- def adjust_fc_conn_info(self, conn_info, connector, remove_zone=None):
- target_wwns, itor_tgt_map = self.get_initiator_target_map(
- connector['wwpns'],
- self.get_status_up_ports(connector))
- if target_wwns:
- conn_info['data']['target_wwn'] = target_wwns
- if remove_zone is None or remove_zone:
- # Return initiator_target_map for initialize_connection (None)
- # Return initiator_target_map for terminate_connection when (True)
- # no volumes are in the storagegroup for host to use
- conn_info['data']['initiator_target_map'] = itor_tgt_map
- return conn_info
-
- @log_enter_exit
def manage_existing_get_size(self, volume, ref):
- """Return size of volume to be managed by manage_existing.
- """
+ """Return size of volume to be managed by manage_existing."""
+
# Check that the reference is valid
if 'id' not in ref:
reason = _('Reference must contain lun_id element.')
reason=reason)
return data['total_capacity_gb']
- @log_enter_exit
def manage_existing(self, volume, ref):
- raise NotImplementedError
+ """Imports the existing backend storage object as a volume.
+
+        Renames the backend storage object so that it matches the
+        volume['name'], which is how drivers traditionally map between a
+ cinder volume and the associated backend storage object.
+
+ existing_ref:{
+ 'id':lun_id
+ }
+ """
+
+ self._client.lun_rename(ref['id'], volume['name'])
def find_iscsi_protocol_endpoints(self, device_sp):
"""Returns the iSCSI initiators for a SP."""
return specs
+@decorate_all_methods(log_enter_exit)
class EMCVnxCliPool(EMCVnxCliBase):
def __init__(self, prtcl, configuration):
raise exception.VolumeBackendAPIException(data=msg)
return self.storage_pool
- def is_pool_fastcache_enabled(self, storage_pool, no_poll=False):
- command_check_fastcache = None
- if no_poll:
- command_check_fastcache = ('-np', 'storagepool', '-list', '-name',
- storage_pool, '-fastcache')
- else:
- command_check_fastcache = ('storagepool', '-list', '-name',
- storage_pool, '-fastcache')
- out, rc = self._client.command_execute(*command_check_fastcache)
-
- if 0 != rc:
- raise EMCVnxCLICmdError(command_check_fastcache, rc, out)
- else:
- re_fastcache = 'FAST Cache:\s*(.*)\s*'
- m = re.search(re_fastcache, out)
- if m is not None:
- result = True if 'Enabled' == m.group(1) else False
- else:
- LOG.error(_LE("Error parsing output for FastCache Command."))
- return result
-
- @log_enter_exit
def update_volume_stats(self):
- """Retrieve stats info."""
+ """Retrieves stats info."""
self.stats = super(EMCVnxCliPool, self).update_volume_stats()
- data = self._client.get_pool(self.get_target_storagepool())
- self.stats['total_capacity_gb'] = data['total_capacity_gb']
- self.stats['free_capacity_gb'] = data['free_capacity_gb']
-
- array_serial = self._client.get_array_serial(NO_POLL)
+ pool = self._client.get_pool(self.get_target_storagepool(),
+ poll=False)
+ self.stats['total_capacity_gb'] = pool['total_capacity_gb']
+ self.stats['free_capacity_gb'] = pool['free_capacity_gb']
+        # Some extra capacity will be used by the metadata of pool LUNs.
+        # The overhead is about LUN_Capacity * 0.02 + 3 GB.
+        # reserved_percentage is used to make sure the scheduler takes
+        # this overhead into consideration.
+        # Assuming all the remaining capacity is used to create a thick
+        # LUN, reserved_percentage is estimated as follows:
+ reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
+ (1.02 * pool['total_capacity_gb'])) * 100)
+ self.stats['reserved_percentage'] = int(math.ceil(min(reserved, 100)))
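+        # For example (illustrative numbers): a 1000 GB pool with 500 GB
+        # free gives ((0.02 * 500 + 3) / (1.02 * 1000)) * 100 ~= 1.27,
+        # which rounds up to a reserved_percentage of 2.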
+ if self.check_max_pool_luns_threshold:
+ pool_feature = self._client.get_pool_feature_properties(poll=False)
+ if (pool_feature['max_pool_luns']
+ <= pool_feature['total_pool_luns']):
+ LOG.warning(_LW("Maximum number of Pool LUNs, %s, "
+ "have been created. "
+ "No more LUN creation can be done."),
+ pool_feature['max_pool_luns'])
+ self.stats['free_capacity_gb'] = 0
+ array_serial = self._client.get_array_serial()
self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
{'pool_name': self.storage_pool,
'array_serial':
array_serial['array_serial']})
# check if this pool's fast_cache is really enabled
if self.stats['fast_cache_enabled'] == 'True' and \
- not self.is_pool_fastcache_enabled(self.storage_pool, NO_POLL):
+ not self._client.is_pool_fastcache_enabled(self.storage_pool):
self.stats['fast_cache_enabled'] = 'False'
return self.stats
- @log_enter_exit
- def manage_existing(self, volume, ref):
- """Manage an existing lun in the array.
-
- The lun should be in a manageable pool backend, otherwise
- error would return.
- Rename the backend storage object so that it matches the,
- volume['name'] which is how drivers traditionally map between a
- cinder volume and the associated backend storage object.
-
- existing_ref:{
- 'id':lun_id
- }
- """
+ def manage_existing_get_size(self, volume, ref):
+ """Returns size of volume to be managed by manage_existing."""
+ # Check that the reference is valid
+ if 'id' not in ref:
+ reason = _('Reference must contain lun_id element.')
+ raise exception.ManageExistingInvalidReference(
+ existing_ref=ref,
+ reason=reason)
+ # Check for existence of the lun
data = self._client.get_lun_by_id(
- ref['id'], self._client.LUN_WITH_POOL)
- if self.storage_pool != data['pool']:
+ ref['id'],
+ properties=self._client.LUN_WITH_POOL)
+ if data is None:
+ reason = _('Cannot find a LUN with id %s.') % ref['id']
+ raise exception.ManageExistingInvalidReference(existing_ref=ref,
+ reason=reason)
+ if data['pool'] != self.storage_pool:
reason = _('The input lun is not in a manageable pool backend '
'by cinder')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
- self._client.lun_rename(ref['id'], volume['name'])
+ return data['total_capacity_gb']
+@decorate_all_methods(log_enter_exit)
class EMCVnxCliArray(EMCVnxCliBase):
def __init__(self, prtcl, configuration):
def _update_pool_cache(self):
LOG.debug("Updating Pool Cache")
- self.pool_cache = self._client.get_pool_list(NO_POLL)
+ self.pool_cache = self._client.get_pool_list(poll=False)
def get_target_storagepool(self, volume, source_volume_name=None):
"""Find the storage pool for given volume."""
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
- @log_enter_exit
def update_volume_stats(self):
"""Retrieve stats info."""
self.stats = super(EMCVnxCliArray, self).update_volume_stats()
self._update_pool_cache()
self.stats['total_capacity_gb'] = 'unknown'
self.stats['free_capacity_gb'] = 'unknown'
- array_serial = self._client.get_array_serial(NO_POLL)
+ array_serial = self._client.get_array_serial()
self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
{'pool_name': '',
'array_serial':
self.stats['fast_cache_enabled'] = 'unknown'
return self.stats
- @log_enter_exit
- def manage_existing(self, volume, ref):
- """Rename the backend storage object so that it matches the,
- volume['name'] which is how drivers traditionally map between a
- cinder volume and the associated backend storage object.
-
- existing_ref:{
- 'id':lun_id
- }
- """
-
- self._client.lun_rename(ref['id'], volume['name'])
-
def getEMCVnxCli(prtcl, configuration=None):
configuration.append_config_values(loc_opts)
return EMCVnxCliArray(prtcl, configuration=configuration)
else:
return EMCVnxCliPool(prtcl, configuration=configuration)
+
+
+class CreateSMPTask(task.Task):
+ """Creates a snap mount point (SMP) for the source snapshot.
+
+ Reversion strategy: Delete the SMP.
+ """
+ def execute(self, client, volume, source_vol_name, *args, **kwargs):
+ LOG.debug('CreateSMPTask.execute')
+ client.create_mount_point(source_vol_name, volume['name'])
+
+ def revert(self, result, client, volume, *args, **kwargs):
+ LOG.debug('CreateSMPTask.revert')
+ if isinstance(result, failure.Failure):
+ return
+ else:
+ LOG.warning(_LW('CreateSMPTask.revert: delete mount point %s'),
+ volume['name'])
+ client.delete_lun(volume['name'])
+
+
+class AttachSnapTask(task.Task):
+ """Attaches the snapshot to the SMP created before.
+
+ Reversion strategy: Detach the SMP.
+ """
+ def execute(self, client, volume, snap_name, *args, **kwargs):
+ LOG.debug('AttachSnapTask.execute')
+ client.attach_mount_point(volume['name'], snap_name)
+
+ def revert(self, result, client, volume, *args, **kwargs):
+ LOG.debug('AttachSnapTask.revert')
+ if isinstance(result, failure.Failure):
+ return
+ else:
+ LOG.warning(_LW('AttachSnapTask.revert: detach mount point %s'),
+ volume['name'])
+ client.detach_mount_point(volume['name'])
+
+
+class CreateDestLunTask(task.Task):
+ """Creates a destination lun for migration.
+
+ Reversion strategy: Delete the temp lun.
+ """
+ def __init__(self):
+ super(CreateDestLunTask, self).__init__(provides='lun_data')
+
+ def execute(self, client, pool_name, dest_vol_name, volume_size,
+ provisioning, tiering, *args, **kwargs):
+ LOG.debug('CreateDestLunTask.execute')
+ data = client.create_lun_with_advance_feature(
+ pool_name, dest_vol_name, volume_size,
+ provisioning, tiering)
+ return data
+
+ def revert(self, result, client, dest_vol_name, *args, **kwargs):
+ LOG.debug('CreateDestLunTask.revert')
+ if isinstance(result, failure.Failure):
+ return
+ else:
+ LOG.warning(_LW('CreateDestLunTask.revert: delete temp lun %s'),
+ dest_vol_name)
+ client.delete_lun(dest_vol_name)
+
+
+class MigrateLunTask(task.Task):
+ """Starts a migration between the SMP and the temp lun.
+
+ Reversion strategy: None
+ """
+ def __init__(self):
+ super(MigrateLunTask, self).__init__(provides='new_lun_id')
+
+ def execute(self, client, dest_vol_name, volume, lun_data,
+ *args, **kwargs):
+ LOG.debug('MigrateLunTask.execute')
+ new_vol_name = volume['name']
+ new_vol_lun_id = client.get_lun_by_name(new_vol_name)['lun_id']
+ dest_vol_lun_id = lun_data['lun_id']
+
+ LOG.info(_LI('Migrating Mount Point Volume: %s'), new_vol_name)
+
+ migrated = client.migrate_lun_with_verification(new_vol_lun_id,
+ dest_vol_lun_id,
+ None)
+ if not migrated:
+ msg = (_LE("Migrate volume failed between source vol %(src)s"
+ " and dest vol %(dst)s."),
+ {'src': new_vol_name, 'dst': dest_vol_name})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ return new_vol_lun_id
+
+ def revert(self, *args, **kwargs):
+ pass
+
+
+class CreateSnapshotTask(task.Task):
+ """Creates a snapshot/cgsnapshot of a volume.
+
+ Reversion Strategy: Delete the created snapshot/cgsnapshot.
+ """
+ def execute(self, client, snapshot, source_lun_id, *args, **kwargs):
+ LOG.debug('CreateSnapshotTask.execute')
+ # Create temp Snapshot
+ if snapshot['consistencygroup_id']:
+ client.create_cgsnapshot(snapshot)
+ else:
+ snapshot_name = snapshot['name']
+ volume_name = snapshot['volume_name']
+ LOG.info(_LI('Creating snapshot %(snapshot)s of volume %(volume)s.'),
+ {'snapshot': snapshot_name,
+ 'volume': volume_name})
+ client.create_snapshot(source_lun_id, snapshot_name)
+
+ def revert(self, result, client, snapshot, *args, **kwargs):
+ LOG.debug('CreateSnapshotTask.revert')
+ if isinstance(result, failure.Failure):
+ return
+ else:
+ if snapshot['consistencygroup_id']:
+ LOG.warning(_LW('CreateSnapshotTask.revert: '
+ 'delete temp cgsnapshot %s'),
+ snapshot['consistencygroup_id'])
+ client.delete_cgsnapshot(snapshot)
+ else:
+ LOG.warning(_LW('CreateSnapshotTask.revert: '
+ 'delete temp snapshot %s'),
+ snapshot['name'])
+ client.delete_snapshot(snapshot['name'])
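+
+
+ # A minimal sketch (an illustration, not part of this patch) of how the
+ # tasks above could be chained with taskflow to build a volume from a
+ # snapshot. The store keys must match the parameter names of each task's
+ # execute(); 'client', 'volume', 'pool_name', 'provisioning' and
+ # 'tiering' below are assumed to be supplied by the caller.
+ #
+ # from taskflow.patterns import linear_flow
+ # import taskflow.engines
+ #
+ # work_flow = linear_flow.Flow('create_volume_from_snapshot')
+ # work_flow.add(CreateSMPTask(),
+ #               AttachSnapTask(),
+ #               CreateDestLunTask(),
+ #               MigrateLunTask())
+ # store_spec = {'client': client,
+ #               'volume': volume,
+ #               'source_vol_name': snapshot['volume_name'],
+ #               'snap_name': snapshot['name'],
+ #               'pool_name': pool_name,
+ #               'dest_vol_name': volume['name'] + '_dest',
+ #               'volume_size': volume['size'],
+ #               'provisioning': provisioning,
+ #               'tiering': tiering}
+ # engine = taskflow.engines.load(work_flow, store=store_spec)
+ # engine.run()
+ # new_lun_id = engine.storage.fetch('new_lun_id')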