]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
EMC VNX Direct Driver Update for Juno
authorJeegn Chen <jeegn.chen@emc.com>
Thu, 3 Jul 2014 01:59:16 +0000 (09:59 +0800)
committerJeegn Chen <jeegn.chen@emc.com>
Wed, 30 Jul 2014 06:35:37 +0000 (14:35 +0800)
VNX Direct Driver was contributed in the Icehouse release.
This patch refactors the driver and adds the following new features

* Array-based Backend Support
* FC Basic Support
* Target Port Selection for MPIO
* Initiator Auto Registration
* Storage Group Auto Deletion
* Multiple Authentication Type Support
* Storage-Assisted Volume Migration
* SP Toggle for HA
* Security File Support
* Advanced LUN Features
    # Compression Support
    # Deduplication Support
    # FAST VP Support
    # FAST Cache Support
* Storage-assisted Retype
* External Volume Management
* Read-only Volume
* FC Auto Zoning

Certificate Test Results
    https://bugs.launchpad.net/cinder/+bug/1336640

CCLA SCHEDULE B SUBMISSION

Change-Id: Ib7edaefa5eceb8e8c01ec0ce0dcdada7eaa9dd08
Implements: blueprint emc-vnx-direct-driver-juno-update

cinder/exception.py
cinder/tests/test_emc_vnxdirect.py
cinder/volume/drivers/emc/emc_cli_fc.py [new file with mode: 0644]
cinder/volume/drivers/emc/emc_cli_iscsi.py
cinder/volume/drivers/emc/emc_vnx_cli.py
etc/cinder/cinder.conf.sample

index b72d9d08bd0195b443fb8f511a0ac7a24155e7c2..32ead52ba83fc4a5198ce7a87f189942cc809a06 100644 (file)
@@ -731,3 +731,23 @@ class BrocadeZoningCliException(CinderException):
 
 class NetAppDriverException(VolumeDriverException):
     message = _("NetApp Cinder Driver exception.")
+
+
+class EMCVnxCLICmdError(VolumeBackendAPIException):
+    def __init__(self, cmd=None, rc=None, out='',
+                 log_as_error=True, **kwargs):
+        self.cmd = cmd
+        self.rc = rc
+        self.out = out
+        msg = _("EMCVnxCLICmdError : %(cmd)s "
+                "(Return Code: %(rc)s) "
+                "(Output: %(out)s) ") % \
+            {'cmd': cmd,
+             'rc': rc,
+             'out': out.split('\n')}
+        kwargs["data"] = msg
+        super(EMCVnxCLICmdError, self).__init__(**kwargs)
+        if log_as_error:
+            LOG.error(msg)
+        else:
+            LOG.warn(msg)
index 50000ffc9dbf9db8debdc1b6cce97d158200f963..f2f0698ea12abc976b8226d96de73840cae2b3e2 100644 (file)
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-
-
-import os
-
 import mock
+import os
+import re
 
 from cinder import exception
+from cinder.openstack.common import processutils
 from cinder import test
 from cinder.volume import configuration as conf
+from cinder.volume.drivers.emc.emc_cli_fc import EMCCLIFCDriver
 from cinder.volume.drivers.emc.emc_cli_iscsi import EMCCLIISCSIDriver
-from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCli
+import cinder.volume.drivers.emc.emc_vnx_cli as emc_vnx_cli
+from cinder.volume.drivers.emc.emc_vnx_cli import CommandLineHelper
+from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCLICmdError
 from cinder.volume import volume_types
+from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
+
+SUCCEED = ("", 0)
+FAKE_ERROR_RETURN = ("FAKE ERROR", 255)
 
 
 class EMCVNXCLIDriverTestData():
@@ -37,27 +43,46 @@ class EMCVNXCLIDriverTestData():
         'project_id': 'project',
         'display_name': 'vol1',
         'display_description': 'test volume',
-        'volume_type_id': None}
-    test_volfromsnap = {
-        'name': 'volfromsnap',
+        'volume_type_id': None,
+        'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]
+    }
+
+    test_volume_rw = {
+        'name': 'vol1',
         'size': 1,
-        'volume_name': 'volfromsnap',
-        'id': '10',
+        'volume_name': 'vol1',
+        'id': '1',
         'provider_auth': None,
         'project_id': 'project',
-        'display_name': 'volfromsnap',
+        'display_name': 'vol1',
         'display_description': 'test volume',
-        'volume_type_id': None}
-    test_volfromsnap_e = {
-        'name': 'volfromsnap_e',
+        'volume_type_id': None,
+        'volume_admin_metadata': [{'key': 'access_mode', 'value': 'rw'},
+                                  {'key': 'readonly', 'value': 'False'}]
+    }
+
+    test_volume2 = {
+        'name': 'vol2',
         'size': 1,
-        'volume_name': 'volfromsnap_e',
-        'id': '20',
+        'volume_name': 'vol2',
+        'id': '1',
         'provider_auth': None,
         'project_id': 'project',
-        'display_name': 'volfromsnap_e',
+        'display_name': 'vol2',
         'display_description': 'test volume',
         'volume_type_id': None}
+
+    test_volume_with_type = {
+        'name': 'vol_with_type',
+        'size': 1,
+        'volume_name': 'vol_with_type',
+        'id': '1',
+        'provider_auth': None,
+        'project_id': 'project',
+        'display_name': 'thin_vol',
+        'display_description': 'vol with type',
+        'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231'}
+
     test_failed_volume = {
         'name': 'failed_vol1',
         'size': 1,
@@ -72,7 +97,7 @@ class EMCVNXCLIDriverTestData():
         'name': 'snapshot1',
         'size': 1,
         'id': '4444',
-        'volume_name': 'vol-vol1',
+        'volume_name': 'vol1',
         'volume_size': 1,
         'project_id': 'project'}
     test_failed_snapshot = {
@@ -85,208 +110,415 @@ class EMCVNXCLIDriverTestData():
     test_clone = {
         'name': 'clone1',
         'size': 1,
-        'id': '20',
-        'volume_name': 'clone1',
+        'id': '2',
+        'volume_name': 'vol1',
         'provider_auth': None,
         'project_id': 'project',
         'display_name': 'clone1',
         'display_description': 'volume created from snapshot',
         'volume_type_id': None}
-    test_clone_e = {
-        'name': 'clone1_e',
-        'size': 1,
-        'id': '28',
-        'volume_name': 'clone1_e',
-        'provider_auth': None,
-        'project_id': 'project',
-        'display_name': 'clone1_e',
-        'display_description': 'volume created from snapshot',
-        'volume_type_id': None}
-    test_clone_src = {
-        'name': 'clone1src',
-        'size': 1,
-        'id': '22',
-        'volume_name': 'clone1src',
-        'provider_auth': None,
-        'project_id': 'project',
-        'display_name': 'clone1src',
-        'display_description': 'volume created from snapshot',
-        'volume_type_id': None}
     connector = {
         'ip': '10.0.0.2',
         'initiator': 'iqn.1993-08.org.debian:01:222',
-        'wwpns': ["123456789012345", "123456789054321"],
-        'wwnns': ["223456789012345", "223456789054321"],
+        'wwpns': ["1234567890123456", "1234567890543216"],
+        'wwnns': ["2234567890123456", "2234567890543216"],
         'host': 'fakehost'}
+    test_volume3 = {'migration_status': None, 'availability_zone': 'nova',
+                    'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
+                    'name': 'vol3',
+                    'size': 2,
+                    'volume_admin_metadata': [],
+                    'status': 'available',
+                    'volume_type_id':
+                    '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
+                    'deleted': False, 'provider_location': None,
+                    'host': 'ubuntu-server12@pool_backend_1',
+                    'source_volid': None, 'provider_auth': None,
+                    'display_name': 'vol-test02', 'instance_uuid': None,
+                    'attach_status': 'detached',
+                    'volume_type': [],
+                    'attached_host': None,
+                    '_name_id': None, 'volume_metadata': []}
 
+    test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
+                     'deleted': False,
+                     'extra_specs': {'storagetype:provisioning': 'thin'},
+                     'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
 
-class EMCVNXCLIDriverISCSITestCase(test.TestCase):
+    test_diff = {'encryption': {}, 'qos_specs': {},
+                 'extra_specs':
+                 {'storagetype:provisioning': ('thick', 'thin')}}
+
+    test_host = {'host': 'ubuntu-server12@pool_backend_1',
+                 'capabilities':
+                 {'location_info': 'POOL_SAS1|FNM00124500890',
+                  'volume_backend_name': 'pool_backend_1',
+                  'storage_protocol': 'iSCSI'}}
+
+    test_volume4 = {'migration_status': None, 'availability_zone': 'nova',
+                    'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
+                    'name': 'vol4',
+                    'size': 2L,
+                    'volume_admin_metadata': [],
+                    'status': 'available',
+                    'volume_type_id':
+                    '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
+                    'deleted': False, 'provider_location': None,
+                    'host': 'ubuntu-server12@array_backend_1',
+                    'source_volid': None, 'provider_auth': None,
+                    'display_name': 'vol-test02', 'instance_uuid': None,
+                    'attach_status': 'detached',
+                    'volume_type': [],
+                    '_name_id': None, 'volume_metadata': []}
+
+    test_volume5 = {'migration_status': None, 'availability_zone': 'nova',
+                    'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
+                    'name_id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
+                    'name': 'vol5',
+                    'size': 1,
+                    'volume_admin_metadata': [],
+                    'status': 'available',
+                    'volume_type_id':
+                    '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
+                    'deleted': False, 'provider_location':
+                    'system^FNM11111|type^lun|lun_id^5',
+                    'host': 'ubuntu-server12@array_backend_1',
+                    'source_volid': None, 'provider_auth': None,
+                    'display_name': 'vol-test05', 'instance_uuid': None,
+                    'attach_status': 'detached',
+                    'volume_type': [],
+                    '_name_id': None, 'volume_metadata': []}
+
+    test_new_type2 = {'name': 'voltype0', 'qos_specs_id': None,
+                      'deleted': False,
+                      'extra_specs': {'storagetype:pool': 'POOL_SAS2'},
+                      'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
+
+    test_diff2 = {'encryption': {}, 'qos_specs': {},
+                  'extra_specs':
+                  {'storagetype:pool': ('POOL_SAS1', 'POOL_SAS2')}}
+
+    test_host2 = {'host': 'ubuntu-server12@array_backend_1',
+                  'capabilities':
+                  {'location_info': '|FNM00124500890',
+                   'volume_backend_name': 'array_backend_1',
+                   'storage_protocol': 'iSCSI'}}
 
-    def _fake_cli_executor(self, *cmd, **kwargv):
-        # mock cli
-        if cmd == ("storagepool", "-list",
-                   "-name", "unit_test_pool", "-state"):
-            return None, 0
-        elif cmd == ('storagepool', '-list',
-                     '-name', 'unit_test_pool', '-userCap', '-availableCap'):
-            pool_details = "test\ntest\ntest\ntotal capacity:10000\n" + \
-                "test\nfree capacity:1000\ntest\ntest"
-            return pool_details, 0
-        elif cmd == ('lun', '-create', '-type', 'NonThin',
-                     '-capacity', 1, '-sq', 'gb',
-                     '-poolName', 'unit_test_pool', '-name', 'vol1'):
-            return None, 0
-        elif cmd == ('lun', '-create', '-type', 'NonThin',
-                     '-capacity', 1, '-sq', 'gb',
-                     '-poolName', 'unit_test_pool', '-name', 'failed_vol1'):
-            return None, 1023
-        elif cmd == ('lun', '-create', '-type', 'Thin',
-                     '-capacity', 1, '-sq', 'gb',
-                     '-poolName', 'unit_test_pool', '-name', 'vol1'):
-            return None, 0
-        elif cmd == ('lun', '-list', '-name', 'vol1'):
-            return "   10\nReady", 0
-        elif cmd == ('lun', '-destroy', '-name', 'vol1',
-                     '-forceDetach', '-o'):
-            return "Lun deleted successfully", 0
-        elif cmd == ('lun', '-destroy', '-name', 'failed_vol1',
-                     '-forceDetach', '-o'):
-            return "Lun deleted successfully", 1023
-        elif cmd == ('lun', '-list', '-name', 'vol-vol1'):
-            return "   16\n", 0
-        elif cmd == ('snap', '-create', '-res', '16', '-name',
-                     'snapshot1', '-allowReadWrite', 'yes'):
-            return "Create Snap successfully", 0
-        elif cmd == ('snap', '-create', '-res', '16', '-name',
-                     'failed_snapshot', '-allowReadWrite', 'yes'):
-            return "Create Snap failed", 1023
-        elif cmd == ('snap', '-destroy', '-id', 'snapshot1', '-o'):
-            return "Delete Snap successfully", 0
-        elif cmd == ('lun', '-create', '-type', 'NonThin',
-                     '-capacity', 1, '-sq', 'gb',
-                     '-poolName', 'unit_test_pool', '-name',
-                     'volfromsnapdest'):
-            return "create temp volume successfully", 0
-        elif cmd == ('lun', '-create', '-type', 'NonThin',
-                     '-capacity', 1, '-sq', 'gb',
-                     '-poolName', 'unit_test_pool', '-name',
-                     'volfromsnap_edest'):
-            return "create temp volume successfully", 0
-        elif cmd == ('lun', '-create', '-type', 'Snap',
-                     '-primaryLunName', 'vol-vol1', '-name', 'volfromsnap'):
-            return "create mount point successfully", 0
-        elif cmd == ('lun', '-create', '-type', 'Snap',
-                     '-primaryLunName', 'vol-vol1', '-name', 'volfromsnap_e'):
-            return "create mount point successfully", 0
-        elif cmd == ('lun', '-attach', '-name', 'volfromsnap',
-                     '-snapName', 'snapshot1'):
-            return None, 0
-        elif cmd == ('lun', '-attach', '-name', 'volfromsnap_e',
-                     '-snapName', 'snapshot1'):
-            return None, 0
-        elif cmd == ('lun', '-list', '-name', 'volfromsnap'):
-            return "   10\n", 0
-        elif cmd == ('lun', '-list', '-name', 'volfromsnapdest'):
-            return "   101\n", 0
-        elif cmd == ('lun', '-list', '-name', 'volfromsnap_e'):
-            return "   20\n", 0
-        elif cmd == ('lun', '-list', '-name', 'volfromsnap_edest'):
-            return "   201\n", 0
-        elif cmd == ('migrate', '-start', '-source', '10', '-dest', '101',
-                     '-rate', 'ASAP', '-o'):
-            return None, 0
-        elif cmd == ('migrate', '-start', '-source', '20', '-dest', '201',
-                     '-rate', 'ASAP', '-o'):
-            return None, 0
-        elif cmd == ('lun', '-list', '-name', 'volfromsnap',
-                     '-attachedSnapshot'):
-            return "\n test \n :N/A", 0
-        elif cmd == ('lun', '-list', '-name', 'volfromsnap_e',
-                     '-attachedSnapshot'):
-            return "\n test \n :N", 0
-        elif cmd == ('snap', '-create', '-res', '22', '-name',
-                     'clone1src-temp-snapshot', '-allowReadWrite', 'yes'):
-            return "Create Snap successfully", 0
-        elif cmd == ('lun', '-list', '-name', 'clone1src'):
-            return "   22\n", 0
-        elif cmd == ('lun', '-create', '-type', 'NonThin',
-                     '-capacity', 1, '-sq', 'gb',
-                     '-poolName', 'unit_test_pool', '-name', 'clone1dest'):
-            return "create temp volume successfully", 0
-        elif cmd == ('lun', '-create', '-type', 'Snap',
-                     '-primaryLunName', 'clone1src', '-name', 'clone1'):
-            return "create mount point successfully", 0
-        elif cmd == ('lun', '-attach', '-name', 'clone1',
-                     '-snapName', 'clone1src-temp-snapshot'):
-            return 'create temp snap successfully', 0
-        elif cmd == ('lun', '-list', '-name', 'clone1'):
-            return "   30\n", 0
-        elif cmd == ('lun', '-list', '-name', 'clone1dest'):
-            return "   301\n", 0
-        elif cmd == ('migrate', '-start', '-source', '30', '-dest', '301',
-                     '-rate', 'ASAP', '-o'):
-            return None, 0
-        elif cmd == ('lun', '-list', '-name', 'clone1',
-                     '-attachedSnapshot'):
-            return "\n test \n :N/A", 0
-        elif cmd == ('snap', '-destroy', '-id',
-                     'clone1src-temp-snapshot', '-o'):
-            return None, 0
-        elif cmd == ('lun', '-create', '-type', 'NonThin',
-                     '-capacity', 1, '-sq', 'gb',
-                     '-poolName', 'unit_test_pool', '-name', 'clone1_edest'):
-            return "create temp volume successfully", 0
-        elif cmd == ('lun', '-create', '-type', 'Snap',
-                     '-primaryLunName', 'clone1src', '-name', 'clone1_e'):
-            return "create mount point successfully", 0
-        elif cmd == ('lun', '-attach', '-name', 'clone1_e', '-snapName',
-                     'clone1src-temp-snapshot'):
-            return None, 0
-        elif cmd == ('lun', '-list', '-name', 'clone1_e'):
-            return "   40\n", 0
-        elif cmd == ('lun', '-list', '-name', 'clone1_edest'):
-            return "   401\n", 0
-        elif cmd == ('migrate', '-start', '-source', '40', '-dest', '401',
-                     '-rate', 'ASAP', '-o'):
-            return None, 0
-        elif cmd == ('lun', '-list', '-name', 'clone1_e',
-                     '-attachedSnapshot'):
-            return "\n test \n :N", 0
-        elif cmd == ('lun', '-expand', '-name', 'vol1',
-                     '-capacity', 2, '-sq', 'gb', '-o',
-                     '-ignoreThresholds'):
-            return "Expand volume successfully", 0
-        elif cmd == ('lun', '-expand', '-name', 'failed_vol1',
-                     '-capacity', 2, '-sq', 'gb', '-o',
-                     '-ignoreThresholds'):
-            return "Expand volume failed because it has snap", 97
-        elif cmd == ('lun', '-expand', '-name', 'failed_vol1',
-                     '-capacity', 3, '-sq', 'gb', '-o',
-                     '-ignoreThresholds'):
-            return "Expand volume failed", 1023
-        elif cmd == ('storagegroup', '-list', '-gname',
-                     'fakehost'):
-            return '\nStorage Group Name:    fakehost' + \
-                   '\nStorage Group UID:     78:47:C4:F2:CA:' + \
-                   '\n\nHLU/ALU Pairs:\n\n  HLU Number     ' + \
-                   'ALU Number\n  ----------     ----------\n' + \
-                   '    10               64\nShareable:             YES\n', 0
-        elif cmd == ('lun', '-list', '-l', '10', '-owner'):
-            return '\n\nCurrent Owner:  SP A', 0
-        elif cmd == ('storagegroup', '-addhlu', '-o', '-gname',
-                     'fakehost', '-hlu', 1, '-alu', '10'):
-            return None, 0
-        elif cmd == ('connection', '-getport', '-sp', 'A'):
-            return 'SP:  A\nPort ID:  5\nPort WWN:  iqn.1992-04.' + \
-                   'com.emc:cx.fnm00124000215.a5\niSCSI Alias:  0215.a5\n', 0
+    test_lun_id = 1
+    test_existing_ref = {'id': test_lun_id}
+    test_pool_name = 'Pool_02_SASFLASH'
+    device_map = {
+        '1122334455667788': {
+            'initiator_port_wwn_list': ['123456789012345', '123456789054321'],
+            'target_port_wwn_list': ['1122334455667777']}}
+    i_t_map = {'123456789012345': ['1122334455667777'],
+               '123456789054321': ['1122334455667777']}
+
+    POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
+                         '-userCap', '-availableCap')
+
+    NDU_LIST_CMD = ('ndu', '-list')
+    NDU_LIST_RESULT = ("Name of the software package:   -Compression " +
+                       "Name of the software package:   -Deduplication " +
+                       "Name of the software package:   -FAST " +
+                       "Name of the software package:   -FASTCache " +
+                       "Name of the software package:   -ThinProvisioning ",
+                       0)
+
+    def SNAP_MP_CREATE_CMD(self, name='vol1', source='vol1'):
+        return ('lun', '-create', '-type', 'snap', '-primaryLunName',
+                source, '-name', name)
+
+    def SNAP_ATTACH_CMD(self, name='vol1', snapName='snapshot1'):
+        return ('lun', '-attach', '-name', name, '-snapName', snapName)
+
+    def SNAP_DELETE_CMD(self, name):
+        return ('snap', '-destroy', '-id', name, '-o')
+
+    def SNAP_CREATE_CMD(self, name):
+        return ('snap', '-create', '-res', 1, '-name', name,
+                '-allowReadWrite', 'yes',
+                '-allowAutoDelete', 'no')
+
+    def LUN_DELETE_CMD(self, name):
+        return ('lun', '-destroy', '-name', name, '-forceDetach', '-o')
+
+    def LUN_CREATE_CMD(self, name, isthin=False):
+        return ('lun', '-create', '-type', 'Thin' if isthin else 'NonThin',
+                '-capacity', 1, '-sq', 'gb', '-poolName',
+                'unit_test_pool', '-name', name)
+
+    def LUN_EXTEND_CMD(self, name, newsize):
+        return ('lun', '-expand', '-name', name, '-capacity', newsize,
+                '-sq', 'gb', '-o', '-ignoreThresholds')
+
+    def LUN_PROPERTY_ALL_CMD(self, lunname):
+        return ('lun', '-list', '-name', lunname,
+                '-state', '-status', '-opDetails', '-userCap', '-owner',
+                '-attachedSnapshot')
+
+    def MIGRATION_CMD(self, src_id=1, dest_id=1):
+        return ("migrate", "-start", "-source", src_id, "-dest", dest_id,
+                "-rate", "high", "-o")
+
+    def MIGRATION_VERIFY_CMD(self, src_id):
+        return ("migrate", "-list", "-source", src_id)
+
+    def GETPORT_CMD(self):
+        return ("connection", "-getport", "-address", "-vlanid")
+
+    def PINGNODE_CMD(self, sp, portid, vportid, ip):
+        return ("connection", "-pingnode", "-sp", sp, '-portid', portid,
+                "-vportid", vportid, "-address", ip)
+
+    def GETFCPORT_CMD(self):
+        return ('port', '-list', '-sp')
+
+    def CONNECTHOST_CMD(self, hostname, gname):
+        return ('storagegroup', '-connecthost',
+                '-host', hostname, '-gname', gname, '-o')
+
+    def ENABLE_COMPRESSION_CMD(self, lun_id):
+        return ('compression', '-on',
+                '-l', lun_id, '-ignoreThresholds', '-o')
+
+    provisioning_values = {
+        'thin': ['-type', 'Thin'],
+        'thick': ['-type', 'NonThin'],
+        'compressed': ['-type', 'Thin'],
+        'deduplicated': ['-type', 'Thin', '-deduplication', 'on']}
+    tiering_values = {
+        'starthighthenauto': [
+            '-initialTier', 'highestAvailable',
+            '-tieringPolicy', 'autoTier'],
+        'auto': [
+            '-initialTier', 'optimizePool',
+            '-tieringPolicy', 'autoTier'],
+        'highestavailable': [
+            '-initialTier', 'highestAvailable',
+            '-tieringPolicy', 'highestAvailable'],
+        'lowestavailable': [
+            '-initialTier', 'lowestAvailable',
+            '-tieringPolicy', 'lowestAvailable'],
+        'nomovement': [
+            '-initialTier', 'optimizePool',
+            '-tieringPolicy', 'noMovement']}
+
+    def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering):
+        initial = ['lun', '-create',
+                   '-capacity', size,
+                   '-sq', 'gb',
+                   '-poolName', pool,
+                   '-name', name]
+        if provisioning:
+            initial.extend(self.provisioning_values[provisioning])
         else:
-            self.assertTrue(False)
+            initial.extend(self.provisioning_values['thick'])
+        if tiering:
+            initial.extend(self.tiering_values[tiering])
+        return tuple(initial)
+
+    def CHECK_FASTCACHE_CMD(self, storage_pool):
+        return ('-np', 'storagepool', '-list', '-name',
+                storage_pool, '-fastcache')
+
+    POOL_PROPERTY = ("""\
+Pool Name:  unit_test_pool
+Pool ID:  1
+User Capacity (Blocks):  5769501696
+User Capacity (GBs):  10000.5
+Available Capacity (Blocks):  5676521472
+Available Capacity (GBs):  1000.6
+                        """, 0)
+
+    ALL_PORTS = ("SP:  A\n" +
+                 "Port ID:  4\n" +
+                 "Port WWN:  iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" +
+                 "iSCSI Alias:  0215.a4\n\n" +
+                 "Virtual Port ID:  0\n" +
+                 "VLAN ID:  Disabled\n" +
+                 "IP Address:  10.244.214.118\n\n" +
+                 "SP:  A\n" +
+                 "Port ID:  5\n" +
+                 "Port WWN:  iqn.1992-04.com.emc:cx.fnm00124000215.a5\n" +
+                 "iSCSI Alias:  0215.a5\n", 0)
+
+    iscsi_connection_info_ro = \
+        {'data': {'access_mode': 'ro',
+                  'target_discovered': True,
+                  'target_iqn':
+                  'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
+                  'target_lun': 1,
+                  'target_portal': '10.244.214.118:3260'},
+         'driver_volume_type': 'iscsi'}
+
+    iscsi_connection_info_rw = \
+        {'data': {'access_mode': 'rw',
+                  'target_discovered': True,
+                  'target_iqn':
+                  'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
+                  'target_lun': 1,
+                  'target_portal': '10.244.214.118:3260'},
+         'driver_volume_type': 'iscsi'}
+
+    PING_OK = ("Reply from 10.0.0.2:  bytes=32 time=1ms TTL=30\n" +
+               "Reply from 10.0.0.2:  bytes=32 time=1ms TTL=30\n" +
+               "Reply from 10.0.0.2:  bytes=32 time=1ms TTL=30\n" +
+               "Reply from 10.0.0.2:  bytes=32 time=1ms TTL=30\n", 0)
+
+    FC_PORTS = ("Information about each SPPORT:\n" +
+                "\n" +
+                "SP Name:             SP A\n" +
+                "SP Port ID:          0\n" +
+                "SP UID:              50:06:01:60:88:60:01:95:" +
+                "50:06:01:60:08:60:01:95\n" +
+                "Link Status:         Up\n" +
+                "Port Status:         Online\n" +
+                "Switch Present:      YES\n" +
+                "Switch UID:          10:00:00:05:1E:72:EC:A6:" +
+                "20:46:00:05:1E:72:EC:A6\n" +
+                "SP Source ID:        272896\n" +
+                "\n" +
+                "SP Name:             SP B\n" +
+                "SP Port ID:          4\n" +
+                "SP UID:              iqn.1992-04.com.emc:cx." +
+                "fnm00124000215.b4\n" +
+                "Link Status:         Up\n" +
+                "Port Status:         Online\n" +
+                "Switch Present:      Not Applicable\n" +
+                "\n" +
+                "SP Name:             SP A\n" +
+                "SP Port ID:          2\n" +
+                "SP UID:              50:06:01:60:88:60:01:95:" +
+                "50:06:01:62:08:60:01:95\n" +
+                "Link Status:         Down\n" +
+                "Port Status:         Online\n" +
+                "Switch Present:      NO\n", 0)
+
+    FAKEHOST_PORTS = (
+        "Information about each HBA:\n" +
+        "\n" +
+        "HBA UID:                 20:00:00:90:FA:53:46:41:12:34:" +
+        "56:78:90:12:34:56\n" +
+        "Server Name:             fakehost\n" +
+        "Server IP Address:       10.0.0.2" +
+        "HBA Model Description:\n" +
+        "HBA Vendor Description:\n" +
+        "HBA Device Driver Name:\n" +
+        "Information about each port of this HBA:\n\n" +
+        "    SP Name:               SP A\n" +
+        "    SP Port ID:            0\n" +
+        "    HBA Devicename:\n" +
+        "    Trusted:               NO\n" +
+        "    Logged In:             YES\n" +
+        "    Defined:               YES\n" +
+        "    Initiator Type:           3\n" +
+        "    StorageGroup Name:     fakehost\n\n" +
+        "    SP Name:               SP A\n" +
+        "    SP Port ID:            2\n" +
+        "    HBA Devicename:\n" +
+        "    Trusted:               NO\n" +
+        "    Logged In:             YES\n" +
+        "    Defined:               YES\n" +
+        "    Initiator Type:           3\n" +
+        "    StorageGroup Name:     fakehost\n\n" +
+        "Information about each SPPORT:\n" +
+        "\n" +
+        "SP Name:             SP A\n" +
+        "SP Port ID:          0\n" +
+        "SP UID:              50:06:01:60:88:60:01:95:" +
+        "50:06:01:60:08:60:01:95\n" +
+        "Link Status:         Up\n" +
+        "Port Status:         Online\n" +
+        "Switch Present:      YES\n" +
+        "Switch UID:          10:00:00:05:1E:72:EC:A6:" +
+        "20:46:00:05:1E:72:EC:A6\n" +
+        "SP Source ID:        272896\n" +
+        "\n" +
+        "SP Name:             SP B\n" +
+        "SP Port ID:          4\n" +
+        "SP UID:              iqn.1992-04.com.emc:cx." +
+        "fnm00124000215.b4\n" +
+        "Link Status:         Up\n" +
+        "Port Status:         Online\n" +
+        "Switch Present:      Not Applicable\n" +
+        "\n" +
+        "SP Name:             SP A\n" +
+        "SP Port ID:          2\n" +
+        "SP UID:              50:06:01:60:88:60:01:95:" +
+        "50:06:01:62:08:60:01:95\n" +
+        "Link Status:         Down\n" +
+        "Port Status:         Online\n" +
+        "Switch Present:      NO\n", 0)
+
+    def LUN_PROPERTY(self, name, isThin=False, hasSnap=False, size=1):
+        return """\
+               LOGICAL UNIT NUMBER 1
+               Name:  %s
+               UID:  60:06:01:60:09:20:32:00:13:DF:B4:EF:C2:63:E3:11
+               Current Owner:  SP A
+               Default Owner:  SP A
+               Allocation Owner:  SP A
+               Attached Snapshot: %s
+               User Capacity (Blocks):  2101346304
+               User Capacity (GBs):  %d
+               Consumed Capacity (Blocks):  2149576704
+               Consumed Capacity (GBs):  1024.998
+               Pool Name:  Pool_02_SASFLASH
+               Current State:  Ready
+               Status:  OK(0x0)
+               Is Faulted:  false
+               Is Transitioning:  false
+               Current Operation:  None
+               Current Operation State:  N/A
+               Current Operation Status:  N/A
+               Current Operation Percent Completed:  0
+               Is Thin LUN:  %s""" % (name,
+                                      'FakeSnap' if hasSnap else 'N/A',
+                                      size,
+                                      'Yes' if isThin else 'No'), 0
+
+    def STORAGE_GROUP_NO_MAP(self, sgname):
+        return ("""\
+        Storage Group Name:    %s
+        Storage Group UID:     27:D2:BE:C1:9B:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
+        Shareable:             YES""" % sgname, 0)
+
+    def STORAGE_GROUP_HAS_MAP(self, sgname):
+
+        return ("""\
+        Storage Group Name:    %s
+        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
+        HBA/SP Pairs:
+
+          HBA UID                                          SP Name     SPPort
+          -------                                          -------     ------
+          iqn.1993-08.org.debian:01:222                     SP A         4
+
+        HLU/ALU Pairs:
+
+          HLU Number     ALU Number
+          ----------     ----------
+            1               1
+        Shareable:             YES""" % sgname, 0)
+
+
+class EMCVNXCLIDriverISCSITestCase(test.TestCase):
 
     def setUp(self):
-        # backup
-        back_os_path_exists = os.path.exists
-        self.addCleanup(self._restore, back_os_path_exists)
         super(EMCVNXCLIDriverISCSITestCase, self).setUp()
+
+        self.stubs.Set(CommandLineHelper, 'command_execute',
+                       self.succeed_fake_command_execute)
+        self.stubs.Set(CommandLineHelper, 'get_array_serial',
+                       mock.Mock(return_value={'array_serial':
+                                               'fakeSerial'}))
+        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
+
+        self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
+        self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)
+        self.stubs.Set(emc_vnx_cli, 'INTERVAL_60_SEC', 0.01)
+
         self.configuration = conf.Configuration(None)
         self.configuration.append_config_values = mock.Mock(return_value=0)
         self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
@@ -294,302 +526,2005 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
         self.configuration.storage_vnx_pool_name = 'unit_test_pool'
         self.configuration.san_login = 'sysadmin'
         self.configuration.san_password = 'sysadmin'
-        self.configuration.default_timeout = 0
+        #set the timeout to 0.012s = 0.0002 * 60 = 12ms
+        self.configuration.default_timeout = 0.0002
+        self.configuration.initiator_auto_registration = True
+        self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
         self.testData = EMCVNXCLIDriverTestData()
         self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
             '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
-        os.path.exists = mock.Mock(return_value=1)
-        EMCVnxCli._cli_execute = mock.Mock(side_effect=self._fake_cli_executor)
+        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
+
+    def tearDown(self):
+        super(EMCVNXCLIDriverISCSITestCase, self).tearDown()
+
+    def driverSetup(self, commands=tuple(), results=tuple()):
         self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
-        self.driver.cli.wait_interval = 0
+        fake_command_execute = self.get_command_execute_simulator(
+            commands, results)
+        fake_cli = mock.Mock(side_effect=fake_command_execute)
+        self.driver.cli._client.command_execute = fake_cli
+        return fake_cli
 
-    def _restore(self, back_os_path_exists):
-        # recover
-        os.path.exists = back_os_path_exists
+    def get_command_execute_simulator(self, commands=tuple(),
+                                      results=tuple()):
 
-    def test_create_destroy_volume_withoutExtraSpec(self):
-        # case
-        self.driver.create_volume(self.testData.test_volume)
-        self.driver.delete_volume(self.testData.test_volume)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-create', '-type', 'NonThin',
-                              '-capacity', 1, '-sq', 'gb', '-poolName',
-                              'unit_test_pool', '-name', 'vol1'),
-                    mock.call('lun', '-list', '-name', 'vol1'),
-                    mock.call('lun', '-destroy', '-name', 'vol1',
-                              '-forceDetach', '-o')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
-
-    def test_create_destroy_volume_withExtraSpec(self):
-        # mock
-        extra_specs = {'storage:provisioning': 'Thin'}
-        volume_types.get = mock.Mock(return_value=extra_specs)
-        # case
+        assert(len(commands) == len(results))
+
+        def fake_command_execute(*args, **kwargv):
+            for i in range(len(commands)):
+                if args == commands[i]:
+                    if isinstance(results[i], list):
+                        if len(results[i]) > 0:
+                            ret = results[i][0]
+                            del results[i][0]
+                            return ret
+                    else:
+                        return results[i]
+            return self.standard_fake_command_execute(*args, **kwargv)
+        return fake_command_execute
+
+    def standard_fake_command_execute(self, *args, **kwargv):
+        standard_commands = [
+            self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+            self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+            self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+            self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
+            self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
+            self.testData.POOL_PROPERTY_CMD]
+
+        standard_results = [
+            self.testData.LUN_PROPERTY('vol1'),
+            self.testData.LUN_PROPERTY('vol2'),
+            self.testData.LUN_PROPERTY('vol2_dest'),
+            self.testData.LUN_PROPERTY('vol-vol1'),
+            self.testData.LUN_PROPERTY('snapshot1'),
+            self.testData.POOL_PROPERTY]
+
+        standard_default = SUCCEED
+        for i in range(len(standard_commands)):
+            if args == standard_commands[i]:
+                return standard_results[i]
+
+        return standard_default
+
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    def test_create_destroy_volume_without_extra_spec(self):
+        fake_cli = self.driverSetup()
         self.driver.create_volume(self.testData.test_volume)
         self.driver.delete_volume(self.testData.test_volume)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-create', '-type', 'NonThin',
-                              '-capacity', 1, '-sq', 'gb', '-poolName',
-                              'unit_test_pool', '-name', 'vol1'),
-                    mock.call('lun', '-list', '-name', 'vol1'),
-                    mock.call('lun', '-destroy', '-name', 'vol1',
-                              '-forceDetach', '-o')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol1', 1,
+                'unit_test_pool',
+                'thick', None)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+            mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
+
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    def test_create_volume_compressed(self):
+        extra_specs = {'storagetype:provisioning': 'compressed'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.NDU_LIST_CMD]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.NDU_LIST_RESULT]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        #case
+        self.driver.create_volume(self.testData.test_volume_with_type)
+        #verification
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol_with_type', 1,
+                'unit_test_pool',
+                'compressed', None)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                'vol_with_type')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                'vol_with_type')),
+            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
+                1))]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    def test_create_volume_compressed_tiering_highestavailable(self):
+        extra_specs = {'storagetype:provisioning': 'compressed',
+                       'storagetype:tiering': 'HighestAvailable'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.NDU_LIST_CMD]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.NDU_LIST_RESULT]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        #case
+        self.driver.create_volume(self.testData.test_volume_with_type)
+
+        #verification
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol_with_type', 1,
+                'unit_test_pool',
+                'compressed', 'highestavailable')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                'vol_with_type')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                'vol_with_type')),
+            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
+                1))]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    def test_create_volume_deduplicated(self):
+        extra_specs = {'storagetype:provisioning': 'deduplicated'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.NDU_LIST_CMD]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.NDU_LIST_RESULT]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        #case
+        self.driver.create_volume(self.testData.test_volume_with_type)
+
+        #verification
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol_with_type', 1,
+                'unit_test_pool',
+                'deduplicated', None))]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    def test_create_volume_tiering_auto(self):
+        extra_specs = {'storagetype:tiering': 'Auto'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.NDU_LIST_CMD]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.NDU_LIST_RESULT]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        #case
+        self.driver.create_volume(self.testData.test_volume_with_type)
+
+        #verification
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol_with_type', 1,
+                'unit_test_pool',
+                None, 'auto'))]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    def test_create_volume_deduplicated_tiering_auto(self):
+        extra_specs = {'storagetype:tiering': 'Auto',
+                       'storagetype:provisioning': 'Deduplicated'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.NDU_LIST_CMD]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.NDU_LIST_RESULT]
+        self.driverSetup(commands, results)
+        ex = self.assertRaises(
+            exception.VolumeBackendAPIException,
+            self.driver.create_volume,
+            self.testData.test_volume_with_type)
+        self.assertTrue(
+            re.match(r".*deduplicated and auto tiering can't be both enabled",
+                     ex.msg))
+
+    def test_create_volume_compressed_no_enabler(self):
+        extra_specs = {'storagetype:provisioning': 'Compressed'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.NDU_LIST_CMD]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   ('No package', 0)]
+        self.driverSetup(commands, results)
+        ex = self.assertRaises(
+            exception.VolumeBackendAPIException,
+            self.driver.create_volume,
+            self.testData.test_volume_with_type)
+        self.assertTrue(
+            re.match(r".*Compression Enabler is not installed",
+                     ex.msg))
+
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    def test_create_compression_volume_on_array_backend(self):
+        """Unit test for create a compression volume on array
+        backend.
+        """
+        #Set up the array backend
+        config = conf.Configuration(None)
+        config.append_config_values = mock.Mock(return_value=0)
+        config.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
+        config.san_ip = '10.0.0.1'
+        config.san_login = 'sysadmin'
+        config.san_password = 'sysadmin'
+        config.default_timeout = 0.0002
+        config.initiator_auto_registration = True
+        config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
+            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
+        config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
+        self.driver = EMCCLIISCSIDriver(configuration=config)
+        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
+
+        extra_specs = {'storagetype:provisioning': 'Compressed',
+                       'storagetype:pool': 'unit_test_pool'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.NDU_LIST_CMD]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.NDU_LIST_RESULT]
+        fake_command_execute = self.get_command_execute_simulator(
+            commands, results)
+        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
+        self.driver.cli._client.command_execute = fake_cli
+
+        self.driver.cli.stats['compression_support'] = 'True'
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        #case
+        self.driver.create_volume(self.testData.test_volume_with_type)
+        #verification
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol_with_type', 1,
+                'unit_test_pool',
+                'compressed', None)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                'vol_with_type')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                'vol_with_type')),
+            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
+                1))]
+        fake_cli.assert_has_calls(expect_cmd)
 
     def test_get_volume_stats(self):
-        # mock
-        self.configuration.safe_get = mock.Mock(return_value=0)
-        # case
-        rc = self.driver.get_volume_stats(True)
-        stats = {'volume_backend_name': 'EMCCLIISCSIDriver',
-                 'free_capacity_gb': 1000.0,
-                 'driver_version': '02.00.00', 'total_capacity_gb': 10000.0,
-                 'reserved_percentage': 0, 'vendor_name': 'EMC',
-                 'storage_protocol': 'iSCSI'}
-        self.assertEqual(rc, stats)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-userCap', '-availableCap')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        #expect_result = [POOL_PROPERTY]
+        self.driverSetup()
+        stats = self.driver.get_volume_stats(True)
+        self.assertTrue(stats['driver_version'] is not None,
+                        "driver_version is not returned")
+        self.assertTrue(
+            stats['free_capacity_gb'] == 1000.6,
+            "free_capacity_gb is not correct")
+        self.assertTrue(
+            stats['reserved_percentage'] == 0,
+            "reserved_percentage is not correct")
+        self.assertTrue(
+            stats['storage_protocol'] == 'iSCSI',
+            "storage_protocol is not correct")
+        self.assertTrue(
+            stats['total_capacity_gb'] == 10000.5,
+            "total_capacity_gb is not correct")
+        self.assertTrue(
+            stats['vendor_name'] == "EMC",
+            "vendor name is not correct")
+        self.assertTrue(
+            stats['volume_backend_name'] == "namedbackend",
+            "volume backend name is not correct")
+        self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
+        self.assertTrue(
+            stats['driver_version'] == "04.00.00",
+            "driver version is incorrect.")
+
+    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
+                "CommandLineHelper.create_lun_by_cmd",
+                mock.Mock(return_value=True))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(
+            side_effect=[1, 1]))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
+        "get_lun_id_by_name",
+        mock.Mock(return_value=1))
+    def test_volume_migration_timeout(self):
+        commands = [self.testData.MIGRATION_CMD(),
+                    self.testData.MIGRATION_VERIFY_CMD(1)]
+        FAKE_ERROR_MSG = """\
+A network error occurred while trying to connect: '10.244.213.142'.
+Message : Error occurred because connection refused. \
+Unable to establish a secure connection to the Management Server.
+"""
+        FAKE_ERROR_MSG = FAKE_ERROR_MSG.replace('\n', ' ')
+        FAKE_MIGRATE_PROPETY = """\
+Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
+Source LU ID:  63950
+Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
+Dest LU ID:  136
+Migration Rate:  high
+Current State:  MIGRATED
+Percent Complete:  100
+Time Remaining:  0 second(s)
+"""
+        results = [(FAKE_ERROR_MSG, 255),
+                   [SUCCEED,
+                    (FAKE_MIGRATE_PROPETY, 0),
+                    ('The specified source LUN is not currently migrating',
+                     23)]]
+        fake_cli = self.driverSetup(commands, results)
+        fakehost = {'capabilities': {'location_info':
+                                     "unit_test_pool2|fakeSerial",
+                                     'storage_protocol': 'iSCSI'}}
+        ret = self.driver.migrate_volume(None, self.testData.test_volume,
+                                         fakehost)[0]
+        self.assertTrue(ret)
+        #verification
+        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
+                                retry_disable=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
+                "CommandLineHelper.create_lun_by_cmd",
+                mock.Mock(
+                    return_value=True))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(
+            side_effect=[1, 1]))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
+        "get_lun_id_by_name",
+        mock.Mock(return_value=1))
+    def test_volume_migration(self):
+
+        commands = [self.testData.MIGRATION_CMD(),
+                    self.testData.MIGRATION_VERIFY_CMD(1)]
+        FAKE_MIGRATE_PROPETY = """\
+Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
+Source LU ID:  63950
+Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
+Dest LU ID:  136
+Migration Rate:  high
+Current State:  MIGRATED
+Percent Complete:  100
+Time Remaining:  0 second(s)
+"""
+        results = [SUCCEED, [(FAKE_MIGRATE_PROPETY, 0),
+                             ('The specified source LUN is not '
+                              'currently migrating',
+                              23)]]
+        fake_cli = self.driverSetup(commands, results)
+        fakehost = {'capabilities': {'location_info':
+                                     "unit_test_pool2|fakeSerial",
+                                     'storage_protocol': 'iSCSI'}}
+        ret = self.driver.migrate_volume(None, self.testData.test_volume,
+                                         fakehost)[0]
+        self.assertTrue(ret)
+        #verification
+        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
+                                retry_disable=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
+                "CommandLineHelper.create_lun_by_cmd",
+                mock.Mock(
+                    return_value=True))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
+        "get_lun_id_by_name",
+        mock.Mock(return_value=5))
+    def test_volume_migration_02(self):
+
+        commands = [self.testData.MIGRATION_CMD(5, 5),
+                    self.testData.MIGRATION_VERIFY_CMD(5)]
+        FAKE_MIGRATE_PROPETY = """\
+Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
+Source LU ID:  63950
+Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
+Dest LU ID:  136
+Migration Rate:  high
+Current State:  MIGRATED
+Percent Complete:  100
+Time Remaining:  0 second(s)
+"""
+        results = [SUCCEED, [(FAKE_MIGRATE_PROPETY, 0),
+                             ('The specified source LUN is not '
+                              'currently migrating',
+                              23)]]
+        fake_cli = self.driverSetup(commands, results)
+        fakehost = {'capabilities': {'location_info':
+                                     "unit_test_pool2|fakeSerial",
+                                     'storage_protocol': 'iSCSI'}}
+        ret = self.driver.migrate_volume(None, self.testData.test_volume5,
+                                         fakehost)[0]
+        self.assertTrue(ret)
+        #verification
+        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5),
+                                retry_disable=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5)),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5))]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
+                "CommandLineHelper.create_lun_by_cmd",
+                mock.Mock(
+                    return_value=True))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(
+            side_effect=[1, 1]))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
+        "get_lun_id_by_name",
+        mock.Mock(return_value=1))
+    def test_volume_migration_failed(self):
+        commands = [self.testData.MIGRATION_CMD()]
+        results = [FAKE_ERROR_RETURN]
+        fake_cli = self.driverSetup(commands, results)
+        fakehost = {'capabilities': {'location_info':
+                                     "unit_test_pool2|fakeSerial",
+                                     'storage_protocol': 'iSCSI'}}
+        ret = self.driver.migrate_volume(None, self.testData.test_volume,
+                                         fakehost)[0]
+        self.assertFalse(ret)
+        #verification
+        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
+                                retry_disable=True)]
+        fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_destroy_volume_snapshot(self):
-        # case
+        fake_cli = self.driverSetup()
+
+        #case
         self.driver.create_snapshot(self.testData.test_snapshot)
         self.driver.delete_snapshot(self.testData.test_snapshot)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-list', '-name', 'vol-vol1'),
-                    mock.call('snap', '-create', '-res', '16', '-name',
-                              'snapshot1', '-allowReadWrite', 'yes'),
-                    mock.call('snap', '-destroy', '-id', 'snapshot1', '-o')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
-
-    @mock.patch.object(
-        EMCCLIISCSIDriver,
-        '_do_iscsi_discovery',
-        return_value=['10.0.0.3:3260,1 '
-                      'iqn.1992-04.com.emc:cx.apm00123907237.a8',
-                      '10.0.0.4:3260,2 '
-                      'iqn.1992-04.com.emc:cx.apm00123907237.b8'])
-    def test_initialize_connection(self, _mock_iscsi_discovery):
-        # case
-        rc = self.driver.initialize_connection(
+
+        #verification
+        expect_cmd = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+                      mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1')),
+                      mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'))]
+
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "cinder.openstack.common.processutils.execute",
+        mock.Mock(
+            return_value=(
+                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
+    @mock.patch("random.shuffle", mock.Mock())
+    def test_initialize_connection(self):
+        # Test for auto registration
+        self.configuration.initiator_auto_registration = True
+        commands = [('storagegroup', '-list', '-gname', 'fakehost'),
+                    self.testData.GETPORT_CMD(),
+                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
+        results = [[("No group", 83),
+                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
+                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
+                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
+                   self.testData.ALL_PORTS,
+                   self.testData.PING_OK]
+
+        fake_cli = self.driverSetup(commands, results)
+        connection_info = self.driver.initialize_connection(
             self.testData.test_volume,
             self.testData.connector)
-        connect_info = {'driver_volume_type': 'iscsi', 'data':
-                        {'target_lun': -1, 'volume_id': '1',
-                         'target_iqn': 'iqn.1992-04.com.emc:' +
-                         'cx.apm00123907237.b8',
-                         'target_discovered': True,
-                         'target_portal': '10.0.0.4:3260'}}
-        self.assertEqual(rc, connect_info)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
+
+        self.assertEqual(connection_info,
+                         self.testData.iscsi_connection_info_ro)
+
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
+                    mock.call('storagegroup', '-list'),
+                    mock.call(*self.testData.GETPORT_CMD()),
+                    mock.call('storagegroup', '-gname', 'fakehost', '-setpath',
+                              '-hbauid', 'iqn.1993-08.org.debian:01:222',
+                              '-sp', 'A', '-spport', 4, '-spvport', 0,
+                              '-ip', '10.0.0.2', '-host', 'fakehost', '-o'),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
+                              '-gname', 'fakehost'),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+                    mock.call(*self.testData.GETPORT_CMD()),
+                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
+                                                          '10.0.0.2'))]
+        fake_cli.assert_has_calls(expected)
+
+        # Test for manual registration
+        self.configuration.initiator_auto_registration = False
+
+        commands = [('storagegroup', '-list', '-gname', 'fakehost'),
+                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
+                    self.testData.GETPORT_CMD(),
+                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
+        results = [[("No group", 83),
+                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
+                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
+                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
+                   ('', 0),
+                   self.testData.ALL_PORTS,
+                   self.testData.PING_OK]
+        fake_cli = self.driverSetup(commands, results)
+        connection_info = self.driver.initialize_connection(
+            self.testData.test_volume_rw,
+            self.testData.connector)
+
+        self.assertEqual(connection_info,
+                         self.testData.iscsi_connection_info_rw)
+
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
+                    mock.call('storagegroup', '-connecthost',
+                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
                     mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('lun', '-list', '-name', 'vol1'),
-                    mock.call('lun', '-list', '-name', 'vol1'),
+                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
+                              '-gname', 'fakehost'),
                     mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('lun', '-list', '-l', '10', '-owner'),
-                    mock.call('storagegroup', '-addhlu', '-o', '-gname',
-                              'fakehost', '-hlu', 1, '-alu', '10'),
-                    mock.call('lun', '-list', '-name', 'vol1'),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
                     mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('lun', '-list', '-l', '10', '-owner'),
-                    mock.call('connection', '-getport', '-sp', 'A')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+                    mock.call('connection', '-getport', '-address', '-vlanid')]
+        fake_cli.assert_has_calls(expected)
 
     def test_terminate_connection(self):
-        # case
+
+        os.path.exists = mock.Mock(return_value=1)
+        self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+        cli_helper = self.driver.cli._client
+        data = {'storage_group_name': "fakehost",
+                'storage_group_uid': "2F:D4:00:00:00:00:00:"
+                "00:00:00:FF:E5:3A:03:FD:6D",
+                'lunmap': {1: 16, 2: 88, 3: 47}}
+        cli_helper.get_storage_group = mock.Mock(
+            return_value=data)
+        lun_info = {'lun_name': "unit_test_lun",
+                    'lun_id': 1,
+                    'pool': "unit_test_pool",
+                    'attached_snapshot': "N/A",
+                    'owner': "A",
+                    'total_capacity_gb': 1.0,
+                    'state': "Ready"}
+        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
+        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
         self.driver.terminate_connection(self.testData.test_volume,
                                          self.testData.connector)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('lun', '-list', '-name', 'vol1'),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('lun', '-list', '-l', '10', '-owner')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
+            16, self.testData.connector["host"])
+#         expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+#                     mock.call('lun', '-list', '-name', 'vol1'),
+#                     mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+#                     mock.call('lun', '-list', '-l', '10', '-owner')]
 
-    def test_create_volume_failed(self):
-        # case
-        self.assertRaises(exception.VolumeBackendAPIException,
+    def test_create_volume_cli_failed(self):
+        commands = [self.testData.LUN_CREATION_CMD(
+            'failed_vol1', 1, 'unit_test_pool', None, None)]
+        results = [FAKE_ERROR_RETURN]
+        fake_cli = self.driverSetup(commands, results)
+
+        self.assertRaises(EMCVnxCLICmdError,
                           self.driver.create_volume,
                           self.testData.test_failed_volume)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-create', '-type', 'NonThin',
-                              '-capacity', 1, '-sq', 'gb', '-poolName',
-                              'unit_test_pool', '-name', 'failed_vol1')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
+            'failed_vol1', 1, 'unit_test_pool', None, None))]
+        fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_volume_snapshot_failed(self):
-        # case
-        self.assertRaises(exception.VolumeBackendAPIException,
+        commands = [self.testData.SNAP_CREATE_CMD('failed_snapshot')]
+        results = [FAKE_ERROR_RETURN]
+        fake_cli = self.driverSetup(commands, results)
+
+        #case
+        self.assertRaises(EMCVnxCLICmdError,
                           self.driver.create_snapshot,
                           self.testData.test_failed_snapshot)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-list', '-name', 'vol-vol1'),
-                    mock.call('snap', '-create', '-res', '16', '-name',
-                              'failed_snapshot', '-allowReadWrite', 'yes')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+
+        #verification
+        expect_cmd = [
+            mock.call(
+                *self.testData.LUN_PROPERTY_ALL_CMD(
+                    'vol-vol1')),
+            mock.call(
+                *self.testData.SNAP_CREATE_CMD(
+                    'failed_snapshot'))]
+
+        fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_volume_from_snapshot(self):
-        # case
-        self.driver.create_volume_from_snapshot(self.testData.test_volfromsnap,
+        #set up
+        cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
+        output_smp = ("""LOGICAL UNIT NUMBER 1
+                     Name:  vol2
+                     Attached Snapshot:  N/A""", 0)
+        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
+        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
+        output_migrate = ("", 0)
+        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
+        output_migrate_verify = (r'The specified source LUN '
+                                 'is not currently migrating', 23)
+        commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
+        results = [output_smp, output_dest, output_migrate,
+                   output_migrate_verify]
+        fake_cli = self.driverSetup(commands, results)
+
+        self.driver.create_volume_from_snapshot(self.testData.test_volume2,
                                                 self.testData.test_snapshot)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-create', '-type', 'NonThin',
-                              '-capacity', 1, '-sq', 'gb', '-poolName',
-                              'unit_test_pool', '-name', 'volfromsnapdest'),
-                    mock.call('lun', '-create', '-type', 'Snap',
-                              '-primaryLunName', 'vol-vol1', '-name',
-                              'volfromsnap'),
-                    mock.call('lun', '-attach', '-name', 'volfromsnap',
-                              '-snapName', 'snapshot1'),
-                    mock.call('lun', '-list', '-name', 'volfromsnap'),
-                    mock.call('lun', '-list', '-name', 'volfromsnapdest'),
-                    mock.call('migrate', '-start', '-source', '10', '-dest',
-                              '101', '-rate', 'ASAP', '-o'),
-                    mock.call('lun', '-list', '-name', 'volfromsnap',
-                              '-attachedSnapshot')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expect_cmd = [
+            mock.call(
+                *self.testData.SNAP_MP_CREATE_CMD(
+                    name='vol2', source='vol1')),
+            mock.call(
+                *self.testData.SNAP_ATTACH_CMD(
+                    name='vol2', snapName='snapshot1')),
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol2_dest', 1, 'unit_test_pool', None, None)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.MIGRATION_CMD(1, 1),
+                      retry_disable=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
+
+            mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot')]
+        fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_volume_from_snapshot_sync_failed(self):
-        # case
+
+        output_smp = ("""LOGICAL UNIT NUMBER 1
+                    Name:  vol1
+                    Attached Snapshot:  fakesnap""", 0)
+        cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
+        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
+        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
+        output_migrate = ("", 0)
+        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
+        output_migrate_verify = (r'The specified source LUN '
+                                 'is not currently migrating', 23)
+        commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
+        results = [output_smp, output_dest, output_migrate,
+                   output_migrate_verify]
+        fake_cli = self.driverSetup(commands, results)
+
         self.assertRaises(exception.VolumeBackendAPIException,
                           self.driver.create_volume_from_snapshot,
-                          self.testData.test_volfromsnap_e,
+                          self.testData.test_volume2,
                           self.testData.test_snapshot)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-create', '-type', 'NonThin',
-                              '-capacity', 1, '-sq', 'gb', '-poolName',
-                              'unit_test_pool', '-name', 'volfromsnap_edest'),
-                    mock.call('lun', '-create', '-type', 'Snap',
-                              '-primaryLunName', 'vol-vol1', '-name',
-                              'volfromsnap_e'),
-                    mock.call('lun', '-attach', '-name', 'volfromsnap_e',
-                              '-snapName', 'snapshot1'),
-                    mock.call('lun', '-list', '-name', 'volfromsnap_e'),
-                    mock.call('lun', '-list', '-name', 'volfromsnap_edest'),
-                    mock.call('migrate', '-start', '-source', '20', '-dest',
-                              '201', '-rate', 'ASAP', '-o'),
-                    mock.call('lun', '-list', '-name', 'volfromsnap_e',
-                              '-attachedSnapshot')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expect_cmd = [
+            mock.call(
+                *self.testData.SNAP_MP_CREATE_CMD(
+                    name='vol2', source='vol1')),
+            mock.call(
+                *self.testData.SNAP_ATTACH_CMD(
+                    name='vol2', snapName='snapshot1')),
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol2_dest', 1, 'unit_test_pool', None, None)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.MIGRATION_CMD(1, 1),
+                      retry_disable=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+        fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_cloned_volume(self):
-        # case
-        self.driver.create_cloned_volume(self.testData.test_clone,
-                                         self.testData.test_clone_src)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-list', '-name', 'clone1src'),
-                    mock.call('snap', '-create', '-res', '22', '-name',
-                              'clone1src-temp-snapshot', '-allowReadWrite',
-                              'yes'),
-                    mock.call('lun', '-create', '-type', 'NonThin',
-                              '-capacity', 1, '-sq', 'gb', '-poolName',
-                              'unit_test_pool', '-name', 'clone1dest'),
-                    mock.call('lun', '-create', '-type', 'Snap',
-                              '-primaryLunName', 'clone1src', '-name',
-                              'clone1'),
-                    mock.call('lun', '-attach', '-name', 'clone1',
-                              '-snapName', 'clone1src-temp-snapshot'),
-                    mock.call('lun', '-list', '-name', 'clone1'),
-                    mock.call('lun', '-list', '-name', 'clone1dest'),
-                    mock.call('migrate', '-start', '-source', '30', '-dest',
-                              '301', '-rate', 'ASAP', '-o'),
-                    mock.call('lun', '-list', '-name', 'clone1',
-                              '-attachedSnapshot'),
-                    mock.call('snap', '-destroy', '-id',
-                              'clone1src-temp-snapshot', '-o')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
-
-    def test_create_volume_clone_sync_failed(self):
-        # case
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_cloned_volume,
-                          self.testData.test_clone_e,
-                          self.testData.test_clone_src)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-list', '-name', 'clone1src'),
-                    mock.call('snap', '-create', '-res', '22', '-name',
-                              'clone1src-temp-snapshot', '-allowReadWrite',
-                              'yes'),
-                    mock.call('lun', '-create', '-type', 'NonThin',
-                              '-capacity', 1, '-sq', 'gb', '-poolName',
-                              'unit_test_pool', '-name', 'clone1_edest'),
-                    mock.call('lun', '-create', '-type', 'Snap',
-                              '-primaryLunName', 'clone1src', '-name',
-                              'clone1_e'),
-                    mock.call('lun', '-attach', '-name', 'clone1_e',
-                              '-snapName', 'clone1src-temp-snapshot'),
-                    mock.call('lun', '-list', '-name', 'clone1_e'),
-                    mock.call('lun', '-list', '-name', 'clone1_edest'),
-                    mock.call('migrate', '-start', '-source', '40', '-dest',
-                              '401', '-rate', 'ASAP', '-o'),
-                    mock.call('lun', '-list', '-name', 'clone1_e',
-                              '-attachedSnapshot')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot')
+        output_smp = ("""LOGICAL UNIT NUMBER 1
+                     Name:  vol1
+                     Attached Snapshot:  N/A""", 0)
+        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
+        output_dest = self.testData.LUN_PROPERTY("vol1_dest")
+        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
+        output_migrate = ("", 0)
+        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
+        output_migrate_verify = (r'The specified source LUN '
+                                 'is not currently migrating', 23)
+        commands = [cmd_smp, cmd_dest, cmd_migrate,
+                    cmd_migrate_verify,
+                    self.testData.NDU_LIST_CMD]
+        results = [output_smp, output_dest, output_migrate,
+                   output_migrate_verify,
+                   self.testData.NDU_LIST_RESULT]
+        fake_cli = self.driverSetup(commands, results)
+
+        self.driver.create_cloned_volume(self.testData.test_volume,
+                                         self.testData.test_snapshot)
+        tmp_snap = 'tmp-snap-' + self.testData.test_volume['id']
+        expect_cmd = [
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('snapshot1')),
+            mock.call(
+                *self.testData.SNAP_CREATE_CMD(tmp_snap)),
+            mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
+                                                        source='snapshot1')),
+            mock.call(
+                *self.testData.SNAP_ATTACH_CMD(
+                    name='vol1', snapName=tmp_snap)),
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol1_dest', 1, 'unit_test_pool', None, None)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
+            mock.call(*self.testData.MIGRATION_CMD(1, 1),
+                      retry_disable=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
+            mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+            mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap))]
+        fake_cli.assert_has_calls(expect_cmd)
 
     def test_delete_volume_failed(self):
-        # case
-        self.assertRaises(exception.VolumeBackendAPIException,
+        commands = [self.testData.LUN_DELETE_CMD('failed_vol1')]
+        results = [FAKE_ERROR_RETURN]
+        fake_cli = self.driverSetup(commands, results)
+
+        self.assertRaises(EMCVnxCLICmdError,
                           self.driver.delete_volume,
                           self.testData.test_failed_volume)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-destroy', '-name', 'failed_vol1',
-                              '-forceDetach', '-o')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))]
+        fake_cli.assert_has_calls(expected)
 
     def test_extend_volume(self):
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
+        results = [self.testData.LUN_PROPERTY('vol1', size=2)]
+        fake_cli = self.driverSetup(commands, results)
+
         # case
         self.driver.extend_volume(self.testData.test_volume, 2)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-expand', '-name', 'vol1', '-capacity',
-                              2, '-sq', 'gb', '-o', '-ignoreThresholds')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2)),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                        'vol1'))]
+        fake_cli.assert_has_calls(expected)
 
     def test_extend_volume_has_snapshot(self):
-        # case
-        self.assertRaises(exception.VolumeBackendAPIException,
+        commands = [self.testData.LUN_EXTEND_CMD('failed_vol1', 2)]
+        results = [FAKE_ERROR_RETURN]
+        fake_cli = self.driverSetup(commands, results)
+
+        self.assertRaises(EMCVnxCLICmdError,
                           self.driver.extend_volume,
                           self.testData.test_failed_volume,
                           2)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-expand', '-name', 'failed_vol1',
-                              '-capacity', 2, '-sq', 'gb', '-o',
-                              '-ignoreThresholds')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2))]
+        fake_cli.assert_has_calls(expected)
 
     def test_extend_volume_failed(self):
-        # case
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1')]
+        results = [self.testData.LUN_PROPERTY('failed_vol1', size=2)]
+        fake_cli = self.driverSetup(commands, results)
+
         self.assertRaises(exception.VolumeBackendAPIException,
                           self.driver.extend_volume,
                           self.testData.test_failed_volume,
                           3)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-expand', '-name', 'failed_vol1',
-                              '-capacity', 3, '-sq', 'gb', '-o',
-                              '-ignoreThresholds')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expected = [
+            mock.call(
+                *self.testData.LUN_EXTEND_CMD('failed_vol1', 3)),
+            mock.call(
+                *self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'))]
+        fake_cli.assert_has_calls(expected)
 
     def test_create_remove_export(self):
-        # case
+        fake_cli = self.driverSetup()
+
         self.driver.create_export(None, self.testData.test_volume)
         self.driver.remove_export(None, self.testData.test_volume)
-        expected = [mock.call('storagepool', '-list', '-name',
-                              'unit_test_pool', '-state'),
-                    mock.call('lun', '-list', '-name', 'vol1')]
-        EMCVnxCli._cli_execute.assert_has_calls(expected)
+        expected = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'))]
+        fake_cli.assert_has_calls(expected)
+
+    def test_manage_existing(self):
+        """Unit test for the manage_existing function
+        of the driver.
+        """
+        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+                       '-state', '-userCap', '-owner',
+                       '-attachedSnapshot', '-poolName')
+        lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
+                          '-newName', 'vol_with_type', '-o')
+        commands = [get_lun_cmd, lun_rename_cmd]
+
+        results = [self.testData.LUN_PROPERTY('lun_name'), SUCCEED]
+        self.configuration.storage_vnx_pool_name = \
+            self.testData.test_pool_name
+        self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
+        #mock the command executor
+        fake_command_execute = self.get_command_execute_simulator(
+            commands, results)
+        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
+        self.driver.cli._client.command_execute = fake_cli
+        self.driver.manage_existing(
+            self.testData.test_volume_with_type,
+            self.testData.test_existing_ref)
+        expected = [mock.call(*get_lun_cmd),
+                    mock.call(*lun_rename_cmd)]
+        fake_cli.assert_has_calls(expected)
+
+    def test_manage_existing_lun_in_another_pool(self):
+        """Unit test for the manage_existing function
+        of the driver with an invalid pool backend.
+        An exception is expected in this case.
+        """
+        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+                       '-state', '-userCap', '-owner',
+                       '-attachedSnapshot', '-poolName')
+        commands = [get_lun_cmd]
+
+        results = [self.testData.LUN_PROPERTY('lun_name')]
+        invalid_pool_name = "fake_pool"
+        self.configuration.storage_vnx_pool_name = invalid_pool_name
+        self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
+        #mock the command executor
+        fake_command_execute = self.get_command_execute_simulator(
+            commands, results)
+        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
+        self.driver.cli._client.command_execute = fake_cli
+        ex = self.assertRaises(
+            exception.ManageExistingInvalidReference,
+            self.driver.manage_existing,
+            self.testData.test_volume_with_type,
+            self.testData.test_existing_ref)
+        self.assertTrue(
+            re.match(r'.*not in a manageable pool backend by cinder',
+                     ex.msg))
+        expected = [mock.call(*get_lun_cmd)]
+        fake_cli.assert_has_calls(expected)
+
+    def test_manage_existing_get_size(self):
+        """Unit test for the manage_existing_get_size
+        function of the driver.
+        """
+        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+                       '-state', '-status', '-opDetails', '-userCap', '-owner',
+                       '-attachedSnapshot')
+        test_size = 2
+        commands = [get_lun_cmd]
+        results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
+
+        self.configuration.storage_vnx_pool_name = \
+            self.testData.test_pool_name
+        self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
+
+        #mock the command executor
+        fake_command_execute = self.get_command_execute_simulator(
+            commands, results)
+        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
+        self.driver.cli._client.command_execute = fake_cli
+
+        get_size = self.driver.manage_existing_get_size(
+            self.testData.test_volume_with_type,
+            self.testData.test_existing_ref)
+        expected = [mock.call(*get_lun_cmd)]
+        assert get_size == test_size
+        fake_cli.assert_has_calls(expected)
+        # Test the function with an invalid reference.
+        invaild_ref = {'fake': 'fake_ref'}
+        self.assertRaises(exception.ManageExistingInvalidReference,
+                          self.driver.manage_existing_get_size,
+                          self.testData.test_volume_with_type,
+                          invaild_ref)
+
+    def test_manage_existing_with_array_backend(self):
+        """Unit test for manage_existing with the
+        array backend, which does not support the
+        manage existing functionality.
+        """
+        #Set up the array backend
+        config = conf.Configuration(None)
+        config.append_config_values = mock.Mock(return_value=0)
+        config.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
+        config.san_ip = '10.0.0.1'
+        config.san_login = 'sysadmin'
+        config.san_password = 'sysadmin'
+        config.default_timeout = 0.0002
+        config.initiator_auto_registration = True
+        config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
+            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
+        config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
+        self.driver = EMCCLIISCSIDriver(configuration=config)
+        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
+        #mock the command executor
+        lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
+                          '-newName', 'vol_with_type', '-o')
+        commands = [lun_rename_cmd]
+        results = [SUCCEED]
+        #mock the command executor
+        fake_command_execute = self.get_command_execute_simulator(
+            commands, results)
+        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
+        self.driver.cli._client.command_execute = fake_cli
+        self.driver.manage_existing(
+            self.testData.test_volume_with_type,
+            self.testData.test_existing_ref)
+        expected = [mock.call(*lun_rename_cmd)]
+        fake_cli.assert_has_calls(expected)
+
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(return_value=1))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
+        "get_lun_id_by_name",
+        mock.Mock(return_value=1))
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    @mock.patch(
+        "time.time",
+        mock.Mock(return_value=123456))
+    def test_retype_compressed_to_deduplicated(self):
+        """Unit test for retype compressed to deduplicated."""
+        diff_data = {'encryption': {}, 'qos_specs': {},
+                     'extra_specs':
+                     {'storagetype:provsioning': ('compressed',
+                                                  'deduplicated')}}
+
+        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
+                         'deleted': False,
+                         'extra_specs': {'storagetype:provisioning':
+                                         'deduplicated'},
+                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
+
+        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
+                          'capabilities':
+                          {'location_info': 'unit_test_pool|FNM00124500890',
+                           'volume_backend_name': 'pool_backend_1',
+                           'storage_protocol': 'iSCSI'}}
+
+        commands = [self.testData.NDU_LIST_CMD,
+                    ('snap', '-list', '-res', 1)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   ('No snap', 1023)]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        CommandLineHelper.get_array_serial = mock.Mock(
+            return_value={'array_serial': "FNM00124500890"})
+
+        extra_specs = {'storagetype:provisioning': 'compressed'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+        self.driver.retype(None, self.testData.test_volume3,
+                           new_type_data,
+                           diff_data,
+                           host_test_data)
+        expect_cmd = [
+            mock.call('snap', '-list', '-res', 1),
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456')),
+            mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(return_value=1))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
+        "get_lun_by_name",
+        mock.Mock(return_value={'lun_id': 1}))
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    @mock.patch(
+        "time.time",
+        mock.Mock(return_value=123456))
+    def test_retype_thin_to_compressed_auto(self):
+        """Unit test for retype thin to compressed and auto tiering."""
+        diff_data = {'encryption': {}, 'qos_specs': {},
+                     'extra_specs':
+                     {'storagetype:provsioning': ('thin',
+                                                  'compressed'),
+                      'storagetype:tiering': (None, 'auto')}}
+
+        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
+                         'deleted': False,
+                         'extra_specs': {'storagetype:provisioning':
+                                         'compressed',
+                                         'storagetype:tiering': 'auto'},
+                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
+
+        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
+                          'capabilities':
+                          {'location_info': 'unit_test_pool|FNM00124500890',
+                           'volume_backend_name': 'pool_backend_1',
+                           'storage_protocol': 'iSCSI'}}
+
+        commands = [self.testData.NDU_LIST_CMD,
+                    ('snap', '-list', '-res', 1)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   ('No snap', 1023)]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        CommandLineHelper.get_array_serial = mock.Mock(
+            return_value={'array_serial': "FNM00124500890"})
+
+        extra_specs = {'storagetype:provisioning': 'thin'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+        self.driver.retype(None, self.testData.test_volume3,
+                           new_type_data,
+                           diff_data,
+                           host_test_data)
+        expect_cmd = [
+            mock.call('snap', '-list', '-res', 1),
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol3-123456', 2, 'unit_test_pool', 'compressed', 'auto')),
+            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
+            mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(return_value=1))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
+        "get_lun_by_name",
+        mock.Mock(return_value={'lun_id': 1}))
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    @mock.patch(
+        "time.time",
+        mock.Mock(return_value=123456))
+    def test_retype_pool_changed_dedup_to_compressed_auto(self):
+        """Unit test for retype dedup to compressed and auto tiering
+        and pool changed
+        """
+        diff_data = {'encryption': {}, 'qos_specs': {},
+                     'extra_specs':
+                     {'storagetype:provsioning': ('deduplicated',
+                                                  'compressed'),
+                      'storagetype:tiering': (None, 'auto'),
+                      'storagetype:pool': ('unit_test_pool',
+                                           'unit_test_pool2')}}
+
+        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
+                         'deleted': False,
+                         'extra_specs': {'storagetype:provisioning':
+                                             'compressed',
+                                         'storagetype:tiering': 'auto',
+                                         'storagetype:pool':
+                                             'unit_test_pool2'},
+                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
+
+        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
+                          'capabilities':
+                          {'location_info': 'unit_test_pool2|FNM00124500890',
+                           'volume_backend_name': 'pool_backend_1',
+                           'storage_protocol': 'iSCSI'}}
+
+        commands = [self.testData.NDU_LIST_CMD,
+                    ('snap', '-list', '-res', 1)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   ('No snap', 1023)]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        CommandLineHelper.get_array_serial = mock.Mock(
+            return_value={'array_serial': "FNM00124500890"})
+
+        extra_specs = {'storagetype:provisioning': 'deduplicated',
+                       'storagetype:pool': 'unit_test_pool'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+        self.driver.retype(None, self.testData.test_volume3,
+                           new_type_data,
+                           diff_data,
+                           host_test_data)
+        expect_cmd = [
+            mock.call('snap', '-list', '-res', 1),
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol3-123456', 2, 'unit_test_pool2', 'compressed', 'auto')),
+            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
+            mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+        fake_cli.assert_has_calls(expect_cmd)
+
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    def test_retype_compressed_auto_to_compressed_nomovement(self):
        """Unit test for retype only tiering changed."""
        # Only the tiering policy differs, so the driver should modify
        # the existing LUN in place rather than migrating it.
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs':
                     {'storagetype:tiering': ('auto', 'nomovement')}}

        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:provisioning':
                                             'compressed',
                                         'storagetype:tiering': 'nomovement',
                                         'storagetype:pool':
                                             'unit_test_pool'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}

        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
                          'capabilities':
                          {'location_info': 'unit_test_pool|FNM00124500890',
                           'volume_backend_name': 'pool_backend_1',
                           'storage_protocol': 'iSCSI'}}

        commands = [self.testData.NDU_LIST_CMD,
                    ('snap', '-list', '-res', 1)]
        results = [self.testData.NDU_LIST_RESULT,
                   ('No snap', 1023)]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})

        extra_specs = {'storagetype:provisioning': 'compressed',
                       'storagetype:pool': 'unit_test_pool',
                       'storagetype:tiering': 'auto'}
        volume_types.get_volume_type_extra_specs = \
            mock.Mock(return_value=extra_specs)
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
        # Expect only an in-place tiering change; no LUN migration calls.
        expect_cmd = [
            mock.call('lun', '-modify', '-name', 'vol3', '-o', '-initialTier',
                      'optimizePool', '-tieringPolicy', 'noMovement')]
        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(return_value=1))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
+        "get_lun_by_name",
+        mock.Mock(return_value={'lun_id': 1}))
+    def test_retype_compressed_to_thin_cross_array(self):
+        """Unit test for retype cross array."""
+        diff_data = {'encryption': {}, 'qos_specs': {},
+                     'extra_specs':
+                     {'storagetype:provsioning': ('compressed', 'thin')}}
+
+        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
+                         'deleted': False,
+                         'extra_specs': {'storagetype:provisioning': 'thin',
+                                         'storagetype:pool':
+                                             'unit_test_pool'},
+                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
+
+        host_test_data = {'host': 'ubuntu-server12@pool_backend_2',
+                          'capabilities':
+                          {'location_info': 'unit_test_pool|FNM00124500891',
+                           'volume_backend_name': 'pool_backend_2',
+                           'storage_protocol': 'iSCSI'}}
+
+        commands = [self.testData.NDU_LIST_CMD,
+                    ('snap', '-list', '-res', 1)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   ('No snap', 1023)]
+        self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        CommandLineHelper.get_array_serial = mock.Mock(
+            return_value={'array_serial': "FNM00124500890"})
+
+        extra_specs = {'storagetype:provisioning': 'thin',
+                       'storagetype:pool': 'unit_test_pool'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+        retyped = self.driver.retype(None, self.testData.test_volume3,
+                                     new_type_data, diff_data,
+                                     host_test_data)
+        self.assertFalse(retyped,
+                         "Retype should failed due to"
+                         " different protocol or array")
+
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(return_value=1))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
+        "get_lun_by_name",
+        mock.Mock(return_value={'lun_id': 1}))
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    @mock.patch(
+        "time.time",
+        mock.Mock(return_value=123456))
+    def test_retype_thin_auto_to_dedup_diff_procotol(self):
+        """Unit test for retype different procotol."""
+        diff_data = {'encryption': {}, 'qos_specs': {},
+                     'extra_specs':
+                     {'storagetype:provsioning': ('thin', 'deduplicated'),
+                      'storagetype:tiering': ('auto', None)}}
+
+        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
+                         'deleted': False,
+                         'extra_specs': {'storagetype:provisioning':
+                                             'deduplicated',
+                                         'storagetype:pool':
+                                             'unit_test_pool'},
+                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
+
+        host_test_data = {'host': 'ubuntu-server12@pool_backend_2',
+                          'capabilities':
+                          {'location_info': 'unit_test_pool|FNM00124500890',
+                           'volume_backend_name': 'pool_backend_2',
+                           'storage_protocol': 'FC'}}
+
+        commands = [self.testData.NDU_LIST_CMD,
+                    ('snap', '-list', '-res', 1)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   ('No snap', 1023)]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        CommandLineHelper.get_array_serial = mock.Mock(
+            return_value={'array_serial': "FNM00124500890"})
+
+        extra_specs = {'storagetype:provisioning': 'thin',
+                       'storagetype:tiering': 'auto',
+                       'storagetype:pool': 'unit_test_pool'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        self.driver.retype(None, self.testData.test_volume3,
+                           new_type_data,
+                           diff_data,
+                           host_test_data)
+        expect_cmd = [
+            mock.call('snap', '-list', '-res', 1),
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
+            mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(return_value=1))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
+        "get_lun_by_name",
+        mock.Mock(return_value={'lun_id': 1}))
+    def test_retype_thin_auto_has_snap_to_thick_highestavailable(self):
+        """Unit test for retype volume has snap when need migration."""
+        diff_data = {'encryption': {}, 'qos_specs': {},
+                     'extra_specs':
+                     {'storagetype:provsioning': ('thin', None),
+                      'storagetype:tiering': ('auto', 'highestAvailable')}}
+
+        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
+                         'deleted': False,
+                         'extra_specs': {'storagetype:tiering':
+                                             'highestAvailable',
+                                         'storagetype:pool':
+                                             'unit_test_pool'},
+                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
+
+        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
+                          'capabilities':
+                          {'location_info': 'unit_test_pool|FNM00124500890',
+                           'volume_backend_name': 'pool_backend_1',
+                           'storage_protocol': 'iSCSI'}}
+
+        commands = [self.testData.NDU_LIST_CMD,
+                    ('snap', '-list', '-res', 1)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   ('Has snap', 0)]
+        self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        CommandLineHelper.get_array_serial = mock.Mock(
+            return_value={'array_serial': "FNM00124500890"})
+
+        extra_specs = {'storagetype:provisioning': 'thin',
+                       'storagetype:tiering': 'auto',
+                       'storagetype:pool': 'unit_test_pool'}
+        volume_types.get_volume_type_extra_specs = \
+            mock.Mock(return_value=extra_specs)
+
+        retyped = self.driver.retype(None, self.testData.test_volume3,
+                                     new_type_data,
+                                     diff_data,
+                                     host_test_data)
+        self.assertFalse(retyped,
+                         "Retype should failed due to"
+                         " different protocol or array")
+
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
        mock.Mock(return_value=1))
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 1}))
    def test_retype_thin_auto_to_thin_auto(self):
        """Unit test for retype volume which has no change."""
        # Empty extra_specs diff: old and new types are equivalent, so
        # the retype should be a no-op (no CLI expectations asserted).
        diff_data = {'encryption': {}, 'qos_specs': {},
                     'extra_specs': {}}

        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
                         'deleted': False,
                         'extra_specs': {'storagetype:tiering':
                                             'auto',
                                         'storagetype:provisioning':
                                             'thin'},
                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}

        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
                          'capabilities':
                          {'location_info': 'unit_test_pool|FNM00124500890',
                           'volume_backend_name': 'pool_backend_1',
                           'storage_protocol': 'iSCSI'}}

        commands = [self.testData.NDU_LIST_CMD]
        results = [self.testData.NDU_LIST_RESULT]
        self.driverSetup(commands, results)
        self.driver.cli.enablers = ['-Compression',
                                    '-Deduplication',
                                    '-ThinProvisioning',
                                    '-FAST']
        CommandLineHelper.get_array_serial = mock.Mock(
            return_value={'array_serial': "FNM00124500890"})

        extra_specs = {'storagetype:provisioning': 'thin',
                       'storagetype:tiering': 'auto',
                       'storagetype:pool': 'unit_test_pool'}
        volume_types.get_volume_type_extra_specs = \
            mock.Mock(return_value=extra_specs)
        self.driver.retype(None, self.testData.test_volume3,
                           new_type_data,
                           diff_data,
                           host_test_data)
+
    def test_create_volume_with_fastcache(self):
        """Test that FAST Cache status is reported when creating a volume.

        Uses a pool-based driver whose pool reports FAST Cache enabled
        and verifies both the reported capability and the CLI sequence
        used to create the LUN.
        """
        extra_specs = {'fast_cache_enabled': 'True'}
        volume_types.get_volume_type_extra_specs = \
            mock.Mock(return_value=extra_specs)

        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                    self.testData.NDU_LIST_CMD,
                    self.testData.CHECK_FASTCACHE_CMD(
                        self.testData.test_pool_name)]
        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
                   SUCCEED,
                   ('FAST Cache:  Enabled', 0)]
        fake_cli = self.driverSetup(commands, results)

        # Canned LUN properties returned by the stubbed name lookup.
        lun_info = {'lun_name': "vol_with_type",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready",
                    'status': 'OK(0x0)',
                    'operation': 'None'
                    }

        # Rebuild the driver with a pool name so the pool-based CLI
        # implementation (EMCVnxCliPool) is selected.
        self.configuration.storage_vnx_pool_name = \
            self.testData.test_pool_name
        self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)

        cli_helper = self.driver.cli._client
        cli_helper.command_execute = fake_cli
        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
        cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
        self.driver.update_volume_stats()
        self.driver.create_volume(self.testData.test_volume_with_type)
        self.assertEqual(self.driver.cli.stats['fast_cache_enabled'], 'True')
        expect_cmd = [
            mock.call('storagepool', '-list', '-name',
                      'Pool_02_SASFLASH', '-userCap', '-availableCap'),
            mock.call('-np', 'storagepool', '-list', '-name',
                      'Pool_02_SASFLASH', '-fastcache'),
            mock.call('lun', '-create', '-capacity',
                      1, '-sq', 'gb', '-poolName', 'Pool_02_SASFLASH',
                      '-name', 'vol_with_type', '-type', 'NonThin')
        ]

        fake_cli.assert_has_calls(expect_cmd)
+
    def test_get_lun_id_provider_location_exists(self):
        """get_lun_id should parse the id out of provider_location."""
        self.driverSetup()
        # provider_location carries 'lun_id^1', so no CLI lookup is
        # needed to resolve the LUN id.
        volume_01 = {
            'name': 'vol_01',
            'size': 1,
            'volume_name': 'vol_01',
            'id': '1',
            'name_id': '1',
            'provider_location': 'system^FNM11111|type^lun|lun_id^1',
            'project_id': 'project',
            'display_name': 'vol_01',
            'display_description': 'test volume',
            'volume_type_id': None,
            'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]}
        self.assertEqual(self.driver.cli.get_lun_id(volume_01), 1)
+
    @mock.patch(
        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
        "get_lun_by_name",
        mock.Mock(return_value={'lun_id': 2}))
    def test_get_lun_id_provider_location_has_no_lun_id(self):
        """get_lun_id should fall back to a lookup by LUN name when
        provider_location carries no 'lun_id' field.
        """
        self.driverSetup()
        volume_02 = {
            'name': 'vol_02',
            'size': 1,
            'volume_name': 'vol_02',
            'id': '2',
            'provider_location': 'system^FNM11111|type^lun|',
            'project_id': 'project',
            'display_name': 'vol_02',
            'display_description': 'test volume',
            'volume_type_id': None,
            'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]}
        # Id 2 comes from the mocked get_lun_by_name above.
        self.assertEqual(self.driver.cli.get_lun_id(volume_02), 2)
+
    def succeed_fake_command_execute(self, *command, **kwargv):
        # Stub for command_execute that reports success for any command.
        return SUCCEED
+
    def fake_get_pool_properties(self, filter_option, properties=None):
        # Canned pool properties used in place of a real CLI query;
        # filter_option/properties are accepted but ignored.
        pool_info = {'pool_name': "unit_test_pool0",
                     'total_capacity_gb': 1000.0,
                     'free_capacity_gb': 1000.0
                     }
        return pool_info
+
    def fake_get_lun_properties(self, filter_option, properties=None):
        # Canned LUN properties used in place of a real CLI query;
        # filter_option/properties are accepted but ignored.
        lun_info = {'lun_name': "vol1",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        return lun_info
+
+    def fake_safe_get(self, value):
+        if value == "storage_vnx_pool_name":
+            return "unit_test_pool"
+        elif 'volume_backend_name' == value:
+            return "namedbackend"
+        else:
+            return None
+
+
+class EMCVNXCLIDriverFCTestCase(test.TestCase):
+
    def setUp(self):
        """Stub out the CLI layer and build a test FC configuration."""
        super(EMCVNXCLIDriverFCTestCase, self).setUp()

        self.stubs.Set(CommandLineHelper, 'command_execute',
                       self.succeed_fake_command_execute)
        self.stubs.Set(CommandLineHelper, 'get_array_serial',
                       mock.Mock(return_value={'array_serial':
                                               "fakeSerial"}))
        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))

        # Shrink the driver's retry intervals so polling loops finish
        # quickly in tests.
        self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
        self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)
        self.stubs.Set(emc_vnx_cli, 'INTERVAL_60_SEC', 0.01)

        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
        self.configuration.san_ip = '10.0.0.1'
        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
        self.configuration.san_login = 'sysadmin'
        self.configuration.san_password = 'sysadmin'
        # NOTE(review): 0.0002 * 60 = 0.012 s (12 ms), not 1.2 ms as the
        # old comment claimed; the tiny value keeps timeout loops short.
        self.configuration.default_timeout = 0.0002
        self.configuration.initiator_auto_registration = True
        self.configuration.zoning_mode = None
        self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
        self.testData = EMCVNXCLIDriverTestData()
        self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
+
    def tearDown(self):
        # Nothing driver-specific to clean up; defer to the base class.
        super(EMCVNXCLIDriverFCTestCase, self).tearDown()
+
    def driverSetup(self, commands=tuple(), results=tuple()):
        """Create an FC driver whose CLI replays canned command results.

        Returns the mock standing in for command_execute so tests can
        assert on the exact CLI calls issued.
        """
        self.driver = EMCCLIFCDriver(configuration=self.configuration)
        fake_command_execute = self.get_command_execute_simulator(
            commands, results)
        fake_cli = mock.Mock(side_effect=fake_command_execute)
        self.driver.cli._client.command_execute = fake_cli
        return fake_cli
+
+    def get_command_execute_simulator(self, commands=tuple(),
+                                      results=tuple()):
+
+        assert(len(commands) == len(results))
+
+        def fake_command_execute(*args, **kwargv):
+            for i in range(len(commands)):
+                if args == commands[i]:
+                    if isinstance(results[i], list):
+                        if len(results[i]) > 0:
+                            ret = results[i][0]
+                            del results[i][0]
+                            return ret
+                    else:
+                        return results[i]
+            return self.standard_fake_command_execute(*args, **kwargv)
+        return fake_command_execute
+
+    def standard_fake_command_execute(self, *args, **kwargv):
+        standard_commands = [
+            self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+            self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+            self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
+            self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
+            self.testData.POOL_PROPERTY_CMD]
+
+        standard_results = [
+            self.testData.LUN_PROPERTY('vol1'),
+            self.testData.LUN_PROPERTY('vol2'),
+            self.testData.LUN_PROPERTY('vol-vol1'),
+            self.testData.LUN_PROPERTY('snapshot1'),
+            self.testData.POOL_PROPERTY]
+
+        standard_default = SUCCEED
+        for i in range(len(standard_commands)):
+            if args == standard_commands[i]:
+                return standard_results[i]
+
+        return standard_default
+
    def succeed_fake_command_execute(self, *command, **kwargv):
        # Stub for command_execute that reports success for any command.
        return SUCCEED
+
    def fake_get_pool_properties(self, filter_option, properties=None):
        # Canned pool properties used in place of a real CLI query;
        # filter_option/properties are accepted but ignored.
        pool_info = {'pool_name': "unit_test_pool0",
                     'total_capacity_gb': 1000.0,
                     'free_capacity_gb': 1000.0
                     }
        return pool_info
+
    def fake_get_lun_properties(self, filter_option, properties=None):
        # Canned LUN properties used in place of a real CLI query;
        # filter_option/properties are accepted but ignored.
        lun_info = {'lun_name': "vol1",
                    'lun_id': 1,
                    'pool': "unit_test_pool",
                    'attached_snapshot': "N/A",
                    'owner': "A",
                    'total_capacity_gb': 1.0,
                    'state': "Ready"}
        return lun_info
+
+    def fake_safe_get(self, value):
+        if value == "storage_vnx_pool_name":
+            return "unit_test_pool"
+        elif 'volume_backend_name' == value:
+            return "namedbackend"
+        else:
+            return None
+
    @mock.patch(
        "cinder.openstack.common.processutils.execute",
        mock.Mock(
            return_value=(
                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
    @mock.patch("random.shuffle", mock.Mock())
    def test_initialize_connection_fc_auto_reg(self):
        """FC initialize_connection with and without auto registration.

        First pass: auto registration enabled, so the driver creates the
        storage group and registers the initiator WWNs itself.  Second
        pass: manual registration, so only connecthost is issued.
        """
        # Test for auto registration
        self.configuration.initiator_auto_registration = True
        commands = [('storagegroup', '-list', '-gname', 'fakehost'),
                    ('storagegroup', '-list'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        # The per-gname listing is consumed three times: "No group" (so
        # the group gets created), then no-map, then has-map.
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]

        fake_cli = self.driverSetup(commands, results)
        data = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector)

        # test_volume is read-only, hence 'ro' access mode.
        self.assertEqual(data['data']['access_mode'], 'ro')

        expected = [
            mock.call('storagegroup', '-list', '-gname', 'fakehost'),
            mock.call('storagegroup', '-create', '-gname', 'fakehost'),
            mock.call('storagegroup', '-list'),
            mock.call('port', '-list', '-sp'),
            mock.call('storagegroup', '-gname', 'fakehost',
                      '-setpath', '-hbauid',
                      '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56',
                      '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
                      '-host', 'fakehost', '-o'),
            mock.call('port', '-list', '-sp'),
            mock.call('storagegroup', '-gname', 'fakehost',
                      '-setpath', '-hbauid',
                      '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16',
                      '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
                      '-host', 'fakehost', '-o'),
            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
            mock.call('storagegroup', '-list', '-gname', 'fakehost'),
            mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
                      '-gname', 'fakehost'),
            mock.call('port', '-list', '-gname', 'fakehost'),
            mock.call('storagegroup', '-list', '-gname', 'fakehost'),
            mock.call('port', '-list', '-sp')]
        fake_cli.assert_has_calls(expected)

        # Test for manual registration
        self.configuration.initiator_auto_registration = False

        commands = [('storagegroup', '-list', '-gname', 'fakehost'),
                    ('storagegroup', '-list'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
                   ('', 0),
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        data = self.driver.initialize_connection(
            self.testData.test_volume_rw,
            self.testData.connector)

        self.assertEqual(data['data']['access_mode'], 'rw')

        # No -setpath registration calls expected in manual mode.
        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
                              '-gname', 'fakehost'),
                    mock.call('port', '-list', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
                    mock.call('port', '-list', '-sp')]
        fake_cli.assert_has_calls(expected)
+
    @mock.patch(
        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
        "get_device_mapping_from_network",
        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
    @mock.patch("random.shuffle", mock.Mock())
    def test_initialize_connection_fc_auto_zoning(self):
        """FC initialize_connection with fabric zoning enabled.

        With zoning_mode 'fabric' and a stubbed SAN lookup service, the
        returned connection info must carry the initiator/target map for
        the zone manager.
        """
        # Test for auto zoning
        self.configuration.zoning_mode = 'fabric'
        self.configuration.initiator_auto_registration = False
        commands = [('storagegroup', '-list', '-gname', 'fakehost'),
                    ('storagegroup', '-list'),
                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                    self.testData.GETFCPORT_CMD(),
                    ('port', '-list', '-gname', 'fakehost')]
        results = [[("No group", 83),
                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
                   self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
                   ('', 0),
                   self.testData.FC_PORTS,
                   self.testData.FAKEHOST_PORTS]
        fake_cli = self.driverSetup(commands, results)
        self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
            configuration=self.configuration)

        conn_info = self.driver.initialize_connection(
            self.testData.test_volume,
            self.testData.connector)

        self.assertEqual(conn_info['data']['initiator_target_map'],
                         EMCVNXCLIDriverTestData.i_t_map)
        self.assertEqual(conn_info['data']['target_wwn'],
                         ['1122334455667777'])
        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-connecthost',
                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
                    mock.call('lun', '-list', '-name', 'vol1',
                              '-state', '-status', '-opDetails',
                              '-userCap', '-owner', '-attachedSnapshot'),
                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
                              '-gname', 'fakehost'),
                    mock.call('port', '-list', '-gname', 'fakehost'),
                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
                    mock.call('port', '-list', '-sp')]
        fake_cli.assert_has_calls(expected)
+
+    @mock.patch(
+        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
+        "get_device_mapping_from_network",
+        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
+    def test_terminate_connection_remove_zone_false(self):
+        self.driver = EMCCLIFCDriver(configuration=self.configuration)
+        cli_helper = self.driver.cli._client
+        data = {'storage_group_name': "fakehost",
+                'storage_group_uid': "2F:D4:00:00:00:00:00:"
+                "00:00:00:FF:E5:3A:03:FD:6D",
+                'lunmap': {1: 16, 2: 88, 3: 47}}
+        cli_helper.get_storage_group = mock.Mock(
+            return_value=data)
+        lun_info = {'lun_name': "unit_test_lun",
+                    'lun_id': 1,
+                    'pool': "unit_test_pool",
+                    'attached_snapshot': "N/A",
+                    'owner': "A",
+                    'total_capacity_gb': 1.0,
+                    'state': "Ready"}
+        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
+        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
+        self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
+            configuration=self.configuration)
+        connection_info = self.driver.terminate_connection(
+            self.testData.test_volume,
+            self.testData.connector)
+        self.assertFalse('initiator_target_map' in connection_info['data'],
+                         'initiator_target_map should not appear.')
+
+        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
+            16, self.testData.connector["host"])
+
+    @mock.patch(
+        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
+        "get_device_mapping_from_network",
+        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
+    def test_terminate_connection_remove_zone_true(self):
+        self.driver = EMCCLIFCDriver(configuration=self.configuration)
+        cli_helper = self.driver.cli._client
+        data = {'storage_group_name': "fakehost",
+                'storage_group_uid': "2F:D4:00:00:00:00:00:"
+                "00:00:00:FF:E5:3A:03:FD:6D",
+                'lunmap': {}}
+        cli_helper.get_storage_group = mock.Mock(
+            return_value=data)
+        lun_info = {'lun_name': "unit_test_lun",
+                    'lun_id': 1,
+                    'pool': "unit_test_pool",
+                    'attached_snapshot': "N/A",
+                    'owner': "A",
+                    'total_capacity_gb': 1.0,
+                    'state': "Ready"}
+        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
+        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
+        self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
+            configuration=self.configuration)
+        connection_info = self.driver.terminate_connection(
+            self.testData.test_volume,
+            self.testData.connector)
+        self.assertTrue('initiator_target_map' in connection_info['data'],
+                        'initiator_target_map should be populated.')
+        self.assertEqual(connection_info['data']['initiator_target_map'],
+                         EMCVNXCLIDriverTestData.i_t_map)
+
+    def test_get_volume_stats(self):
+        #expect_result = [POOL_PROPERTY]
+        self.driverSetup()
+        stats = self.driver.get_volume_stats(True)
+        self.assertTrue(stats['driver_version'] is not None,
+                        "driver_version is not returned")
+        self.assertTrue(
+            stats['free_capacity_gb'] == 1000.6,
+            "free_capacity_gb is not correct")
+        self.assertTrue(
+            stats['reserved_percentage'] == 0,
+            "reserved_percentage is not correct")
+        self.assertTrue(
+            stats['storage_protocol'] == 'FC',
+            "storage_protocol is not correct")
+        self.assertTrue(
+            stats['total_capacity_gb'] == 10000.5,
+            "total_capacity_gb is not correct")
+        self.assertTrue(
+            stats['vendor_name'] == "EMC",
+            "vender name is not correct")
+        self.assertTrue(
+            stats['volume_backend_name'] == "namedbackend",
+            "volume backend name is not correct")
+        self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
+        self.assertTrue(
+            stats['driver_version'] == "04.00.00",
+            "driver version is incorrect.")
+
+
+class EMCVNXCLIToggleSPTestData():
+    def FAKE_COMMAND_PREFIX(self, sp_address):
+        return ('/opt/Navisphere/bin/naviseccli', '-address', sp_address,
+                '-user', 'sysadmin', '-password', 'sysadmin',
+                '-scope', 'global')
+
+
+class EMCVNXCLIToggleSPTestCase(test.TestCase):
+    def setUp(self):
+        super(EMCVNXCLIToggleSPTestCase, self).setUp()
+        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
+        self.configuration = mock.Mock(conf.Configuration)
+        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
+        self.configuration.san_ip = '10.10.10.10'
+        self.configuration.san_secondary_ip = "10.10.10.11"
+        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
+        self.configuration.san_login = 'sysadmin'
+        self.configuration.san_password = 'sysadmin'
+        self.configuration.default_timeout = 1
+        self.configuration.max_luns_per_storage_group = 10
+        self.configuration.destroy_empty_storage_group = 10
+        self.configuration.storage_vnx_authentication_type = "global"
+        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
+        self.configuration.zoning_mode = None
+        self.configuration.storage_vnx_security_file_dir = ""
+        self.cli_client = emc_vnx_cli.CommandLineHelper(
+            configuration=self.configuration)
+        self.test_data = EMCVNXCLIToggleSPTestData()
+
+    def tearDown(self):
+        super(EMCVNXCLIToggleSPTestCase, self).tearDown()
+
+    def test_no_sp_toggle(self):
+        self.cli_client.active_storage_ip = '10.10.10.10'
+        FAKE_SUCCESS_RETURN = ('success', 0)
+        FAKE_COMMAND = ('list', 'pool')
+        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN, FAKE_SUCCESS_RETURN]
+
+        with mock.patch('cinder.utils.execute') as mock_utils:
+            mock_utils.side_effect = SIDE_EFFECTS
+            self.cli_client.command_execute(*FAKE_COMMAND)
+            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.10")
+            expected = [mock.call(*('ping', '-c', 1, '10.10.10.10'),
+                                  check_exit_code=True),
+                        mock.call(
+                            *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+                              + FAKE_COMMAND),
+                            check_exit_code=True)]
+            mock_utils.assert_has_calls(expected)
+
+    def test_toggle_sp_with_server_unavailable(self):
+        self.cli_client.active_storage_ip = '10.10.10.10'
+        FAKE_ERROR_MSG = """\
+Error occurred during HTTP request/response from the target: '10.244.213.142'.
+Message : HTTP/1.1 503 Service Unavailable"""
+        FAKE_SUCCESS_RETURN = ('success', 0)
+        FAKE_COMMAND = ('list', 'pool')
+        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
+                        processutils.ProcessExecutionError(
+                            exit_code=255, stdout=FAKE_ERROR_MSG),
+                        FAKE_SUCCESS_RETURN]
+
+        with mock.patch('cinder.utils.execute') as mock_utils:
+            mock_utils.side_effect = SIDE_EFFECTS
+            self.cli_client.command_execute(*FAKE_COMMAND)
+            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+            expected = [
+                mock.call(
+                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+                        + FAKE_COMMAND),
+                    check_exit_code=True),
+                mock.call(
+                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
+                        + FAKE_COMMAND),
+                    check_exit_code=True)]
+            mock_utils.assert_has_calls(expected)
+
+    def test_toggle_sp_with_end_of_data(self):
+        self.cli_client.active_storage_ip = '10.10.10.10'
+        FAKE_ERROR_MSG = """\
+Error occurred during HTTP request/response from the target: '10.244.213.142'.
+Message : End of data stream"""
+        FAKE_SUCCESS_RETURN = ('success', 0)
+        FAKE_COMMAND = ('list', 'pool')
+        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
+                        processutils.ProcessExecutionError(
+                            exit_code=255, stdout=FAKE_ERROR_MSG),
+                        FAKE_SUCCESS_RETURN]
+
+        with mock.patch('cinder.utils.execute') as mock_utils:
+            mock_utils.side_effect = SIDE_EFFECTS
+            self.cli_client.command_execute(*FAKE_COMMAND)
+            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+            expected = [
+                mock.call(
+                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+                        + FAKE_COMMAND),
+                    check_exit_code=True),
+                mock.call(
+                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
+                        + FAKE_COMMAND),
+                    check_exit_code=True)]
+            mock_utils.assert_has_calls(expected)
+
+    def test_toggle_sp_with_connection_refused(self):
+        self.cli_client.active_storage_ip = '10.10.10.10'
+        FAKE_ERROR_MSG = """\
+A network error occurred while trying to connect: '10.244.213.142'.
+Message : Error occurred because connection refused. \
+Unable to establish a secure connection to the Management Server.
+"""
+        FAKE_SUCCESS_RETURN = ('success', 0)
+        FAKE_COMMAND = ('list', 'pool')
+        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
+                        processutils.ProcessExecutionError(
+                            exit_code=255, stdout=FAKE_ERROR_MSG),
+                        FAKE_SUCCESS_RETURN]
+
+        with mock.patch('cinder.utils.execute') as mock_utils:
+            mock_utils.side_effect = SIDE_EFFECTS
+            self.cli_client.command_execute(*FAKE_COMMAND)
+            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+            expected = [
+                mock.call(
+                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+                        + FAKE_COMMAND),
+                    check_exit_code=True),
+                mock.call(
+                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
+                        + FAKE_COMMAND),
+                    check_exit_code=True)]
+            mock_utils.assert_has_calls(expected)
diff --git a/cinder/volume/drivers/emc/emc_cli_fc.py b/cinder/volume/drivers/emc/emc_cli_fc.py
new file mode 100644 (file)
index 0000000..0ce6f58
--- /dev/null
@@ -0,0 +1,219 @@
+# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Fibre Channel Driver for EMC VNX array based on CLI.
+
+"""
+
+from cinder.openstack.common import log as logging
+from cinder.volume import driver
+from cinder.volume.drivers.emc import emc_vnx_cli
+from cinder.zonemanager.utils import AddFCZone
+from cinder.zonemanager.utils import RemoveFCZone
+
+
+LOG = logging.getLogger(__name__)
+
+
+class EMCCLIFCDriver(driver.FibreChannelDriver):
+    """EMC FC Driver for VNX using CLI.
+
+    Version history:
+        1.0.0 - Initial driver
+        2.0.0 - Thick/thin provisioning, robust enhancement
+        3.0.0 - Array-based Backend Support, FC Basic Support,
+                Target Port Selection for MPIO,
+                Initiator Auto Registration,
+                Storage Group Auto Deletion,
+                Multiple Authentication Type Support,
+                Storage-Assisted Volume Migration,
+                SP Toggle for HA
+        3.0.1 - Security File Support
+        4.0.0 - Advance LUN Features (Compression Support,
+                Deduplication Support, FAST VP Support,
+                FAST Cache Support), Storage-assisted Retype,
+                External Volume Management, Read-only Volume,
+                FC Auto Zoning
+    """
+
+    def __init__(self, *args, **kwargs):
+
+        super(EMCCLIFCDriver, self).__init__(*args, **kwargs)
+        self.cli = emc_vnx_cli.getEMCVnxCli(
+            'FC',
+            configuration=self.configuration)
+        self.VERSION = self.cli.VERSION
+
+    def check_for_setup_error(self):
+        pass
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        return self.cli.create_volume(volume)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot."""
+        return self.cli.create_volume_from_snapshot(volume, snapshot)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a cloned volume."""
+        return self.cli.create_cloned_volume(volume, src_vref)
+
+    def extend_volume(self, volume, new_size):
+        """Extend a volume."""
+        self.cli.extend_volume(volume, new_size)
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        self.cli.delete_volume(volume)
+
+    def migrate_volume(self, ctxt, volume, host):
+        """Migrate volume via EMC migration functionality."""
+        return self.cli.migrate_volume(ctxt, volume, host)
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Convert the volume to be of the new type."""
+        return self.cli.retype(ctxt, volume, new_type, diff, host)
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+        self.cli.create_snapshot(snapshot)
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        self.cli.delete_snapshot(snapshot)
+
+    def ensure_export(self, context, volume):
+        """Driver entry point to get the export info for an existing volume."""
+        pass
+
+    def create_export(self, context, volume):
+        """Driver entry point to get the export info for a new volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Driver entry point to remove an export for a volume."""
+        pass
+
+    def check_for_export(self, context, volume_id):
+        """Make sure volume is exported."""
+        pass
+
+    @AddFCZone
+    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns connection info.
+
+        Assign any created volume to a compute node/host so that it can be
+        used from that host.
+
+        The driver returns a driver_volume_type of 'fibre_channel'.
+        The target_wwn can be a single entry or a list of wwns that
+        correspond to the list of remote wwn(s) that will export the volume.
+        The initiator_target_map is a map that represents the remote wwn(s)
+        and a list of wwns which are visible to the remote wwn(s).
+        Example return values:
+
+            {
+                'driver_volume_type': 'fibre_channel'
+                'data': {
+                    'target_discovered': True,
+                    'target_lun': 1,
+                    'target_wwn': '1234567890123',
+                    'access_mode': 'rw'
+                    'initiator_target_map': {
+                        '1122334455667788': ['1234567890123']
+                    }
+                }
+            }
+
+            or
+
+             {
+                'driver_volume_type': 'fibre_channel'
+                'data': {
+                    'target_discovered': True,
+                    'target_lun': 1,
+                    'target_wwn': ['1234567890123', '0987654321321'],
+                    'access_mode': 'rw'
+                    'initiator_target_map': {
+                        '1122334455667788': ['1234567890123',
+                                             '0987654321321']
+                    }
+                }
+            }
+
+        """
+        conn_info = self.cli.initialize_connection(volume,
+                                                   connector)
+        conn_info = self.cli.adjust_fc_conn_info(conn_info, connector)
+        LOG.debug("Exit initialize_connection"
+                  " - Returning FC connection info: %(conn_info)s."
+                  % {'conn_info': conn_info})
+
+        return conn_info
+
+    @RemoveFCZone
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Disallow connection from connector."""
+        remove_zone = self.cli.terminate_connection(volume, connector)
+        conn_info = {'driver_volume_type': 'fibre_channel',
+                     'data': {}}
+        conn_info = self.cli.adjust_fc_conn_info(conn_info, connector,
+                                                 remove_zone)
+        LOG.debug("Exit terminate_connection"
+                  " - Returning FC connection info: %(conn_info)s."
+                  % {'conn_info': conn_info})
+
+        return conn_info
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, run update the stats first.
+        """
+        if refresh:
+            self.update_volume_stats()
+
+        return self._stats
+
+    def update_volume_stats(self):
+        """Retrieve stats info from volume group."""
+        LOG.debug("Updating volume stats.")
+        data = self.cli.update_volume_stats()
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data['volume_backend_name'] = backend_name or 'EMCCLIFCDriver'
+        data['storage_protocol'] = 'FC'
+        self._stats = data
+
+    def manage_existing(self, volume, existing_ref):
+        """Manage an existing lun in the array.
+
+        The lun should be in a manageable pool backend, otherwise
+        error would return.
+        Rename the backend storage object so that it matches the,
+        volume['name'] which is how drivers traditionally map between a
+        cinder volume and the associated backend storage object.
+
+        existing_ref:{
+            'id':lun_id
+        }
+        """
+        LOG.debug("Reference lun id %s." % existing_ref['id'])
+        self.cli.manage_existing(volume, existing_ref)
+
+    def manage_existing_get_size(self, volume, existing_ref):
+        """Return size of volume to be managed by manage_existing.
+        """
+        return self.cli.manage_existing_get_size(volume, existing_ref)
index faaded813437d3759a8c46408b9670de5f3ad97c..c7d37d36879bdf21abd1fbb16b3eb61d090ae8c3 100644 (file)
@@ -17,10 +17,7 @@ iSCSI Drivers for EMC VNX array based on CLI.
 
 """
 
-from cinder import exception
-from cinder.openstack.common.gettextutils import _
 from cinder.openstack.common import log as logging
-from cinder import utils
 from cinder.volume import driver
 from cinder.volume.drivers.emc import emc_vnx_cli
 
@@ -28,34 +25,64 @@ LOG = logging.getLogger(__name__)
 
 
 class EMCCLIISCSIDriver(driver.ISCSIDriver):
-    """EMC ISCSI Drivers for VNX using CLI."""
+    """EMC ISCSI Drivers for VNX using CLI.
+
+    Version history:
+        1.0.0 - Initial driver
+        2.0.0 - Thick/thin provisioning, robust enhancement
+        3.0.0 - Array-based Backend Support, FC Basic Support,
+                Target Port Selection for MPIO,
+                Initiator Auto Registration,
+                Storage Group Auto Deletion,
+                Multiple Authentication Type Support,
+                Storage-Assisted Volume Migration,
+                SP Toggle for HA
+        3.0.1 - Security File Support
+        4.0.0 - Advance LUN Features (Compression Support,
+                Deduplication Support, FAST VP Support,
+                FAST Cache Support), Storage-assisted Retype,
+                External Volume Management, Read-only Volume,
+                FC Auto Zoning
+    """
 
     def __init__(self, *args, **kwargs):
 
         super(EMCCLIISCSIDriver, self).__init__(*args, **kwargs)
-        self.cli = emc_vnx_cli.EMCVnxCli(
+        self.cli = emc_vnx_cli.getEMCVnxCli(
             'iSCSI',
             configuration=self.configuration)
+        self.VERSION = self.cli.VERSION
 
     def check_for_setup_error(self):
         pass
 
     def create_volume(self, volume):
-        """Creates a EMC(VMAX/VNX) volume."""
-        self.cli.create_volume(volume)
+        """Creates a VNX volume."""
+        return self.cli.create_volume(volume)
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Creates a volume from a snapshot."""
-        self.cli.create_volume_from_snapshot(volume, snapshot)
+        return self.cli.create_volume_from_snapshot(volume, snapshot)
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a cloned volume."""
-        self.cli.create_cloned_volume(volume, src_vref)
+        return self.cli.create_cloned_volume(volume, src_vref)
+
+    def extend_volume(self, volume, new_size):
+        """Extend a volume."""
+        self.cli.extend_volume(volume, new_size)
 
     def delete_volume(self, volume):
-        """Deletes an EMC volume."""
+        """Deletes a VNX volume."""
         self.cli.delete_volume(volume)
 
+    def migrate_volume(self, ctxt, volume, host):
+        return self.cli.migrate_volume(ctxt, volume, host)
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Convert the volume to be of the new type."""
+        return self.cli.retype(ctxt, volume, new_type, diff, host)
+
     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
         self.cli.create_snapshot(snapshot)
@@ -80,157 +107,30 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
         """Make sure volume is exported."""
         pass
 
-    def extend_volume(self, volume, new_size):
-        self.cli.extend_volume(volume, new_size)
-
     def initialize_connection(self, volume, connector):
         """Initializes the connection and returns connection info.
 
         The iscsi driver returns a driver_volume_type of 'iscsi'.
         the format of the driver data is defined in vnx_get_iscsi_properties.
+        Example return value::
 
-        :param volume: volume to be attached.
-        :param connector: connector information.
-        :returns: dictionary containing iscsi_properties.
-        Example return value:
             {
                 'driver_volume_type': 'iscsi'
                 'data': {
                     'target_discovered': True,
                     'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                     'target_portal': '127.0.0.0.1:3260',
-                    'volume_id': '12345678-1234-4321-1234-123456789012',
+                    'target_lun': 1,
+                    'access_mode': 'rw'
                 }
             }
-        """
-        @utils.synchronized('emc-connection-' + connector['host'],
-                            external=True)
-        def do_initialize_connection():
-            self.cli.initialize_connection(volume, connector)
-        do_initialize_connection()
-
-        iscsi_properties = self.vnx_get_iscsi_properties(volume, connector)
-        return {
-            'driver_volume_type': 'iscsi',
-            'data': {
-                'target_discovered': True,
-                'target_iqn': iscsi_properties['target_iqn'],
-                'target_lun': iscsi_properties['target_lun'],
-                'target_portal': iscsi_properties['target_portal'],
-                'volume_id': iscsi_properties['volume_id']
-            }
-        }
-
-    def _do_iscsi_discovery(self, volume):
-
-        LOG.warn(_("iSCSI provider_location not stored for volume %s, "
-                 "using discovery.") % (volume['name']))
-
-        (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
-                                    '-t', 'sendtargets', '-p',
-                                    self.configuration.iscsi_ip_address,
-                                    run_as_root=True)
-        targets = []
-        for target in out.splitlines():
-            targets.append(target)
-
-        return targets
-
-    def vnx_get_iscsi_properties(self, volume, connector):
-        """Gets iscsi configuration.
 
-        We ideally get saved information in the volume entity, but fall back
-        to discovery if need be. Discovery may be completely removed in future
-        The properties are:
-
-        :target_discovered:    boolean indicating whether discovery was used
-
-        :target_iqn:    the IQN of the iSCSI target
-
-        :target_portal:    the portal of the iSCSI target
-
-        :target_lun:    the lun of the iSCSI target
-
-        :volume_id:    the UUID of the volume
-
-        :auth_method:, :auth_username:, :auth_password:
-
-            the authentication details. Right now, either auth_method is not
-            present meaning no authentication, or auth_method == `CHAP`
-            meaning use CHAP with the specified credentials.
         """
-        properties = {}
-
-        location = self._do_iscsi_discovery(volume)
-        if not location:
-            raise exception.InvalidVolume(_("Could not find iSCSI export "
-                                          " for volume %s") %
-                                          (volume['name']))
-
-        LOG.debug("ISCSI Discovery: Found %s" % (location))
-        properties['target_discovered'] = True
-
-        hostname = connector['host']
-        storage_group = hostname
-        device_info = self.cli.find_device_details(volume, storage_group)
-        if device_info is None or device_info['hostlunid'] is None:
-            exception_message = (_("Cannot find device number for volume %s")
-                                 % volume['name'])
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        device_number = device_info['hostlunid']
-        device_sp = device_info['ownersp']
-        endpoints = []
-
-        if device_sp:
-            # endpoints example:
-            # [iqn.1992-04.com.emc:cx.apm00123907237.a8,
-            # iqn.1992-04.com.emc:cx.apm00123907237.a9]
-            endpoints = self.cli._find_iscsi_protocol_endpoints(device_sp)
-
-        foundEndpoint = False
-        for loc in location:
-            results = loc.split(" ")
-            properties['target_portal'] = results[0].split(",")[0]
-            properties['target_iqn'] = results[1]
-            # for VNX, find the target_iqn that matches the endpoint
-            # target_iqn example: iqn.1992-04.com.emc:cx.apm00123907237.a8
-            # or iqn.1992-04.com.emc:cx.apm00123907237.b8
-            if not device_sp:
-                break
-            for endpoint in endpoints:
-                if properties['target_iqn'] == endpoint:
-                    LOG.debug("Found iSCSI endpoint: %s" % endpoint)
-                    foundEndpoint = True
-                    break
-            if foundEndpoint:
-                break
-
-        if device_sp and not foundEndpoint:
-            LOG.warn(_("ISCSI endpoint not found for SP %(sp)s ")
-                     % {'sp': device_sp})
-
-        properties['target_lun'] = device_number
-
-        properties['volume_id'] = volume['id']
-
-        auth = volume['provider_auth']
-        if auth:
-            (auth_method, auth_username, auth_secret) = auth.split()
-
-            properties['auth_method'] = auth_method
-            properties['auth_username'] = auth_username
-            properties['auth_password'] = auth_secret
-
-        return properties
+        return self.cli.initialize_connection(volume, connector)
 
     def terminate_connection(self, volume, connector, **kwargs):
         """Disallow connection from connector."""
-        @utils.synchronized('emc-connection-' + connector['host'],
-                            external=True)
-        def do_terminate_connection():
-            self.cli.terminate_connection(volume, connector)
-        do_terminate_connection()
+        self.cli.terminate_connection(volume, connector)
 
     def get_volume_stats(self, refresh=False):
         """Get volume status.
@@ -239,16 +139,38 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
         """
         if refresh:
             self.update_volume_stats()
-            LOG.info(_("update_volume_status:%s"), self._stats)
 
         return self._stats
 
     def update_volume_stats(self):
         """Retrieve status info from volume group."""
-        LOG.debug("Updating volume status")
+        LOG.debug("Updating volume status.")
         # retrieving the volume update from the VNX
-        data = self.cli.update_volume_status()
+        data = self.cli.update_volume_stats()
+
         backend_name = self.configuration.safe_get('volume_backend_name')
         data['volume_backend_name'] = backend_name or 'EMCCLIISCSIDriver'
         data['storage_protocol'] = 'iSCSI'
+
         self._stats = data
+
+    def manage_existing(self, volume, existing_ref):
+        """Manage an existing lun in the array.
+
+        The lun should be in a manageable pool backend, otherwise
+        error would return.
+        Rename the backend storage object so that it matches the,
+        volume['name'] which is how drivers traditionally map between a
+        cinder volume and the associated backend storage object.
+
+        existing_ref:{
+            'id':lun_id
+        }
+        """
+        LOG.debug("Reference lun id %s." % existing_ref['id'])
+        self.cli.manage_existing(volume, existing_ref)
+
    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing.

        :param volume: the Cinder volume that will take over the LUN
        :param existing_ref: dict carrying the backend LUN id under 'id'
        :returns: the size reported by the CLI helper for that LUN
        """
        return self.cli.manage_existing_get_size(volume, existing_ref)
index 0e93c2e2a623851a688520a3ec2b1eb61838ea90..9324c6e00996b2bf1405181b0f65e78407445545 100644 (file)
 # Copyright (c) 2012 - 2014 EMC Corporation, Inc.
 # All Rights Reserved.
 #
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#         http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
 """
-VNX CLI on iSCSI.
+VNX CLI
 """
 
import functools
import os
import random
import re
import time

from oslo.config import cfg

from cinder import exception
from cinder.exception import EMCVnxCLICmdError
from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import jsonutils as json
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.openstack.common import processutils
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume.configuration import Configuration
from cinder.volume.drivers.san import san
from cinder.volume import manager
from cinder.volume import volume_types
 
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Common polling intervals, in seconds, used with _wait_for_a_condition.
INTERVAL_5_SEC = 5
INTERVAL_30_SEC = 30
INTERVAL_60_SEC = 60

# NOTE(review): presumably passed to CLI query helpers to request
# non-polled (cached) output — confirm against callers.
NO_POLL = True
 
# Configuration options specific to the VNX CLI drivers.  They are
# registered on the global CONF below and appended to each driver's
# Configuration object at init time.
loc_opts = [
    cfg.StrOpt('storage_vnx_authentication_type',
               default='global',
               help='VNX authentication scope type.'),
    cfg.StrOpt('storage_vnx_security_file_dir',
               default=None,
               help='Directory path that contains the VNX security file. '
               'Make sure the security file is generated first.'),
    cfg.StrOpt('naviseccli_path',
               default='',
               help='Naviseccli Path.'),
    cfg.StrOpt('storage_vnx_pool_name',
               default=None,
               help='Storage pool name'),
    cfg.StrOpt('san_secondary_ip',
               default=None,
               help='VNX secondary SP IP Address.'),
    cfg.IntOpt('default_timeout',
               default=60 * 24 * 365,
               help='Default Time Out For CLI operations in minutes. '
               'By default, it is 365 days long.'),
    cfg.IntOpt('max_luns_per_storage_group',
               default=255,
               help='Default max number of LUNs in a storage group.'
               ' By default, the value is 255.'),
    cfg.BoolOpt('destroy_empty_storage_group',
                default=False,
                help='To destroy storage group '
                'when the last LUN is removed from it. '
                'By default, the value is False.'),
    cfg.StrOpt('iscsi_initiators',
               default='',
               help='Mapping between hostname and '
               'its iSCSI initiator IP addresses.'),
    cfg.BoolOpt('initiator_auto_registration',
                default=False,
                help='Automatically register initiators. '
                'By default, the value is False.'),
]

CONF.register_opts(loc_opts)
 
 
-class EMCVnxCli(object):
-    """This class defines the functions to use the native CLI functionality."""
def log_enter_exit(func):
    """Decorator tracing entry, exit, duration and return value at DEBUG.

    Intended for instance methods: the wrapped callable's first argument
    (``self``) is used only to report the class name in the trace.
    """
    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the plain inner wrapper previously clobbered.
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        LOG.debug("Entering %(cls)s.%(method)s" %
                  {'cls': self.__class__.__name__,
                   'method': func.__name__})
        start = timeutils.utcnow()
        ret = func(self, *args, **kwargs)
        end = timeutils.utcnow()
        LOG.debug("Exiting %(cls)s.%(method)s. "
                  "Spent %(duration)s sec. "
                  "Return %(return)s" %
                  {'cls': self.__class__.__name__,
                   'duration': timeutils.delta_seconds(start, end),
                   'method': func.__name__,
                   'return': ret})
        return ret
    return inner
+
+
class PropertyDescriptor(object):
    """Describes how one LUN/pool property is queried and parsed.

    Couples the naviseccli switch that requests the property, the regex
    (with one capture group) that extracts it from CLI output, the dict
    key it is stored under, and an optional converter for the raw value.
    """

    def __init__(self, option, label, key, converter=None):
        self.option, self.label, self.key, self.converter = (
            option, label, key, converter)
+
+
+class CommandLineHelper(object):
+
+    LUN_STATE = PropertyDescriptor(
+        '-state',
+        'Current State:\s*(.*)\s*',
+        'state')
+    LUN_STATUS = PropertyDescriptor(
+        '-status',
+        'Status:\s*(.*)\s*',
+        'status')
+    LUN_OPERATION = PropertyDescriptor(
+        '-opDetails',
+        'Current Operation:\s*(.*)\s*',
+        'operation')
+    LUN_CAPACITY = PropertyDescriptor(
+        '-userCap',
+        'User Capacity \(GBs\):\s*(.*)\s*',
+        'total_capacity_gb',
+        float)
+    LUN_OWNER = PropertyDescriptor(
+        '-owner',
+        'Current Owner:\s*SP\s*(.*)\s*',
+        'owner')
+    LUN_ATTACHEDSNAP = PropertyDescriptor(
+        '-attachedSnapshot',
+        'Attached Snapshot:\s*(.*)\s*',
+        'attached_snapshot')
+    LUN_NAME = PropertyDescriptor(
+        '-name',
+        'Name:\s*(.*)\s*',
+        'lun_name')
+    LUN_ID = PropertyDescriptor(
+        '-id',
+        'LOGICAL UNIT NUMBER\s*(\d+)\s*',
+        'lun_id',
+        int)
+    LUN_POOL = PropertyDescriptor(
+        '-poolName',
+        'Pool Name:\s*(.*)\s*',
+        'pool')
+
+    LUN_ALL = [LUN_STATE, LUN_STATUS, LUN_OPERATION,
+               LUN_CAPACITY, LUN_OWNER, LUN_ATTACHEDSNAP]
+
+    LUN_WITH_POOL = [LUN_STATE, LUN_CAPACITY, LUN_OWNER,
+                     LUN_ATTACHEDSNAP, LUN_POOL]
+
+    POOL_TOTAL_CAPACITY = PropertyDescriptor(
+        '-userCap',
+        'User Capacity \(GBs\):\s*(.*)\s*',
+        'total_capacity_gb',
+        float)
+    POOL_FREE_CAPACITY = PropertyDescriptor(
+        '-availableCap',
+        'Available Capacity *\(GBs\) *:\s*(.*)\s*',
+        'free_capacity_gb',
+        float)
+    POOL_NAME = PropertyDescriptor(
+        '-name',
+        'Pool Name:\s*(.*)\s*',
+        'pool_name')
+
+    POOL_ALL = [POOL_TOTAL_CAPACITY, POOL_FREE_CAPACITY]
+
    def __init__(self, configuration):
        """Validate configuration and pre-compute naviseccli settings.

        :param configuration: driver Configuration object; san_*,
            naviseccli_path and the VNX-specific loc_opts are read here
        :raises VolumeBackendAPIException: when naviseccli is missing,
            san_ip is unset, or the authentication type is invalid
        """
        configuration.append_config_values(san.san_opts)

        # default_timeout is configured in minutes; stored in seconds.
        self.timeout = configuration.default_timeout * INTERVAL_60_SEC
        self.max_luns = configuration.max_luns_per_storage_group

        # Checking for existence of naviseccli tool
        navisecclipath = configuration.naviseccli_path
        if not os.path.exists(navisecclipath):
            err_msg = _('naviseccli_path: Could not find '
                        'NAVISECCLI tool %(path)s.') % {'path': navisecclipath}
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

        self.command = (navisecclipath, '-address')
        # Primary/secondary SP addresses; the active address can be
        # toggled for HA when the current SP stops responding.
        self.active_storage_ip = configuration.san_ip
        self.primary_storage_ip = self.active_storage_ip
        self.secondary_storage_ip = configuration.san_secondary_ip
        if self.secondary_storage_ip == self.primary_storage_ip:
            LOG.warn(_("san_secondary_ip is configured as "
                       "the same value as san_ip."))
            self.secondary_storage_ip = None
        if not configuration.san_ip:
            err_msg = _('san_ip: Mandatory field configuration. '
                        'san_ip is not set.')
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

        self.credentials = ()
        storage_username = configuration.san_login
        storage_password = configuration.san_password
        storage_auth_type = configuration.storage_vnx_authentication_type
        storage_vnx_security_file = configuration.storage_vnx_security_file_dir

        if storage_auth_type is None:
            storage_auth_type = 'global'
        elif storage_auth_type.lower() not in ('ldap', 'local', 'global'):
            err_msg = (_('Invalid VNX authentication type: %s')
                       % storage_auth_type)
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        # if there is security file path provided, use this security file
        if storage_vnx_security_file:
            self.credentials = ('-secfilepath', storage_vnx_security_file)
            LOG.info(_("Using security file in %s for authentication") %
                     storage_vnx_security_file)
        # if there is a username/password provided, use those in the cmd line
        elif storage_username is not None and len(storage_username) > 0 and\
                storage_password is not None and len(storage_password) > 0:
            self.credentials = ('-user', storage_username,
                                '-password', storage_password,
                                '-scope', storage_auth_type)
            LOG.info(_("Plain text credentials are being used for "
                       "authentication"))
        else:
            # Fall back to naviseccli's own security-file lookup under
            # the user's home directory.
            LOG.info(_("Neither security file nor plain "
                       "text credentials are specified. Security file under "
                       "home directory will be used for authentication "
                       "if present."))

        self.iscsi_initiator_map = None
        if configuration.iscsi_initiators:
            # JSON mapping of hostname -> iSCSI initiator IP addresses.
            self.iscsi_initiator_map = \
                json.loads(configuration.iscsi_initiators)
            LOG.info(_("iscsi_initiators: %s"), self.iscsi_initiator_map)

        # extra spec constants
        self.pool_spec = 'storagetype:pool'
        self.tiering_spec = 'storagetype:tiering'
        self.provisioning_spec = 'storagetype:provisioning'
        # naviseccli argument fragments keyed by extra-spec value.
        self.provisioning_values = {
            'thin': ['-type', 'Thin'],
            'thick': ['-type', 'NonThin'],
            'compressed': ['-type', 'Thin'],
            'deduplicated': ['-type', 'Thin', '-deduplication', 'on']}
        self.tiering_values = {
            'starthighthenauto': [
                '-initialTier', 'highestAvailable',
                '-tieringPolicy', 'autoTier'],
            'auto': [
                '-initialTier', 'optimizePool',
                '-tieringPolicy', 'autoTier'],
            'highestavailable': [
                '-initialTier', 'highestAvailable',
                '-tieringPolicy', 'highestAvailable'],
            'lowestavailable': [
                '-initialTier', 'lowestAvailable',
                '-tieringPolicy', 'lowestAvailable'],
            'nomovement': [
                '-initialTier', 'optimizePool',
                '-tieringPolicy', 'noMovement']}
+
    @log_enter_exit
    def create_lun_with_advance_feature(self, pool, name, size,
                                        provisioning, tiering):
        """Create a LUN applying provisioning and tiering extra specs.

        :param pool: storage pool to carve the LUN from
        :param name: LUN name
        :param size: capacity in GB
        :param provisioning: key into self.provisioning_values, or falsy
        :param tiering: key into self.tiering_values, or falsy
        :returns: the LUN property dict from create_lun_by_cmd
        """
        command_create_lun = ['lun', '-create',
                              '-capacity', size,
                              '-sq', 'gb',
                              '-poolName', pool,
                              '-name', name]
        # provisioning
        if provisioning:
            command_create_lun.extend(self.provisioning_values[provisioning])
        # tiering
        if tiering:
            command_create_lun.extend(self.tiering_values[tiering])

        # create lun
        data = self.create_lun_by_cmd(command_create_lun, name)

        # handle compression
        try:
            if provisioning == 'compressed':
                self.enable_or_disable_compression_on_lun(
                    name, 'on')
        except EMCVnxCLICmdError as ex:
            # Roll back the LUN: a 'compressed' LUN without compression
            # enabled would silently violate the requested extra spec.
            with excutils.save_and_reraise_exception():
                self.delete_lun(name)
                LOG.error(_("Failed to enable compression on lun: %s") % ex)
        return data
+
    @log_enter_exit
    def create_lun_by_cmd(self, cmd, name):
        """Run a prepared 'lun -create' command and wait until it is Ready.

        Return code 4 with 0x712d8d04 ('already exists') is tolerated so
        a retried request stays idempotent.

        :returns: the property dict of the (now ready) LUN
        """
        out, rc = self.command_execute(*cmd)
        if rc != 0:
            # Ignore the error that due to retry
            if rc == 4 and out.find('(0x712d8d04)') >= 0:
                LOG.warn(_('LUN already exists, LUN name %(name)s. '
                           'Message: %(msg)s') %
                         {'name': name, 'msg': out})
            else:
                raise EMCVnxCLICmdError(cmd, rc, out)

        def lun_is_ready():
            # Usable once state/status settle and no background
            # operation is in progress.
            data = self.get_lun_by_name(name)
            return data[self.LUN_STATE.key] == 'Ready' and \
                data[self.LUN_STATUS.key] == 'OK(0x0)' and \
                data[self.LUN_OPERATION.key] == 'None'

        self._wait_for_a_condition(lun_is_ready)
        lun = self.get_lun_by_name(name)
        return lun
 
-        self.timeout = self.configuration.default_timeout
-        self.max_luns = self.configuration.max_luns_per_storage_group
-        self.hlu_set = set(xrange(1, self.max_luns + 1))
-        self.navisecclipath = self.configuration.naviseccli_path
-        self.cli_prefix = (self.navisecclipath, '-address', self.storage_ip)
-        self.cli_credentials = ()
-        self.wait_interval = 3
+    @log_enter_exit
+    def delete_lun(self, name):
 
-        # if there is a username/password provided, use those in the cmd line
-        if self.storage_username is not None and \
-                self.storage_password is not None:
-            self.cli_credentials += ('-user', self.storage_username,
-                                     '-password', self.storage_password,
-                                     '-scope', '0')
+        command_delete_lun = ['lun', '-destroy',
+                              '-name', name,
+                              '-forceDetach',
+                              '-o']
+        # executing cli command to delete volume
+        out, rc = self.command_execute(*command_delete_lun)
+        if rc != 0:
+            # Ignore the error that due to retry
+            if rc == 9 and out.find("not exist") >= 0:
+                LOG.warn(_("LUN is already deleted, LUN name %(name)s. "
+                           "Message: %(msg)s") %
+                         {'name': name, 'msg': out})
+            else:
+                raise EMCVnxCLICmdError(command_delete_lun, rc, out)
+
    def _wait_for_a_condition(self, testmethod, timeout=None,
                              interval=INTERVAL_5_SEC):
        """Poll *testmethod* every *interval* seconds until it is truthy.

        :param testmethod: zero-argument callable; exceptions it raises
            are swallowed and treated as a False result
        :param timeout: seconds before giving up (defaults to
            self.timeout)
        :raises VolumeBackendAPIException: on timeout
        """
        start_time = time.time()
        if timeout is None:
            timeout = self.timeout

        def _inner():
            try:
                testValue = testmethod()
            except Exception as ex:
                testValue = False
                # NOTE(review): ex.message is Python 2 only — confirm
                # before running under Python 3.
                LOG.debug('CommandLineHelper.'
                          '_wait_for_condition: %(method_name)s '
                          'execution failed for %(exception)s'
                          % {'method_name': testmethod.__name__,
                             'exception': ex.message})
            if testValue:
                raise loopingcall.LoopingCallDone()

            if int(time.time()) - start_time > timeout:
                msg = (_('CommandLineHelper._wait_for_condition: %s timeout')
                       % testmethod.__name__)
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        timer = loopingcall.FixedIntervalLoopingCall(_inner)
        timer.start(interval=interval).wait()
+
+    @log_enter_exit
+    def expand_lun(self, name, new_size):
+
+        command_expand_lun = ('lun', '-expand',
+                              '-name', name,
+                              '-capacity', new_size,
+                              '-sq', 'gb',
+                              '-o',
+                              '-ignoreThresholds')
+        out, rc = self.command_execute(*command_expand_lun)
+        if rc != 0:
+            # Ignore the error that due to retry
+            if rc == 4 and out.find("(0x712d8e04)") >= 0:
+                LOG.warn(_("LUN %(name)s is already expanded. "
+                           "Message: %(msg)s") %
+                         {'name': name, 'msg': out})
+            else:
+                raise EMCVnxCLICmdError(command_expand_lun, rc, out)
+
+    @log_enter_exit
+    def expand_lun_and_wait(self, name, new_size):
+        self.expand_lun(name, new_size)
+
+        def lun_is_extented():
+            data = self.get_lun_by_name(name)
+            return new_size == data[self.LUN_CAPACITY.key]
+
+        self._wait_for_a_condition(lun_is_extented)
+
+    @log_enter_exit
+    def lun_rename(self, lun_id, new_name):
+        """This function used to rename a lun to match
+        the expected name for the volume.
+        """
+        command_lun_rename = ('lun', '-modify',
+                              '-l', lun_id,
+                              '-newName', new_name,
+                              '-o')
+
+        out, rc = self.command_execute(*command_lun_rename)
+        if rc != 0:
+            raise EMCVnxCLICmdError(command_lun_rename, rc, out)
+
    @log_enter_exit
    def modify_lun_tiering(self, name, tiering):
        """Modify a LUN's tiering policy.

        No CLI call is issued when *tiering* is falsy — the method is a
        no-op in that case.
        """
        command_modify_lun = ['lun', '-modify',
                              '-name', name,
                              '-o']
        if tiering:
            command_modify_lun.extend(self.tiering_values[tiering])

            out, rc = self.command_execute(*command_modify_lun)
            if rc != 0:
                raise EMCVnxCLICmdError(command_modify_lun, rc, out)
+
    @log_enter_exit
    def create_snapshot(self, volume_name, name):
        """Snapshot the LUN named *volume_name* as snapshot *name*.

        rc 5 / 0x716d8005 ('already exists') is tolerated for retries.

        :raises VolumeBackendAPIException: when the LUN id is missing
        """
        data = self.get_lun_by_name(volume_name)
        if data[self.LUN_ID.key] is not None:
            command_create_snapshot = ('snap', '-create',
                                       '-res', data[self.LUN_ID.key],
                                       '-name', name,
                                       '-allowReadWrite', 'yes',
                                       '-allowAutoDelete', 'no')

            out, rc = self.command_execute(*command_create_snapshot)
            if rc != 0:
                # Ignore the error that due to retry
                if rc == 5 and \
                        out.find("(0x716d8005)") >= 0:
                    LOG.warn(_('Snapshot %(name)s already exists. '
                               'Message: %(msg)s') %
                             {'name': name, 'msg': out})
                else:
                    raise EMCVnxCLICmdError(command_create_snapshot, rc, out)
        else:
            msg = _('Failed to get LUN ID for volume %s') % volume_name
            raise exception.VolumeBackendAPIException(data=msg)
 
-        # Testing the naviseccli setup
-        query_list = ("storagepool", "-list",
-                      "-name", self.pool_name, "-state")
-        out, rc = self._cli_execute(*query_list)
    @log_enter_exit
    def delete_snapshot(self, name):
        """Destroy snapshot *name*, retrying while it is still in use.

        'not exist' (rc 5) counts as success; 0x716d8003 (attached to a
        snapshot mount point) triggers a retry every 30 seconds, for up
        to 3 intervals.
        """

        def delete_snapshot_success():
            command_delete_snapshot = ('snap', '-destroy',
                                       '-id', name,
                                       '-o')
            out, rc = self.command_execute(*command_delete_snapshot)
            if rc != 0:
                # Ignore the error that due to retry
                if rc == 5 and out.find("not exist") >= 0:
                    LOG.warn(_("Snapshot %(name)s may deleted already. "
                               "Message: %(msg)s") %
                             {'name': name, 'msg': out})
                    return True
                # The snapshot cannot be destroyed because it is
                # attached to a snapshot mount point. Wait
                elif rc == 3 and out.find("(0x716d8003)") >= 0:
                    LOG.warn(_("Snapshot %(name)s is in use, retry. "
                               "Message: %(msg)s") %
                             {'name': name, 'msg': out})
                    return False
                else:
                    raise EMCVnxCLICmdError(command_delete_snapshot, rc, out)
            else:
                LOG.info(_('Snapshot %s was deleted successfully.') %
                         name)
                return True

        self._wait_for_a_condition(delete_snapshot_success,
                                   interval=INTERVAL_30_SEC,
                                   timeout=INTERVAL_30_SEC * 3)
+
    @log_enter_exit
    def create_mount_point(self, primary_lun_name, name):
        """Create a snapshot mount point on top of *primary_lun_name*.

        rc 4 / 0x712d8d04 ('already exists') is tolerated for retries.

        :returns: the CLI return code (0 or the tolerated 4)
        """
        command_create_mount_point = ('lun', '-create',
                                      '-type', 'snap',
                                      '-primaryLunName', primary_lun_name,
                                      '-name', name)

        out, rc = self.command_execute(*command_create_mount_point)
        if rc != 0:
            # Ignore the error that due to retry
            if rc == 4 and out.find("(0x712d8d04)") >= 0:
                LOG.warn(_("Mount point %(name)s already exists. "
                           "Message: %(msg)s") %
                         {'name': name, 'msg': out})
            else:
                raise EMCVnxCLICmdError(command_create_mount_point, rc, out)

        return rc
+
    @log_enter_exit
    def attach_mount_point(self, name, snapshot_name):
        """Attach snapshot *snapshot_name* to the mount point *name*.

        rc 85 / 0x716d8055 ('already attached') is tolerated for
        retries.

        :returns: the CLI return code (0 or the tolerated 85)
        """
        command_attach_mount_point = ('lun', '-attach',
                                      '-name', name,
                                      '-snapName', snapshot_name)

        out, rc = self.command_execute(*command_attach_mount_point)
        if rc != 0:
            # Ignore the error that due to retry
            if rc == 85 and out.find('(0x716d8055)') >= 0:
                LOG.warn(_("Snapshot %(snapname)s is attached to snapshot "
                           "mount point %(mpname)s already. "
                           "Message: %(msg)s") %
                         {'snapname': snapshot_name,
                          'mpname': name,
                          'msg': out})
            else:
                raise EMCVnxCLICmdError(command_attach_mount_point, rc, out)

        return rc
+
+    @log_enter_exit
+    def check_smp_not_attached(self, smp_name):
+        """Ensure a snap mount point with snap become a LUN."""
+
+        def _wait_for_sync_status():
+            lun_list = ('lun', '-list', '-name', smp_name,
+                        '-attachedSnapshot')
+            out, rc = self.command_execute(*lun_list)
+            if rc == 0:
+                vol_details = out.split('\n')
+                snap_name = vol_details[2].split(':')[1].strip()
+            if (snap_name == 'N/A'):
+                return True
+            return False
+
+        self._wait_for_a_condition(_wait_for_sync_status)
+
+    @log_enter_exit
+    def migrate_lun(self, src_id, dst_id, log_failure_as_error=True):
+        command_migrate_lun = ('migrate', '-start',
+                               '-source', src_id,
+                               '-dest', dst_id,
+                               '-rate', 'high',
+                               '-o')
+        # SP HA is not supported by LUN migration
+        out, rc = self.command_execute(*command_migrate_lun,
+                                       retry_disable=True)
+
+        if 0 != rc:
+            raise EMCVnxCLICmdError(command_migrate_lun, rc, out,
+                                    log_failure_as_error)
+
+        return rc
+
    @log_enter_exit
    def migrate_lun_with_verification(self, src_id,
                                      dst_id=None,
                                      dst_name=None):
        """Start a LUN migration and poll it to completion.

        On a failed start the temporary destination LUN *dst_name* is
        deleted (when given).  An SP-unavailable error is double-checked
        against 'migrate -list' because the migration may in fact have
        started despite the network timeout.

        :returns: True when the migration finished, False when it could
                  not be started
        """
        try:
            self.migrate_lun(src_id, dst_id, log_failure_as_error=False)
        except EMCVnxCLICmdError as ex:
            migration_succeed = False
            if self._is_sp_unavailable_error(ex.out):
                LOG.warn(_("Migration command may get network timeout. "
                           "Double check whether migration in fact "
                           "started successfully. Message: %(msg)s") %
                         {'msg': ex.out})
                command_migrate_list = ('migrate', '-list',
                                        '-source', src_id)
                rc = self.command_execute(*command_migrate_list)[1]
                if rc == 0:
                    migration_succeed = True

            if not migration_succeed:
                LOG.warn(_("Start migration failed. Message: %s") %
                         ex.out)
                LOG.debug("Delete temp LUN after migration "
                          "start failed. LUN: %s" % dst_name)
                if(dst_name is not None):
                    self.delete_lun(dst_name)
                return False

        # Set the proper interval to verify the migration status
        def migration_is_ready():
            mig_ready = False
            command_migrate_list = ('migrate', '-list',
                                    '-source', src_id)
            out, rc = self.command_execute(*command_migrate_list)
            LOG.debug("Migration output: %s" % out)
            if rc == 0:
                # parse the percentage
                out = re.split(r'\n', out)
                log = "Migration in process %s %%." % out[7].split(":  ")[1]
                LOG.debug(log)
            else:
                # 'not currently migrating' means the migration is done.
                if re.search(r'The specified source LUN '
                             'is not currently migrating', out):
                    LOG.debug("Migration of LUN %s is finished." % src_id)
                    mig_ready = True
                else:
                    reason = _("Querying migrating status error.")
                    LOG.error(reason)
                    raise exception.VolumeBackendAPIException(
                        data="%(reason)s : %(output)s" %
                        {'reason': reason, 'output': out})
            return mig_ready

        self._wait_for_a_condition(migration_is_ready,
                                   interval=INTERVAL_30_SEC)

        return True
+
+    @log_enter_exit
+    def get_storage_group(self, name):
+
+        # ALU/HLU as key/value map
+        lun_map = {}
+
+        data = {'storage_group_name': name,
+                'storage_group_uid': None,
+                'lunmap': lun_map}
+
+        command_get_storage_group = ('storagegroup', '-list',
+                                     '-gname', name)
+
+        out, rc = self.command_execute(*command_get_storage_group)
+        if rc != 0:
+            raise EMCVnxCLICmdError(command_get_storage_group, rc, out)
+
+        re_stroage_group_id = 'Storage Group UID:\s*(.*)\s*'
+        m = re.search(re_stroage_group_id, out)
+        if m is not None:
+            data['storage_group_uid'] = m.group(1)
+
+        re_HLU_ALU_pair = 'HLU\/ALU Pairs:\s*HLU Number' \
+                          '\s*ALU Number\s*[-\s]*(?P<lun_details>(\d+\s*)+)'
+        m = re.search(re_HLU_ALU_pair, out)
+        if m is not None:
+            lun_details = m.group('lun_details').strip()
+            values = re.split('\s*', lun_details)
+            while (len(values) >= 2):
+                key = values.pop()
+                value = values.pop()
+                lun_map[int(key)] = int(value)
+
+        return data
+
    @log_enter_exit
    def create_storage_group(self, name):
        """Create storage group *name*; 'name already in use' is benign."""

        command_create_storage_group = ('storagegroup', '-create',
                                        '-gname', name)

        out, rc = self.command_execute(*command_create_storage_group)
        if rc != 0:
            # Ignore the error that due to retry
            if rc == 66 and out.find("name already in use") >= 0:
                LOG.warn(_('Storage group %(name)s already exists. '
                           'Message: %(msg)s') %
                         {'name': name, 'msg': out})
            else:
                raise EMCVnxCLICmdError(command_create_storage_group, rc, out)
+
+    @log_enter_exit
+    def delete_storage_group(self, name):
+
+        command_delete_storage_group = ('storagegroup', '-destroy',
+                                        '-gname', name, '-o')
+
+        out, rc = self.command_execute(*command_delete_storage_group)
+        if rc != 0:
+            # Ignore the error that due to retry
+            if rc == 83 and out.find("group name or UID does not "
+                                     "match any storage groups") >= 0:
+                LOG.warn(_("Storage group %(name)s doesn't exist, "
+                           "may have already been deleted. "
+                           "Message: %(msg)s") %
+                         {'name': name, 'msg': out})
+            else:
+                raise EMCVnxCLICmdError(command_delete_storage_group, rc, out)
+
+    @log_enter_exit
+    def connect_host_to_storage_group(self, hostname, sg_name):
+
+        command_host_connect = ('storagegroup', '-connecthost',
+                                '-host', hostname,
+                                '-gname', sg_name,
+                                '-o')
+
+        out, rc = self.command_execute(*command_host_connect)
+        if rc != 0:
+            raise EMCVnxCLICmdError(command_host_connect, rc, out)
+
+    @log_enter_exit
+    def disconnect_host_from_storage_group(self, hostname, sg_name):
+        command_host_disconnect = ('storagegroup', '-disconnecthost',
+                                   '-host', hostname,
+                                   '-gname', sg_name,
+                                   '-o')
+
+        out, rc = self.command_execute(*command_host_disconnect)
+        if rc != 0:
+            # Ignore the error that due to retry
+            if rc == 116 and \
+                re.search("host is not.*connected to.*storage group",
+                          out) is not None:
+                LOG.warn(_("Host %(host)s has already disconnected from "
+                           "storage group %(sgname)s. Message: %(msg)s") %
+                         {'host': hostname, 'sgname': sg_name, 'msg': out})
+            else:
+                raise EMCVnxCLICmdError(command_host_disconnect, rc, out)
+
+    @log_enter_exit
+    def add_hlu_to_storage_group(self, hlu, alu, sg_name):
+
+        command_add_hlu = ('storagegroup', '-addhlu',
+                           '-hlu', hlu,
+                           '-alu', alu,
+                           '-gname', sg_name)
+
+        out, rc = self.command_execute(*command_add_hlu)
+        if rc != 0:
+            # Ignore the error that due to retry
+            if rc == 66 and \
+                    re.search("LUN.*already.*added to.*Storage Group",
+                              out) is not None:
+                LOG.warn(_("LUN %(lun)s has already added to "
+                           "Storage Group %(sgname)s. "
+                           "Message: %(msg)s") %
+                         {'lun': alu, 'sgname': sg_name, 'msg': out})
+            else:
+                raise EMCVnxCLICmdError(command_add_hlu, rc, out)
+
+    @log_enter_exit
+    def remove_hlu_from_storagegroup(self, hlu, sg_name):
+
+        command_remove_hlu = ('storagegroup', '-removehlu',
+                              '-hlu', hlu,
+                              '-gname', sg_name,
+                              '-o')
+
+        out, rc = self.command_execute(*command_remove_hlu)
+        if rc != 0:
+            # Ignore the error that due to retry
+            if rc == 66 and\
+                    out.find("No such Host LUN in this Storage Group") >= 0:
+                LOG.warn(_("HLU %(hlu)s has already been removed from "
+                           "%(sgname)s. Message: %(msg)s") %
+                         {'hlu': hlu, 'sgname': sg_name, 'msg': out})
+            else:
+                raise EMCVnxCLICmdError(command_remove_hlu, rc, out)
+
+    @log_enter_exit
+    def get_iscsi_protocol_endpoints(self, device_sp):
+
+        command_get_port = ('connection', '-getport',
+                            '-sp', device_sp)
+
+        out, rc = self.command_execute(*command_get_port)
+        if rc != 0:
+            raise EMCVnxCLICmdError(command_get_port, rc, out)
+
+        re_port_wwn = 'Port WWN:\s*(.*)\s*'
+        initiator_address = re.findall(re_port_wwn, out)
+
+        return initiator_address
+
+    @log_enter_exit
+    def get_pool_name_of_lun(self, lun_name):
+        data = self.get_lun_properties(
+            ('-name', lun_name), self.LUN_WITH_POOL)
+        return data.get('pool', '')
+
+    @log_enter_exit
+    def get_lun_by_name(self, name, properties=LUN_ALL):
+        data = self.get_lun_properties(('-name', name), properties)
+        return data
+
+    @log_enter_exit
+    def get_lun_by_id(self, lunid, properties=LUN_ALL):
+        data = self.get_lun_properties(('-l', lunid), properties)
+        return data
+
+    @log_enter_exit
+    def get_pool(self, name):
+        data = self.get_pool_properties(('-name', name))
+        return data
+
+    @log_enter_exit
+    def get_pool_properties(self, filter_option, properties=POOL_ALL):
+        module_list = ('storagepool', '-list')
+        data = self._get_lun_or_pool_properties(
+            module_list, filter_option,
+            base_properties=[self.POOL_NAME],
+            adv_properties=properties)
+        return data
+
+    @log_enter_exit
+    def get_lun_properties(self, filter_option, properties=LUN_ALL):
+        module_list = ('lun', '-list')
+        data = self._get_lun_or_pool_properties(
+            module_list, filter_option,
+            base_properties=[self.LUN_NAME, self.LUN_ID],
+            adv_properties=properties)
+        return data
+
+    def _get_lun_or_pool_properties(self, module_list,
+                                    filter_option,
+                                    base_properties=tuple(),
+                                    adv_properties=tuple()):
+        # to do instance check
+        command_get_lun = module_list + filter_option
+        for prop in adv_properties:
+            command_get_lun += (prop.option, )
+        out, rc = self.command_execute(*command_get_lun)
+
+        if rc != 0:
+            raise EMCVnxCLICmdError(command_get_lun, rc, out)
+
+        data = {}
+        for baseprop in base_properties:
+            data[baseprop.key] = self._get_property_value(out, baseprop)
+
+        for prop in adv_properties:
+            data[prop.key] = self._get_property_value(out, prop)
+
+        LOG.debug('Return LUN or Pool properties. Data: %s' % data)
+        return data
+
+    def _get_property_value(self, out, propertyDescriptor):
+        label = propertyDescriptor.label
+        m = re.search(label, out)
+        if m:
+            if (propertyDescriptor.converter is not None):
+                try:
+                    return propertyDescriptor.converter(m.group(1))
+                except ValueError:
+                    LOG.error(_("Invalid value for %(key)s, "
+                                "value is %(value)s.") %
+                              {'key': propertyDescriptor.key,
+                               'value': m.group(1)})
+                    return None
+            else:
+                return m.group(1)
+        else:
+            LOG.debug('%s value is not found in the output.'
+                      % propertyDescriptor.label)
+            return None
+
+    @log_enter_exit
+    def check_lun_has_snap(self, lun_id):
+        cmd = ('snap', '-list', '-res', lun_id)
+        rc = self.command_execute(*cmd)[1]
+        if rc == 0:
+            LOG.debug("Find snapshots for %s." % lun_id)
+            return True
+        else:
+            return False
+
+    # Return a pool list
+    @log_enter_exit
+    def get_pool_list(self, no_poll=False):
+        temp_cache = []
+        cmd = ('-np', 'storagepool', '-list', '-availableCap', '-state') \
+            if no_poll \
+            else ('storagepool', '-list', '-availableCap', '-state')
+        out, rc = self.command_execute(*cmd)
+        if rc != 0:
+            raise EMCVnxCLICmdError(cmd, rc, out)
+
+        try:
+            for pool in out.split('\n\n'):
+                if len(pool.strip()) == 0:
+                    continue
+                obj = {}
+                obj['name'] = self._get_property_value(pool, self.POOL_NAME)
+                obj['free_space'] = self._get_property_value(
+                    pool, self.POOL_FREE_CAPACITY)
+                temp_cache.append(obj)
+        except Exception as ex:
+            LOG.error(_("Error happened during storage pool querying, %s.")
+                      % ex)
+            # NOTE: Do not want to continue raise the exception
+            # as the pools may temporarly unavailable
+            pass
+        return temp_cache
+
+    @log_enter_exit
+    def get_array_serial(self, no_poll=False):
+        """return array Serial No for pool backend."""
+        data = {'array_serial': 'unknown'}
+
+        command_get_array_serial = ('-np', 'getagent', '-serial') \
+            if no_poll else ('getagent', '-serial')
+        # Set the property timeout to get array serial
+        out, rc = self.command_execute(*command_get_array_serial)
+        if 0 == rc:
+            m = re.search(r'Serial No:\s+(\w+)', out)
+            if m:
+                data['array_serial'] = m.group(1)
+            else:
+                LOG.warn(_("No array serial number returned, "
+                           "set as unknown."))
+        else:
+            raise EMCVnxCLICmdError(command_get_array_serial, rc, out)
+
+        return data
+
    @log_enter_exit
    def get_status_up_ports(self, storage_group_name):
        """Function to get ports whose status are up.

        Returns the WWNs of SP ports referenced by the storage group's
        HBA registrations whose link/port status is Up/Online.

        :raises EMCVnxCLICmdError: if either CLI listing fails.
        """
        cmd_get_hba = ('storagegroup', '-list', '-gname', storage_group_name)
        out, rc = self.command_execute(*cmd_get_hba)
        wwns = []
        if 0 == rc:
            # Each HBA row: '<hba uid (16 hex pairs)> SP [A|B] <port id>'.
            _re_hba_sp_pair = re.compile('((\w\w:){15}(\w\w)\s*' +
                                         '(SP\s[A-B]){1}\s*(\d*)\s*\n)')
            _all_hba_sp_pairs = re.findall(_re_hba_sp_pair, out)
            # Group 3 is the 'SP X' token, group 4 the port id.
            sps = [each[3] for each in _all_hba_sp_pairs]
            portid = [each[4] for each in _all_hba_sp_pairs]
            # Fetch the full port listing once and reuse it for every pair.
            cmd_get_port = ('port', '-list', '-sp')
            out, rc = self.command_execute(*cmd_get_port)
            if 0 != rc:
                raise EMCVnxCLICmdError(cmd_get_port, rc, out)
            for i, sp in enumerate(sps):
                # get_port_wwn returns None unless the port is Up/Online.
                wwn = self.get_port_wwn(sp, portid[i], out)
                if (wwn is not None) and (wwn not in wwns):
                    LOG.debug('Add wwn:%(wwn)s for sg:%(sg)s.'
                              % {'wwn': wwn,
                                 'sg': storage_group_name})
                    wwns.append(wwn)
        else:
            raise EMCVnxCLICmdError(cmd_get_hba, rc, out)
        return wwns
+
    @log_enter_exit
    def get_login_ports(self, storage_group_name, connector_wwpns):
        """Return WWNs of SP ports the connector's HBAs are logged in to.

        :param storage_group_name: storage group whose port list to inspect
        :param connector_wwpns: lower-case port WWNs of the host initiators
        :raises EMCVnxCLICmdError: if the port listing fails or has no
            HBA section.
        """
        cmd_list_hba = ('port', '-list', '-gname', storage_group_name)
        out, rc = self.command_execute(*cmd_list_hba)
        ports = []
        wwns = []
        connector_hba_list = []
        if 0 == rc and out.find('Information about each HBA:') != -1:
            # Split the output into per-HBA sections and the SPPORT section.
            hba_list = out.split('Information about each SPPORT:')[0].split(
                'Information about each HBA:')[1:]
            allports = out.split('Information about each SPPORT:')[1]
            hba_uid_pat = re.compile('HBA\sUID:\s*((\w\w:){15}(\w\w))')
            for each in hba_list:
                obj_search = re.search(hba_uid_pat, each)
                # The port WWN is the second half of the HBA UID
                # (colons removed, first 16 hex chars dropped).
                if obj_search and obj_search.group(1). \
                        replace(':', '')[16:].lower() in connector_wwpns:
                    connector_hba_list.append(each)
            # Only count ports with an active login ('Logged In: YES').
            port_pat = re.compile('SP Name:\s*(SP\s\w)\n\s*' +
                                  'SP Port ID:\s*(\w*)\n\s*' +
                                  'HBA Devicename:.*\n\s*' +
                                  'Trusted:.*\n\s*' +
                                  'Logged In:\s*YES\n')
            for each in connector_hba_list:
                ports.extend(re.findall(port_pat, each))
            # De-duplicate (SP name, port id) pairs.
            ports = list(set(ports))
            for each in ports:
                wwn = self.get_port_wwn(each[0], each[1], allports)
                if wwn:
                    wwns.append(wwn)
        else:
            raise EMCVnxCLICmdError(cmd_list_hba, rc, out)
        return wwns
+
+    @log_enter_exit
+    def get_port_wwn(self, sp, port_id, allports=None):
+        wwn = None
+        if allports is None:
+            cmd_get_port = ('port', '-list', '-sp')
+            out, rc = self.command_execute(*cmd_get_port)
+            if 0 != rc:
+                raise EMCVnxCLICmdError(cmd_get_port, rc, out)
+            else:
+                allports = out
+        _re_port_wwn = re.compile('SP Name:\s*' + sp +
+                                  '\nSP Port ID:\s*' + port_id +
+                                  '\nSP UID:\s*((\w\w:){15}(\w\w))' +
+                                  '\nLink Status:         Up' +
+                                  '\nPort Status:         Online')
+        _obj_search = re.search(_re_port_wwn, allports)
+        if _obj_search is not None:
+            wwn = _obj_search.group(1).replace(':', '')[16:]
+        return wwn
+
+    @log_enter_exit
+    def get_fc_targets(self):
+        fc_getport = ('port', '-list', '-sp')
+        out, rc = self.command_execute(*fc_getport)
+        if rc != 0:
+            raise EMCVnxCLICmdError(fc_getport, rc, out)
+
+        fc_target_dict = {'A': [], 'B': []}
+
+        _fcport_pat = (r'SP Name:             SP\s(\w)\s*'
+                       r'SP Port ID:\s*(\w*)\n'
+                       r'SP UID:\s*((\w\w:){15}(\w\w))\s*'
+                       r'Link Status:         Up\n'
+                       r'Port Status:         Online\n')
+
+        for m in re.finditer(_fcport_pat, out):
+            sp = m.groups()[0]
+            sp_port_id = m.groups()[1]
+            fc_target_dict[sp].append({'SP': sp,
+                                       'Port ID': sp_port_id})
+        return fc_target_dict
+
    @log_enter_exit
    def get_iscsi_targets(self):
        """Return iSCSI portals per SP as {'A': [...], 'B': [...]}.

        Each entry carries SP, Port ID, Port WWN (IQN), Virtual Port ID
        and IP Address; virtual ports without a usable IP are skipped.

        :raises EMCVnxCLICmdError: if the connection -getport call fails.
        """
        cmd_getport = ('connection', '-getport', '-address', '-vlanid')
        out, rc = self.command_execute(*cmd_getport)
        if rc != 0:
            raise EMCVnxCLICmdError(cmd_getport, rc, out)

        iscsi_target_dict = {'A': [], 'B': []}
        # Physical-port header: SP letter, port id and IQN.
        iscsi_spport_pat = r'(A|B)\s*' + \
                           r'Port ID:\s+(\d+)\s*' + \
                           r'Port WWN:\s+(iqn\S+)'
        # Virtual ports listed under each physical port.
        iscsi_vport_pat = r'Virtual Port ID:\s+(\d+)\s*' + \
                          r'VLAN ID:\s*\S*\s*' + \
                          r'IP Address:\s+(\S+)'
        # The output is split into one section per 'SP:' header.
        for spport_content in re.split(r'^SP:\s+|\nSP:\s*', out):
            m_spport = re.match(iscsi_spport_pat, spport_content,
                                flags=re.IGNORECASE)
            if not m_spport:
                continue
            sp = m_spport.group(1)
            port_id = int(m_spport.group(2))
            iqn = m_spport.group(3)
            for m_vport in re.finditer(iscsi_vport_pat, spport_content):
                vport_id = int(m_vport.group(1))
                ip_addr = m_vport.group(2)
                if ip_addr.find('N/A') != -1:
                    # 'N/A' marks a virtual port with no assigned address.
                    LOG.debug("Skip port without IP Address: %s",
                              m_spport.group(0) + m_vport.group(0))
                    continue
                iscsi_target_dict[sp].append({'SP': sp,
                                              'Port ID': port_id,
                                              'Port WWN': iqn,
                                              'Virtual Port ID': vport_id,
                                              'IP Address': ip_addr})

        return iscsi_target_dict
+
+    @log_enter_exit
+    def get_registered_spport_set(self, initiator_iqn, sgname):
+        sg_list = ('storagegroup', '-list', '-gname', sgname)
+        out, rc = self.command_execute(*sg_list)
+        spport_set = set()
+        if rc == 0:
+            for m_spport in re.finditer(r'\n\s+%s\s+SP\s(A|B)\s+(\d+)' %
+                                        initiator_iqn,
+                                        out,
+                                        flags=re.IGNORECASE):
+                spport_set.add((m_spport.group(1), int(m_spport.group(2))))
+                LOG.debug('See path %(path)s in %(sg)s'
+                          % ({'path': m_spport.group(0),
+                              'sg': sgname}))
+        else:
+            raise EMCVnxCLICmdError(sg_list, rc, out)
+        return spport_set
+
+    @log_enter_exit
+    def ping_node(self, target_portal, initiator_ip):
+        connection_pingnode = ('connection', '-pingnode', '-sp',
+                               target_portal['SP'], '-portid',
+                               target_portal['Port ID'], '-vportid',
+                               target_portal['Virtual Port ID'],
+                               '-address', initiator_ip)
+        out, rc = self.command_execute(*connection_pingnode)
+        if rc == 0:
+            ping_ok = re.compile(r'Reply from %s' % initiator_ip)
+            if re.match(ping_ok, out) is not None:
+                LOG.debug("See available iSCSI target: %s",
+                          connection_pingnode)
+                return True
+        LOG.warn(_("See unavailable iSCSI target: %s"), connection_pingnode)
+        return False
+
    @log_enter_exit
    def find_avaialable_iscsi_target_one(self, hostname,
                                         preferred_sp,
                                         registered_spport_set):
        """Pick one reachable iSCSI portal for *hostname*, or None.

        NOTE(review): 'avaialable' is a typo in this public name; renaming
        would break external callers, so it is left as-is.

        Portals on *preferred_sp* are tried before the peer SP.  A portal
        qualifies only when its (SP, port) pair is in
        *registered_spport_set*.  When the host's iSCSI IPs are known the
        portal must answer ping_node; otherwise a random qualifying portal
        is returned unverified.
        """
        if self.iscsi_initiator_map and hostname in self.iscsi_initiator_map:
            iscsi_initiator_ips = list(self.iscsi_initiator_map[hostname])
            random.shuffle(iscsi_initiator_ips)
        else:
            iscsi_initiator_ips = None
        # Check the targets on the owner first
        if preferred_sp == 'A':
            target_sps = ('A', 'B')
        else:
            target_sps = ('B', 'A')

        iscsi_targets = self.get_iscsi_targets()
        for target_sp in target_sps:
            # Shuffle portals for rough load balancing across ports.
            target_portals = list(iscsi_targets[target_sp])
            random.shuffle(target_portals)
            for target_portal in target_portals:
                spport = (target_portal['SP'], target_portal['Port ID'])
                if spport not in registered_spport_set:
                    LOG.debug("Skip SP Port %(port)s since "
                              "no path from %(host)s is through it"
                              % {'port': spport,
                                 'host': hostname})
                    continue
                if iscsi_initiator_ips is not None:
                    for initiator_ip in iscsi_initiator_ips:
                        if self.ping_node(target_portal, initiator_ip):
                            return target_portal
                else:
                    LOG.debug("No iSCSI IP address of %(hostname)s is known. "
                              "Return a random iSCSI target portal %(portal)s."
                              %
                              {'hostname': hostname, 'portal': target_portal})
                    return target_portal

        return None
+
+    def _is_sp_unavailable_error(self, out):
+        error_pattern = '(^Error.*Message.*End of data stream.*)|'\
+                        '(.*Message.*connection refused.*)|'\
+                        '(^Error.*Message.*Service Unavailable.*)'
+        pattern = re.compile(error_pattern)
+        return pattern.match(out)
+
+    @log_enter_exit
+    def command_execute(self, *command, **kwargv):
+        # NOTE: retry_disable need to be removed from kwargv
+        # before it pass to utils.execute, otherwise exception will thrown
+        retry_disable = kwargv.pop('retry_disable', False)
+        if self._is_sp_alive(self.active_storage_ip):
+            out, rc = self._command_execute_on_active_ip(*command, **kwargv)
+            if not retry_disable and self._is_sp_unavailable_error(out):
+                # When active sp is unavailble, swith to another sp
+                # and set it to active
+                if self._toggle_sp():
+                    LOG.debug('EMC: Command Exception: %(rc) %(result)s. '
+                              'Retry on another SP.' % {'rc': rc,
+                                                        'result': out})
+                    out, rc = self._command_execute_on_active_ip(*command,
+                                                                 **kwargv)
+        elif self._toggle_sp() and not retry_disable:
+            # If active ip is not accessible, toggled to another sp
+            out, rc = self._command_execute_on_active_ip(*command, **kwargv)
+        else:
+            # Active IP is inaccessible, and cannot toggle to another SP,
+            # return Error
+            out, rc = "Server Unavailable", 255
+
+        LOG.debug('EMC: Command: %(command)s.'
+                  % {'command': self.command + command})
+        LOG.debug('EMC: Command Result: %(result)s.' %
+                  {'result': out.replace('\n', '\\n')})
+
+        return out, rc
+
+    def _command_execute_on_active_ip(self, *command, **kwargv):
         if "check_exit_code" not in kwargv:
             kwargv["check_exit_code"] = True
         rc = 0
+        out = ""
         try:
-            out, _err = utils.execute(*(self.cli_prefix +
-                                      self.cli_credentials + cmd), **kwargv)
+            active_ip = (self.active_storage_ip,)
+            out, err = utils.execute(
+                *(self.command
+                  + active_ip
+                  + self.credentials
+                  + command),
+                **kwargv)
         except processutils.ProcessExecutionError as pe:
             rc = pe.exit_code
-            out = pe.stdout + pe.stderr
+            out = pe.stdout
+            out = out.replace('\n', '\\n')
         return out, rc
 
-    def create_volume(self, volume):
-        """Creates a EMC volume."""
+    def _is_sp_alive(self, ipaddr):
+        ping_cmd = ('ping', '-c', 1, ipaddr)
+        try:
+            out, err = utils.execute(*ping_cmd,
+                                     check_exit_code=True)
+        except processutils.ProcessExecutionError as pe:
+            out = pe.stdout
+            rc = pe.exit_code
+            if rc != 0:
+                LOG.debug('%s is unavaialbe' % ipaddr)
+                return False
+        LOG.debug('Ping SP %(spip)s Command Result: %(result)s.' %
+                  {'spip': self.active_storage_ip, 'result': out})
+        return True
+
+    def _toggle_sp(self):
+        """This function toggles the storage IP
+        Address between primary IP and secondary IP, if no SP IP address has
+        exchanged, return False, otherwise True will be returned.
+        """
+        if self.secondary_storage_ip is None:
+            return False
+        old_ip = self.active_storage_ip
+        self.active_storage_ip = self.secondary_storage_ip if\
+            self.active_storage_ip == self.primary_storage_ip else\
+            self.primary_storage_ip
+
+        LOG.info(_('Toggle storage_vnx_ip_adress from %(old)s to '
+                   '%(new)s.') %
+                 {'old': old_ip,
+                  'new': self.primary_storage_ip})
+        return True
+
+    @log_enter_exit
+    def get_enablers_on_array(self, no_poll=False):
+        """The function would get all the enabler installed
+        on array.
+        """
+        enablers = []
+        cmd_list = ('-np', 'ndu', '-list') \
+            if no_poll else ('ndu', '-list')
+        out, rc = self.command_execute(*cmd_list)
 
-        LOG.debug('Entering create_volume.')
-        volumesize = volume['size']
-        volumename = volume['name']
+        if rc != 0:
+            raise EMCVnxCLICmdError(cmd_list, rc, out)
+        else:
+            enabler_pat = r'Name of the software package:\s*(\S+)\s*'
+            for m in re.finditer(enabler_pat, out):
+                enablers.append(m.groups()[0])
 
-        LOG.info(_('Create Volume: %(volume)s  Size: %(size)s')
-                 % {'volume': volumename,
-                    'size': volumesize})
+        LOG.debug('Enablers on array %s.' % enablers)
+        return enablers
 
-        # defining CLI command
-        thinness = self._get_provisioning_by_volume(volume)
-
-        # executing CLI command to create volume
-        LOG.debug('Create Volume: %(volumename)s'
-                  % {'volumename': volumename})
-
-        lun_create = ('lun', '-create',
-                      '-type', thinness,
-                      '-capacity', volumesize,
-                      '-sq', 'gb',
-                      '-poolName', self.pool_name,
-                      '-name', volumename)
-        out, rc = self._cli_execute(*lun_create)
-        LOG.debug('Create Volume: %(volumename)s  Return code: %(rc)s'
-                  % {'volumename': volumename,
-                     'rc': rc})
-        if rc == 4:
-            LOG.warn(_('Volume %s already exists'), volumename)
-        elif rc != 0:
-            msg = (_('Failed to create %(volumename)s: %(out)s') %
-                   {'volumename': volumename, 'out': out})
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
+    @log_enter_exit
+    def enable_or_disable_compression_on_lun(self, volumename, compression):
+        """The function will enable or disable the compression
+        on lun
+        """
+        lun_data = self.get_lun_by_name(volumename)
 
-        # wait for up to a minute to verify that the LUN has progressed
-        # to Ready state
-        def _wait_for_lun_ready(volumename, start_time):
-            # executing cli command to check volume
-            command_to_verify = ('lun', '-list', '-name', volumename)
-            out, rc = self._cli_execute(*command_to_verify)
-            if rc == 0 and out.find("Ready") > -1:
-                raise loopingcall.LoopingCallDone()
-            if int(time.time()) - start_time > self.timeout * 60:
-                msg = (_('LUN %s failed to become Ready'), volumename)
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
+        command_compression_cmd = ('compression', '-' + compression,
+                                   '-l', lun_data['lun_id'],
+                                   '-ignoreThresholds', '-o')
 
-        timer = loopingcall.FixedIntervalLoopingCall(
-            _wait_for_lun_ready, volumename, int(time.time()))
-        timer.start(interval=self.wait_interval).wait()
+        out, rc = self.command_execute(*command_compression_cmd)
 
-    def delete_volume(self, volume):
-        """Deletes an EMC volume."""
+        if 0 != rc:
+            raise EMCVnxCLICmdError(command_compression_cmd, rc, out)
+        return rc, out
+
+
+class EMCVnxCliBase(object):
+    """This class defines the functions to use the native CLI functionality."""
 
-        LOG.debug('Entering delete_volume.')
+    VERSION = '04.00.00'
+    stats = {'driver_version': VERSION,
+             'free_capacity_gb': 'unknown',
+             'reserved_percentage': 0,
+             'storage_protocol': None,
+             'total_capacity_gb': 'unknown',
+             'vendor_name': 'EMC',
+             'volume_backend_name': None,
+             'compression_support': 'False',
+             'fast_support': 'False',
+             'deduplication_support': 'False',
+             'thinprovisioning_support': 'False'}
+    enablers = []
+
    def __init__(self, prtcl, configuration=None):
        """Initialize the CLI-based driver core.

        :param prtcl: storage protocol served by this backend
        :param configuration: backend Configuration with the EMC VNX
            driver options
        """
        self.protocol = prtcl
        self.configuration = configuration
        # default_timeout is configured in minutes; keep seconds here.
        self.timeout = self.configuration.default_timeout * 60
        self.max_luns_per_sg = self.configuration.max_luns_per_storage_group
        self.destroy_empty_sg = self.configuration.destroy_empty_storage_group
        self.itor_auto_reg = self.configuration.initiator_auto_registration
        # if zoning_mode is fabric, use lookup service to build itor_tgt_map
        self.zonemanager_lookup_service = None
        zm_conf = Configuration(manager.volume_manager_opts)
        if (zm_conf.safe_get('zoning_mode') == 'fabric' or
                self.configuration.safe_get('zoning_mode') == 'fabric'):
            # Imported lazily so non-fabric deployments do not need the
            # zone manager package at import time.
            from cinder.zonemanager.fc_san_lookup_service \
                import FCSanLookupService
            self.zonemanager_lookup_service = \
                FCSanLookupService(configuration=configuration)
        self.max_retries = 5
        if self.destroy_empty_sg:
            LOG.warn(_("destroy_empty_storage_group: True. "
                       "Empty storage group will be deleted "
                       "after volume is detached."))
        if not self.itor_auto_reg:
            LOG.info(_("initiator_auto_registration: False. "
                       "Initiator auto registration is not enabled. "
                       "Please register initiator manually."))
        # Pool of host LUN ids assignable within one storage group.
        self.hlu_set = set(xrange(1, self.max_luns_per_sg + 1))
        self._client = CommandLineHelper(self.configuration)
        # Cached lazily by get_array_serial().
        self.array_serial = None
+
    def get_target_storagepool(self, volume, source_volume_name=None):
        """Return the pool to place *volume* in; implemented by subclasses."""
        raise NotImplementedError
+
+    def dumps_provider_location(self, pl_dict):
+        return '|'.join([k + '^' + pl_dict[k] for k in pl_dict])
+
+    def get_array_serial(self):
+        if not self.array_serial:
+            self.array_serial = self._client.get_array_serial()
+        return self.array_serial['array_serial']
+
    @log_enter_exit
    def create_volume(self, volume):
        """Create an EMC volume.

        Validates the volume type's extra specs, creates the LUN in the
        target pool with the requested provisioning/tiering, and returns
        a model update carrying the provider_location string.
        """
        volumesize = volume['size']
        volumename = volume['name']

        self._volume_creation_check(volume)
        # defining CLI command
        specs = self.get_volumetype_extraspecs(volume)
        pool = self.get_target_storagepool(volume)
        provisioning, tiering = self.get_extra_spec_value(specs)

        # Fall back to thick provisioning when no extra spec is given.
        if not provisioning:
            provisioning = 'thick'

        LOG.info(_('Create Volume: %(volume)s  Size: %(size)s '
                   'pool: %(pool)s '
                   'provisioning: %(provisioning)s '
                   'tiering: %(tiering)s.')
                 % {'volume': volumename,
                    'size': volumesize,
                    'pool': pool,
                    'provisioning': provisioning,
                    'tiering': tiering})

        data = self._client.create_lun_with_advance_feature(
            pool, volumename, volumesize,
            provisioning, tiering)
        # provider_location encodes array serial, resource type and LUN id.
        pl_dict = {'system': self.get_array_serial(),
                   'type': 'lun',
                   'id': str(data['lun_id'])}
        model_update = {'provider_location':
                        self.dumps_provider_location(pl_dict)}
        volume['provider_location'] = model_update['provider_location']
        return model_update
+
+    def _volume_creation_check(self, volume):
+        """This function will perform the check on the
+        extra spec before the volume can be created. The
+        check is a common check between the array based
+        and pool based backend.
+        """
+
+        specs = self.get_volumetype_extraspecs(volume)
+        provisioning, tiering = self.get_extra_spec_value(specs)
+
+        # step 1: check extra spec value
+        if provisioning:
+            self.check_extra_spec_value(
+                provisioning,
+                self._client.provisioning_values.keys())
+        if tiering:
+            self.check_extra_spec_value(
+                tiering,
+                self._client.tiering_values.keys())
+
+        # step 2: check extra spec combination
+        self.check_extra_spec_combination(specs)
+
+    def check_extra_spec_value(self, extra_spec, valid_values):
+        """check whether an extra spec's value is valid."""
+
+        if not extra_spec or not valid_values:
+            LOG.error(_('The given extra_spec or valid_values is None.'))
+        elif extra_spec not in valid_values:
+            msg = _("The extra_spec: %s is invalid.") % extra_spec
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
+        return
 
-    def extend_volume(self, volume, new_size):
-        """Extends an EMC volume."""
+    def get_extra_spec_value(self, extra_specs):
+        """get EMC extra spec values."""
+        provisioning = 'thick'
+        tiering = None
 
-        LOG.debug('Entering extend_volume.')
-        volumename = volume['name']
+        if self._client.provisioning_spec in extra_specs:
+            provisioning = extra_specs[self._client.provisioning_spec].lower()
+        if self._client.tiering_spec in extra_specs:
+            tiering = extra_specs[self._client.tiering_spec].lower()
 
-        # defining CLI command
-        lun_expand = ('lun', '-expand',
-                      '-name', volumename,
-                      '-capacity', new_size,
-                      '-sq', 'gb',
-                      '-o', '-ignoreThresholds')
-
-        # executing CLI command to extend volume
-        out, rc = self._cli_execute(*lun_expand)
-
-        LOG.debug('Extend Volume: %(volumename)s  Output: %(out)s'
-                  % {'volumename': volumename,
-                     'out': out})
-        if rc == 97:
-            msg = (_('The LUN cannot be expanded or shrunk because '
-                     'it has snapshots. Command to extend the specified '
-                     'volume failed.'))
+        return provisioning, tiering
+
+    def check_extra_spec_combination(self, extra_specs):
+        """check whether extra spec combination is valid."""
+
+        provisioning, tiering = self.get_extra_spec_value(extra_specs)
+        enablers = self.enablers
+
+        # check provisioning and tiering
+        # deduplicated and tiering can not be both enabled
+        if provisioning == 'deduplicated' and tiering is not None:
+            msg = _("deduplicated and auto tiering can't be both enabled.")
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
-        if rc != 0:
-            msg = (_('Failed to expand %s'), volumename)
+        elif provisioning == 'compressed' and '-Compression' not in enablers:
+            msg = _("Compression Enabler is not installed. "
+                    "Can not create compressed volume.")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        elif provisioning == 'deduplicated' and \
+                '-Deduplication' not in enablers:
+            msg = _("Deduplication Enabler is not installed."
+                    " Can not create deduplicated volume")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        elif provisioning in ['thin', 'deduplicated', 'compressed'] and \
+                '-ThinProvisioning' not in enablers:
+            msg = _("ThinProvisioning Enabler is not installed. "
+                    "Can not create thin volume")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+        elif tiering is not None and '-FAST' not in enablers:
+            msg = _("FAST VP Enabler is not installed. "
+                    "Can't set tiering policy for the volume")
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
+        return
 
-    def update_volume_status(self):
-        """Retrieve status info."""
-        LOG.debug("Updating volume status")
+    @log_enter_exit
+    def delete_volume(self, volume):
+        """Deletes an EMC volume."""
+        self._client.delete_lun(volume['name'])
 
-        poolname = self.pool_name
-        pool_list = ('storagepool', '-list',
-                     '-name', poolname,
-                     '-userCap', '-availableCap')
-        out, rc = self._cli_execute(*pool_list)
-        if rc == 0:
-            pool_details = out.split('\n')
-            self.stats['total_capacity_gb'] = float(
-                pool_details[3].split(':')[1].strip())
-            self.stats['free_capacity_gb'] = float(
-                pool_details[5].split(':')[1].strip())
+    @log_enter_exit
+    def extend_volume(self, volume, new_size):
+        """Extends an EMC volume."""
+        self._client.expand_lun_and_wait(volume['name'], new_size)
+
+    def _get_original_status(self, volume):
+        if (volume['instance_uuid'] is None and
+                volume['attached_host'] is None):
+            return 'available'
         else:
-            msg = (_('Failed to list %s'), poolname)
-            LOG.error(msg)
+            return 'in-use'
+
+    def _is_valid_for_storage_assisted_migration(
+            self, volume, host, new_type=None):
+        """Check the src and dest volume to decide the migration type."""
+        false_ret = (False, None)
+
+        if 'location_info' not in host['capabilities']:
+            LOG.warn(_("Failed to get target_pool_name and "
+                       "target_array_serial. 'location_info' "
+                       "is not in host['capabilities']."))
+            return false_ret
+
+        # mandatory info should be ok
+        info = host['capabilities']['location_info']
+        LOG.debug("Host for migration is %s." % info)
+        try:
+            info_detail = info.split('|')
+            target_pool_name = info_detail[0]
+            target_array_serial = info_detail[1]
+        except AttributeError:
+            LOG.warn(_("Error on parsing target_pool_name/"
+                       "target_array_serial."))
+            return false_ret
+
+        if len(target_pool_name) == 0:
+            # if retype, try to get the pool of the volume
+            # when it's array-based
+            if new_type:
+                if 'storagetype:pool' in new_type['extra_specs']\
+                        and new_type['extra_specs']['storagetype:pool']\
+                        is not None:
+                    target_pool_name = \
+                        new_type['extra_specs']['storagetype:pool']
+                else:
+                    target_pool_name = self._client.get_pool_name_of_lun(
+                        volume['name'])
+
+        if len(target_pool_name) == 0:
+            LOG.debug("Skip storage-assisted migration because "
+                      "it doesn't support array backend.")
+            return false_ret
+        # source and destination should be on same array
+        array_serial = self._client.get_array_serial()
+        if target_array_serial != array_serial['array_serial']:
+            LOG.debug('Skip storage-assisted migration because '
+                      'target and source backend are not managing '
+                      'the same array.')
+            return false_ret
+        # same protocol should be used if volume is in-use
+        if host['capabilities']['storage_protocol'] != self.protocol \
+                and self._get_original_status(volume) == 'in-use':
+            LOG.debug('Skip storage-assisted migration because '
+                      'an in-use volume cannot be '
+                      'migrated between different protocols.')
+            return false_ret
+
+        return (True, target_pool_name)
+
+    @log_enter_exit
+    def migrate_volume(self, ctxt, volume, host, new_type=None):
+        """Leverage the VNX on-array migration functionality.
+
+        This method is invoked at the source backend.
+        """
+        false_ret = (False, None)
+        is_valid, target_pool_name = \
+            self._is_valid_for_storage_assisted_migration(
+                volume, host, new_type)
+        if not is_valid:
+            return false_ret
+
+        return self._migrate_volume(volume, target_pool_name, new_type)
+
+    def _migrate_volume(self, volume, target_pool_name, new_type=None):
+        LOG.debug("Starting real storage-assisted migration...")
+        # first create a new volume with same name and size of source volume
+        volume_name = volume['name']
+        new_volume_name = "%(src)s-%(ts)s" % {'src': volume_name,
+                                              'ts': int(time.time())}
+        src_id = self.get_lun_id(volume)
+
+        provisioning = 'thick'
+        tiering = None
+        if new_type:
+            provisioning, tiering = self.get_extra_spec_value(
+                new_type['extra_specs'])
+        else:
+            provisioning, tiering = self.get_extra_spec_value(
+                self.get_volumetype_extraspecs(volume))
+
+        self._client.create_lun_with_advance_feature(
+            target_pool_name, new_volume_name, volume['size'],
+            provisioning, tiering)
+
+        dst_id = self.get_lun_id_by_name(new_volume_name)
+        moved = self._client.migrate_lun_with_verification(
+            src_id, dst_id, new_volume_name)
+
+        return moved, {}
+
+    @log_enter_exit
+    def retype(self, ctxt, volume, new_type, diff, host):
+        new_specs = new_type['extra_specs']
+        new_provisioning, new_tiering = self.get_extra_spec_value(
+            new_specs)
+
+        # validate new_type
+        if new_provisioning:
+            self.check_extra_spec_value(
+                new_provisioning,
+                self._client.provisioning_values.keys())
+        if new_tiering:
+            self.check_extra_spec_value(
+                new_tiering,
+                self._client.tiering_values.keys())
+        self.check_extra_spec_combination(new_specs)
+
+        # check what changes are needed
+        migration, tiering_change = self.determine_changes_when_retype(
+            volume, new_type, host)
+
+        # reject if volume has snapshot when migration is needed
+        if migration and self._client.check_lun_has_snap(
+                self.get_lun_id(volume)):
+            LOG.debug('Driver is not able to do retype because the volume '
+                      'has snapshot which is forbidden to migrate.')
+            return False
+
+        if migration:
+            # check whether the migration is valid
+            is_valid, target_pool_name = (
+                self._is_valid_for_storage_assisted_migration(
+                    volume, host, new_type))
+            if is_valid:
+                if self._migrate_volume(
+                        volume, target_pool_name, new_type)[0]:
+                    return True
+                else:
+                    LOG.warn(_('Storage-assisted migration failed during '
+                               'retype.'))
+                    return False
+            else:
+                # migration is invalid
+                LOG.debug('Driver is not able to do retype because '
+                          'storage-assisted migration is not valid '
+                          'in this situation.')
+                return False
+        elif not migration and tiering_change:
+            # modify lun to change tiering policy
+            self._client.modify_lun_tiering(volume['name'], new_tiering)
+            return True
+        else:
+            return True
+
+    def determine_changes_when_retype(self, volume, new_type, host):
+        migration = False
+        tiering_change = False
+
+        old_specs = self.get_volumetype_extraspecs(volume)
+        old_provisioning, old_tiering = self.get_extra_spec_value(
+            old_specs)
+        old_pool = self.get_specific_extra_spec(
+            old_specs,
+            self._client.pool_spec)
+
+        new_specs = new_type['extra_specs']
+        new_provisioning, new_tiering = self.get_extra_spec_value(
+            new_specs)
+        new_pool = self.get_specific_extra_spec(
+            new_specs,
+            self._client.pool_spec)
+
+        if volume['host'] != host['host'] or \
+                old_provisioning != new_provisioning:
+            migration = True
+        elif new_pool and new_pool != old_pool:
+            migration = True
+
+        if new_tiering != old_tiering:
+            tiering_change = True
+        return migration, tiering_change
+
+    def get_specific_extra_spec(self, specs, key):
+        return specs.get(key, None)
+
+    def determine_all_enablers_exist(self, enablers):
+        """Determine all wanted enablers whether exist."""
+        wanted = ['-ThinProvisioning',
+                  '-Deduplication',
+                  '-FAST',
+                  '-Compression']
+        for each in wanted:
+            if each not in enablers:
+                return False
+        return True
+
+    @log_enter_exit
+    def update_volume_stats(self):
+        """Update the status entries common to the pool-based
+        and array-based backends.
+        """
+        if not self.determine_all_enablers_exist(self.enablers):
+            self.enablers = self._client.get_enablers_on_array(NO_POLL)
+        if '-Compression' in self.enablers:
+            self.stats['compression_support'] = 'True'
+        else:
+            self.stats['compression_support'] = 'False'
+        if '-FAST' in self.enablers:
+            self.stats['fast_support'] = 'True'
+        else:
+            self.stats['fast_support'] = 'False'
+        if '-Deduplication' in self.enablers:
+            self.stats['deduplication_support'] = 'True'
+        else:
+            self.stats['deduplication_support'] = 'False'
+        if '-ThinProvisioning' in self.enablers:
+            self.stats['thinprovisioning_support'] = 'True'
+        else:
+            self.stats['thinprovisioning_support'] = 'False'
+        if '-FASTCache' in self.enablers:
+            self.stats['fast_cache_enabled'] = 'True'
+        else:
+            self.stats['fast_cache_enabled'] = 'False'
 
         return self.stats
 
+    @log_enter_exit
     def create_export(self, context, volume):
         """Driver entry point to get the export info for a new volume."""
         volumename = volume['name']
 
-        device_id = self._find_lun_id(volumename)
+        data = self._client.get_lun_by_name(volumename)
 
-        LOG.debug('create_export: Volume: %(volume)s  Device ID: '
-                  '%(device_id)s'
+        device_id = data['lun_id']
+
+        LOG.debug('Exiting EMCVnxCliBase.create_export: Volume: %(volume)s '
+                  'Device ID: %(device_id)s'
                   % {'volume': volumename,
                      'device_id': device_id})
 
         return {'provider_location': device_id}
 
-    def _find_lun_id(self, volumename):
-        """Returns the LUN of a volume."""
-
-        lun_list = ('lun', '-list', '-name', volumename)
-
-        out, rc = self._cli_execute(*lun_list)
-        if rc == 0:
-            vol_details = out.split('\n')
-            lun = vol_details[0].split(' ')[3]
-        else:
-            msg = (_('Failed to list %s'), volumename)
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        return lun
-
+    @log_enter_exit
     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
-        LOG.debug('Entering create_snapshot.')
+
         snapshotname = snapshot['name']
         volumename = snapshot['volume_name']
+
         LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s')
                  % {'snapshot': snapshotname,
                     'volume': volumename})
 
-        volume_lun = self._find_lun_id(volumename)
-
-        # defining CLI command
-        snap_create = ('snap', '-create',
-                       '-res', volume_lun,
-                       '-name', snapshotname,
-                       '-allowReadWrite', 'yes')
-        # executing CLI command to create snapshot
-        out, rc = self._cli_execute(*snap_create)
-
-        LOG.debug('Create Snapshot: %(snapshotname)s  Unity: %(out)s'
-                  % {'snapshotname': snapshotname,
-                     'out': out})
-        if rc != 0:
-            msg = (_('Failed to create snap %s'), snapshotname)
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
+        self._client.create_snapshot(volumename, snapshotname)
 
+    @log_enter_exit
     def delete_snapshot(self, snapshot):
         """Deletes a snapshot."""
-        LOG.debug('Entering delete_snapshot.')
 
         snapshotname = snapshot['name']
-        volumename = snapshot['volume_name']
-        LOG.info(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s')
-                 % {'snapshot': snapshotname,
-                    'volume': volumename})
 
-        def _wait_for_snap_delete(snapshot, start_time):
-            # defining CLI command
-            snapshotname = snapshot['name']
-            volumename = snapshot['volume_name']
-            snap_destroy = ('snap', '-destroy', '-id', snapshotname, '-o')
-            # executing CLI command
-            out, rc = self._cli_execute(*snap_destroy)
-
-            LOG.debug('Delete Snapshot: Volume: %(volumename)s  Snapshot: '
-                      '%(snapshotname)s  Output: %(out)s'
-                      % {'volumename': volumename,
-                         'snapshotname': snapshotname,
-                         'out': out})
-
-            if rc not in [0, 9, 5]:
-                if rc == 13:
-                    if int(time.time()) - start_time < \
-                            self.timeout * 60:
-                        LOG.info(_('Snapshot %s is in use'), snapshotname)
-                    else:
-                        msg = (_('Failed to destroy %s '
-                               ' because snapshot is in use.'), snapshotname)
-                        LOG.error(msg)
-                        raise exception.SnapshotIsBusy(data=msg)
-                else:
-                    msg = (_('Failed to destroy %s'), snapshotname)
-                    LOG.error(msg)
-                    raise exception.VolumeBackendAPIException(data=msg)
-            else:
-                raise loopingcall.LoopingCallDone()
+        LOG.info(_('Delete Snapshot: %(snapshot)s')
+                 % {'snapshot': snapshotname})
 
-        timer = loopingcall.FixedIntervalLoopingCall(
-            _wait_for_snap_delete, snapshot, int(time.time()))
-        timer.start(interval=self.wait_interval).wait()
+        self._client.delete_snapshot(snapshotname)
 
+    @log_enter_exit
     def create_volume_from_snapshot(self, volume, snapshot):
         """Creates a volume from a snapshot."""
-
-        LOG.debug('Entering create_volume_from_snapshot.')
-
-        snapshotname = snapshot['name']
+        snapshot_name = snapshot['name']
         source_volume_name = snapshot['volume_name']
-        volumename = volume['name']
-        volumesize = snapshot['volume_size']
+        volume_name = volume['name']
+        volume_size = snapshot['volume_size']
 
-        destvolumename = volumename + 'dest'
-
-        # Create a mount point, migrate data from source (snapshot) to
-        # destination volume.  The destination volume is the only new volume
-        # to be created here.
-        LOG.info(_('Creating Destination Volume : %s ') % (destvolumename))
-
-        poolname = self.pool_name
-        thinness = self._get_provisioning_by_volume(volume)
         # defining CLI command
-        lun_create = ('lun', '-create', '-type', thinness,
-                      '-capacity', volumesize, '-sq', 'gb',
-                      '-poolName', poolname,
-                      '-name', destvolumename)
-        # executing CLI command
-        out, rc = self._cli_execute(*lun_create)
-
-        LOG.debug('Create temporary Volume: %(volumename)s  '
-                  'Output : %(out)s'
-                  % {'volumename': destvolumename, 'out': out})
-
-        if rc != 0:
-            msg = (_('Command to create the destination volume failed'))
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
+        self._client.create_mount_point(source_volume_name, volume_name)
 
         # defining CLI command
-        smp_create = ('lun', '-create', '-type', 'Snap',
-                      '-primaryLunName', source_volume_name,
-                      '-name', volumename)
-
-        # executing CLI command
-        out, rc = self._cli_execute(*smp_create)
-        LOG.debug('Create mount point : Volume: %(volumename)s  '
-                  'Source Volume: %(sourcevolumename)s  Output: %(out)s'
-                  % {'volumename': volumename,
-                     'sourcevolumename': source_volume_name,
-                     'out': out})
+        self._client.attach_mount_point(volume_name, snapshot_name)
 
-        if rc != 0:
-            msg = (_('Failed to create SMP %s'), volumename)
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-
-        # defining CLI command
-        lun_attach = ('lun', '-attach',
-                      '-name', volumename,
-                      '-snapName', snapshotname)
-
-        # executing CLI command
-        out, rc = self._cli_execute(*lun_attach)
-        LOG.debug('Attaching mount point Volume: %(volumename)s  '
-                  'with  Snapshot: %(snapshotname)s  Output: %(out)s'
-                  % {'volumename': volumename,
-                     'snapshotname': snapshotname,
-                     'out': out})
-
-        if rc != 0:
-            msg = (_('Failed to attach snapshotname %s'), snapshotname)
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-
-        source_vol_lun = self._find_lun_id(volumename)
-        dest_vol_lun = self._find_lun_id(destvolumename)
-
-        LOG.info(_('Migrating Mount Point Volume: %s ') % (volumename))
-
-        # defining CLI command
-        migrate_start = ('migrate', '-start',
-                         '-source', source_vol_lun,
-                         '-dest', dest_vol_lun,
-                         '-rate', 'ASAP', '-o')
-
-        # executing CLI command
-        out, rc = self._cli_execute(*migrate_start)
+        dest_volume_name = volume_name + '_dest'
 
-        LOG.debug('Migrate Mount Point  Volume: %(volumename)s  '
-                  'Output : %(out)s'
-                  % {'volumename': volumename,
-                     'out': out})
-
-        if rc != 0:
-            msg = (_('Failed to start migrating SMP %s'), volumename)
+        LOG.debug('Creating Temporary Volume: %s ' % dest_volume_name)
+        pool_name = self.get_target_storagepool(volume, source_volume_name)
+        try:
+            self._volume_creation_check(volume)
+            specs = self.get_volumetype_extraspecs(volume)
+            provisioning, tiering = self.get_extra_spec_value(specs)
+            self._client.create_lun_with_advance_feature(
+                pool_name, dest_volume_name, volume_size,
+                provisioning, tiering)
+        except exception.VolumeBackendAPIException as ex:
+            msg = (_('Command to create the temporary Volume %s failed')
+                   % dest_volume_name)
             LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-
-        def _wait_for_sync_status(volumename, start_time):
-            lun_list = ('lun', '-list', '-name', volumename,
-                        '-attachedSnapshot')
-            out, rc = self._cli_execute(*lun_list)
-            if rc == 0:
-                vol_details = out.split('\n')
-                snapshotname = vol_details[2].split(':')[1].strip()
-            if (snapshotname == 'N/A'):
-                raise loopingcall.LoopingCallDone()
-            else:
-                LOG.info(_('Waiting for the update on Sync status of %s'),
-                         volumename)
-                if int(time.time()) - start_time >= self.timeout * 60:
-                    msg = (_('Failed to really migrate %s'), volumename)
-                    LOG.error(msg)
-                    raise exception.VolumeBackendAPIException(data=msg)
-
-        timer = loopingcall.FixedIntervalLoopingCall(
-            _wait_for_sync_status, volumename, int(time.time()))
-        timer.start(interval=self.wait_interval).wait()
-
+            raise ex
+
+        source_vol_lun_id = self.get_lun_id(volume)
+        temp_vol_lun_id = self.get_lun_id_by_name(dest_volume_name)
+
+        LOG.debug('Migrating Mount Point Volume: %s ' % volume_name)
+        self._client.migrate_lun_with_verification(source_vol_lun_id,
+                                                   temp_vol_lun_id,
+                                                   dest_volume_name)
+        self._client.check_smp_not_attached(volume_name)
+        data = self._client.get_lun_by_name(volume_name)
+        pl_dict = {'system': self.get_array_serial(),
+                   'type': 'lun',
+                   'id': str(data['lun_id'])}
+        model_update = {'provider_location':
+                        self.dumps_provider_location(pl_dict)}
+        volume['provider_location'] = model_update['provider_location']
+        return model_update
+
+    @log_enter_exit
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
-
         source_volume_name = src_vref['name']
-        volumesize = src_vref['size']
-        snapshotname = source_volume_name + '-temp-snapshot'
+        volume_size = src_vref['size']
+        snapshot_name = 'tmp-snap-%s' % volume['id']
 
         snapshot = {
-            'name': snapshotname,
+            'name': snapshot_name,
             'volume_name': source_volume_name,
-            'volume_size': volumesize,
+            'volume_size': volume_size,
         }
-
         # Create temp Snapshot
         self.create_snapshot(snapshot)
-
+        # Create volume
+        model_update = self.create_volume_from_snapshot(volume, snapshot)
+        # Delete temp Snapshot
+        self.delete_snapshot(snapshot)
+        return model_update
+
+    def get_lun_id_by_name(self, volume_name):
+        data = self._client.get_lun_by_name(volume_name)
+        return data['lun_id']
+
+    def get_lun_id(self, volume):
+        lun_id = None
         try:
-            # Create volume
-            self.create_volume_from_snapshot(volume, snapshot)
-        except Exception:
-            msg = (_('Failed to create cloned volume %s'), volume['name'])
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        finally:
-            # Delete temp Snapshot
-            self.delete_snapshot(snapshot)
-
-    def get_storage_group(self, hostname):
-        """Returns the storage group for the host node."""
-
-        storage_groupname = hostname
-
-        sg_list = ('storagegroup', '-list', '-gname', storage_groupname)
-
-        out, rc = self._cli_execute(*sg_list)
-
-        if rc != 0:
-            LOG.debug('creating new storage group %s', storage_groupname)
-
-            sg_create = ('storagegroup', '-create',
-                         '-gname', storage_groupname)
-            out, rc = self._cli_execute(*sg_create)
-            LOG.debug('Create new storage group : %(storage_groupname)s, '
-                      'Output: %(out)s'
-                      % {'storage_groupname': storage_groupname,
-                         'out': out})
-
-            if rc != 0:
-                msg = (_('Failed to create SG %s'), storage_groupname)
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-
-            # connecting the new storagegroup to the host
-            connect_host = ('storagegroup', '-connecthost',
-                            '-host', hostname,
-                            '-gname', storage_groupname,
-                            '-o')
-
-            out, rc = self._cli_execute(*connect_host)
-            LOG.debug('Connect storage group : %(storage_groupname)s ,'
-                      'To Host : %(hostname)s, Output : %(out)s'
-                      % {'storage_groupname': storage_groupname,
-                         'hostname': hostname,
-                         'out': out})
-
-            if rc != 0:
-                msg = (_('Failed to connect %s'), hostname)
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
+            if volume.get('provider_location') is not None:
+                lun_id = int(
+                    volume['provider_location'].split('|')[2].split('^')[1])
+            if not lun_id:
+                LOG.debug('Lun id is not stored in provider location, '
+                          'query it.')
+                lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
+        except Exception as ex:
+            LOG.debug('Exception when getting lun id: %s.' % (ex))
+            lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
+        LOG.debug('Get lun_id: %s.' % (lun_id))
+        return lun_id
+
+    def get_lun_map(self, storage_group):
+        data = self._client.get_storage_group(storage_group)
+        return data['lunmap']
+
+    def get_storage_group_uid(self, name):
+        data = self._client.get_storage_group(name)
+        return data['storage_group_uid']
+
+    def assure_storage_group(self, storage_group):
+        try:
+            self._client.create_storage_group(storage_group)
+        except EMCVnxCLICmdError as ex:
+            if ex.out.find("Storage Group name already in use") == -1:
+                raise ex
 
+    def assure_host_in_storage_group(self, hostname, storage_group):
+        try:
+            self._client.connect_host_to_storage_group(hostname, storage_group)
+        except EMCVnxCLICmdError as ex:
+            if ex.rc == 83:
+                # SG was not created or was destroyed by another concurrent
+                # operation before connected.
+                # Create SG and try to connect again
+                LOG.warn(_('Storage Group %s is not found. Create it.'),
+                         storage_group)
+                self.assure_storage_group(storage_group)
+                self._client.connect_host_to_storage_group(
+                    hostname, storage_group)
+            else:
+                raise ex
         return hostname
 
     def find_device_details(self, volume, storage_group):
         """Returns the Host Device number for the volume."""
 
-        allocated_lun_id = self._find_lun_id(volume["name"])
         host_lun_id = -1
-        owner_sp = ""
-        lun_map = {}
 
-        sg_list = ('storagegroup', '-list', '-gname', storage_group)
-        out, rc = self._cli_execute(*sg_list)
-        if out.find('HLU/ALU Pairs') == -1:
-            LOG.info(_('NO LUNs in the storagegroup : %s ')
-                     % (storage_group))
-        else:
-            sg_details = out.split('HLU/ALU Pairs:')[1]
-            sg_lun_details = sg_details.split('Shareable')[0]
-            lun_details = sg_lun_details.split('\n')
-
-            for data in lun_details:
-                if data not in ['', '  HLU Number     ALU Number',
-                                '  ----------     ----------']:
-                    data = data.strip()
-                    items = data.split(' ')
-                    lun_map[int(items[len(items) - 1])] = int(items[0])
-            for lun in lun_map.iterkeys():
-                if lun == int(allocated_lun_id):
-                    host_lun_id = lun_map[lun]
-                    LOG.debug('Host Lun Id : %s' % (host_lun_id))
-                    break
-
-        # finding the owner SP for the LUN
-        lun_list = ('lun', '-list', '-l', allocated_lun_id, '-owner')
-        out, rc = self._cli_execute(*lun_list)
-        if rc == 0:
-            output = out.split('\n')
-            owner_sp = output[2].split('Current Owner:  SP ')[1]
-            LOG.debug('Owner SP : %s' % (owner_sp))
+        data = self._client.get_storage_group(storage_group)
+        lun_map = data['lunmap']
+        data = self._client.get_lun_by_name(volume['name'])
+        allocated_lun_id = data['lun_id']
+        owner_sp = data['owner']
+
+        for lun in lun_map.iterkeys():
+            if lun == int(allocated_lun_id):
+                host_lun_id = lun_map[lun]
+                LOG.debug('Host Lun Id : %s' % (host_lun_id))
+                break
+
+        LOG.debug('Owner SP : %s' % (owner_sp))
 
         device = {
             'hostlunid': host_lun_id,
@@ -577,162 +1860,573 @@ class EMCVnxCli(object):
         }
         return device
 
-    def _get_host_lun_id(self, host_lun_id_list):
-        # Returns the host lun id for the LUN to be added
-        # in the storage group.
+    def filter_available_hlu_set(self, used_hlus):
+        used_hlu_set = set(used_hlus)
+        return self.hlu_set - used_hlu_set
 
-        used_hlu_set = set(host_lun_id_list)
-        for hlu in self.hlu_set - used_hlu_set:
-            return hlu
-        return None
-
-    def _add_lun_to_storagegroup(self, volume, storage_group):
+    def _extract_iscsi_uids(self, connector):
+        if 'initiator' not in connector:
+            if self.protocol == 'iSCSI':
+                msg = (_('Host %s has no iSCSI initiator')
+                       % connector['host'])
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            else:
+                return ()
+        return [connector['initiator']]
 
-        storage_groupname = storage_group
-        volumename = volume['name']
-        allocated_lun_id = self._find_lun_id(volumename)
-        count = 0
-        while(count < 5):
-            device_info = self.find_device_details(volume, storage_group)
-            device_number = device_info['hostlunid']
-            if device_number < 0:
-                lun_map = device_info['lunmap']
-                if lun_map:
-                    host_lun_id_list = lun_map.values()
-
-                    if len(host_lun_id_list) >= self.max_luns:
-                        msg = (_('The storage group has reached the '
-                                 'maximum capacity of LUNs. '
-                                 'Command to add LUN for volume - %s '
-                                 'in storagegroup failed') % (volumename))
-                        LOG.error(msg)
-                        raise exception.VolumeBackendAPIException(data=msg)
-
-                    host_lun_id = self._get_host_lun_id(host_lun_id_list)
-
-                    if host_lun_id is None:
-                        msg = (_('Unable to get new host lun id. Please '
-                                 'check if the storage group can accommodate '
-                                 'new LUN. '
-                                 'Command to add LUN for volume - %s '
-                                 'in storagegroup failed') % (volumename))
-                        LOG.error(msg)
-                        raise exception.VolumeBackendAPIException(data=msg)
-                else:
-                    host_lun_id = 1
-
-                addhlu = ('storagegroup', '-addhlu', '-o',
-                          '-gname', storage_groupname,
-                          '-hlu', host_lun_id,
-                          '-alu', allocated_lun_id)
-                out, rc = self._cli_execute(*addhlu)
-                LOG.debug('Add ALU %(alu)s to SG %(sg)s as %(hlu)s. '
-                          'Output: %(out)s'
-                          % {'alu': allocated_lun_id,
-                             'sg': storage_groupname,
-                             'hlu': host_lun_id,
-                             'out': out})
-                if rc == 0:
-                    return host_lun_id
-                if rc == 66:
-                    LOG.warn(_('Requested Host LUN Number already in use'))
-                count += 1
+    def _extract_fc_uids(self, connector):
+        if 'wwnns' not in connector or 'wwpns' not in connector:
+            if self.protocol == 'FC':
+                msg = _('Host %s has no FC initiators') % connector['host']
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
             else:
-                LOG.warn(_('LUN was already added in the storage group'))
-                return device_number
+                return ()
+        wwnns = connector['wwnns']
+        wwpns = connector['wwpns']
+        wwns = [(node + port).upper() for node, port in zip(wwnns, wwpns)]
+        return map(lambda wwn: re.sub(r'\S\S',
+                                      lambda m: m.group(0) + ':',
+                                      wwn,
+                                      len(wwn) / 2 - 1),
+                   wwns)
+
+    def _exec_command_setpath(self, initiator_uid, sp, port_id,
+                              ip, host, vport_id=None):
+        gname = host
+        if vport_id is not None:
+            cmd_iscsi_setpath = ('storagegroup', '-gname', gname, '-setpath',
+                                 '-hbauid', initiator_uid, '-sp', sp,
+                                 '-spport', port_id, '-spvport', vport_id,
+                                 '-ip', ip, '-host', host, '-o')
+            out, rc = self._client.command_execute(*cmd_iscsi_setpath)
+            if rc != 0:
+                raise EMCVnxCLICmdError(cmd_iscsi_setpath, rc, out)
+        else:
+            cmd_fc_setpath = ('storagegroup', '-gname', gname, '-setpath',
+                              '-hbauid', initiator_uid, '-sp', sp,
+                              '-spport', port_id,
+                              '-ip', ip, '-host', host, '-o')
+            out, rc = self._client.command_execute(*cmd_fc_setpath)
+            if rc != 0:
+                raise EMCVnxCLICmdError(cmd_fc_setpath, rc, out)
+
+    def _register_iscsi_initiator(self, ip, host, initiator_uids):
+        for initiator_uid in initiator_uids:
+            iscsi_targets = self._client.get_iscsi_targets()
+            LOG.info(_('Get ISCSI targets %(tg)s to register '
+                       'initiator %(in)s.')
+                     % ({'tg': iscsi_targets,
+                         'in': initiator_uid}))
+
+            target_portals_SPA = list(iscsi_targets['A'])
+            target_portals_SPB = list(iscsi_targets['B'])
+
+            for pa in target_portals_SPA:
+                sp = 'A'
+                port_id = pa['Port ID']
+                vport_id = pa['Virtual Port ID']
+                self._exec_command_setpath(initiator_uid, sp, port_id,
+                                           ip, host, vport_id)
+
+            for pb in target_portals_SPB:
+                sp = 'B'
+                port_id = pb['Port ID']
+                vport_id = pb['Virtual Port ID']
+                self._exec_command_setpath(initiator_uid, sp, port_id,
+                                           ip, host, vport_id)
+
+    def _register_fc_initiator(self, ip, host, initiator_uids):
+        for initiator_uid in initiator_uids:
+            fc_targets = self._client.get_fc_targets()
+            LOG.info(_('Get FC targets %(tg)s to register initiator %(in)s.')
+                     % ({'tg': fc_targets,
+                         'in': initiator_uid}))
+
+            target_portals_SPA = list(fc_targets['A'])
+            target_portals_SPB = list(fc_targets['B'])
+
+            for pa in target_portals_SPA:
+                sp = 'A'
+                port_id = pa['Port ID']
+                self._exec_command_setpath(initiator_uid, sp, port_id,
+                                           ip, host)
+
+            for pb in target_portals_SPB:
+                sp = 'B'
+                port_id = pb['Port ID']
+                self._exec_command_setpath(initiator_uid, sp, port_id,
+                                           ip, host)
+
+    def _filter_unregistered_initiators(self, initiator_uids=tuple()):
+        unregistered_initiators = []
+        if not initiator_uids:
+            return unregistered_initiators
+
+        command_get_storage_group = ('storagegroup', '-list')
+        out, rc = self._client.command_execute(*command_get_storage_group)
 
-        if count == 5:
-            msg = (_('Failed to add %s into SG') % (volumename))
+        if rc != 0:
+            raise EMCVnxCLICmdError(command_get_storage_group, rc, out)
+
+        for initiator_uid in initiator_uids:
+            m = re.search(initiator_uid, out)
+            if m is None:
+                unregistered_initiators.append(initiator_uid)
+        return unregistered_initiators
+
+    def auto_register_initiator(self, connector):
+        """Automatically register available initiators."""
+        initiator_uids = []
+        ip = connector['ip']
+        host = connector['host']
+        if self.protocol == 'iSCSI':
+            initiator_uids = self._extract_iscsi_uids(connector)
+            itors_toReg = self._filter_unregistered_initiators(initiator_uids)
+            LOG.debug('iSCSI Initiators %(in)s of %(ins)s need registration.'
+                      % ({'in': itors_toReg,
+                         'ins': initiator_uids}))
+            if not itors_toReg:
+                LOG.debug('Initiators %s are already registered'
+                          % initiator_uids)
+                return
+            self._register_iscsi_initiator(ip, host, itors_toReg)
+
+        elif self.protocol == 'FC':
+            initiator_uids = self._extract_fc_uids(connector)
+            itors_toReg = self._filter_unregistered_initiators(initiator_uids)
+            LOG.debug('FC Initiators %(in)s of %(ins)s need registration.'
+                      % ({'in': itors_toReg,
+                         'ins': initiator_uids}))
+            if not itors_toReg:
+                LOG.debug('Initiators %s are already registered.'
+                          % initiator_uids)
+                return
+            self._register_fc_initiator(ip, host, itors_toReg)
+
+    def assure_host_access(self, volumename, connector):
+        hostname = connector['host']
+        auto_registration_done = False
+        try:
+            self.get_storage_group_uid(hostname)
+        except EMCVnxCLICmdError as ex:
+            if ex.rc != 83:
+                raise ex
+            # Storage Group does not exist yet
+            self.assure_storage_group(hostname)
+            if self.itor_auto_reg:
+                self.auto_register_initiator(connector)
+                auto_registration_done = True
+            else:
+                self._client.connect_host_to_storage_group(hostname, hostname)
+
+        if self.itor_auto_reg and not auto_registration_done:
+            self.auto_register_initiator(connector)
+            auto_registration_done = True
+
+        lun_id = self.get_lun_id_by_name(volumename)
+        lun_map = self.get_lun_map(hostname)
+        if lun_id in lun_map:
+            return lun_map[lun_id]
+        used_hlus = lun_map.values()
+        if len(used_hlus) >= self.max_luns_per_sg:
+            msg = (_('Reach limitation set by configuration '
+                     'option max_luns_per_storage_group. '
+                     'Operation to add %(vol)s into '
+                     'Storage Group %(sg)s is rejected.')
+                   % {'vol': volumename, 'sg': hostname})
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)
 
-    def _remove_lun_from_storagegroup(self, device_number, storage_group):
-
-        storage_groupname = storage_group
-        removehlu = ('storagegroup', '-removehlu',
-                     '-gname', storage_groupname,
-                     '-hlu', device_number,
-                     '-o')
+        candidate_hlus = self.filter_available_hlu_set(used_hlus)
+        candidate_hlus = list(candidate_hlus)
+        random.shuffle(candidate_hlus)
+        for i, hlu in enumerate(candidate_hlus):
+            if i >= self.max_retries:
+                break
+            try:
+                self._client.add_hlu_to_storage_group(
+                    hlu,
+                    lun_id,
+                    hostname)
+                return hlu
+            except EMCVnxCLICmdError as ex:
+                # Retry
+                continue
+
+        msg = _("Failed to add %(vol)s into %(sg)s "
+                "after %(retries)s tries.") % \
+            {'vol': volumename,
+             'sg': hostname,
+             'retries': min(self.max_retries, len(candidate_hlus))}
+        LOG.error(msg)
+        raise exception.VolumeBackendAPIException(data=msg)
+
+    def vnx_get_iscsi_properties(self, volume, connector):
+        storage_group = connector['host']
+        device_info = self.find_device_details(volume, storage_group)
+        owner_sp = device_info['ownersp']
+        registered_spports = self._client.get_registered_spport_set(
+            connector['initiator'],
+            storage_group)
+        target = self._client.find_avaialable_iscsi_target_one(
+            storage_group, owner_sp,
+            registered_spports)
+        properties = {'target_discovered': True,
+                      'target_iqn': 'unknown',
+                      'target_portal': 'unknown',
+                      'target_lun': 'unknown',
+                      'volume_id': volume['id']}
+        if target:
+            properties = {'target_discovered': True,
+                          'target_iqn': target['Port WWN'],
+                          'target_portal': "%s:3260" % target['IP Address'],
+                          'target_lun': device_info['hostlunid']}
+            LOG.debug("iSCSI Properties: %s", properties)
+            auth = volume['provider_auth']
+            if auth:
+                (auth_method, auth_username, auth_secret) = auth.split()
+                properties['auth_method'] = auth_method
+                properties['auth_username'] = auth_username
+                properties['auth_password'] = auth_secret
+        else:
+            LOG.error(_('Failed to find an available iSCSI targets for %s.'),
+                      storage_group)
 
-        out, rc = self._cli_execute(*removehlu)
+        return properties
 
-        LOG.debug('Remove %(hlu)s from SG %(sg)s. Output: %(out)s'
-                  % {'hlu': device_number,
-                     'sg': storage_groupname,
-                     'out': out})
-        if rc != 0:
-            msg = (_('Failed to remove %(hlu)s from %(sg)s')
-                   % {'hlu': device_number, 'sg': storage_groupname})
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
+    def vnx_get_fc_properties(self, connector, device_number):
+        ports = self.get_login_ports(connector)
+        return {'target_lun': device_number,
+                'target_discovered': True,
+                'target_wwn': ports}
 
+    @log_enter_exit
     def initialize_connection(self, volume, connector):
-        """Initializes the connection and returns connection info."""
-
-        hostname = connector['host']
-        storage_group = self.get_storage_group(hostname)
-
-        device_number = self._add_lun_to_storagegroup(volume, storage_group)
-        return device_number
+        volume_metadata = {}
+        for metadata in volume['volume_admin_metadata']:
+            volume_metadata[metadata['key']] = metadata['value']
+        access_mode = volume_metadata.get('attached_mode')
+        if access_mode is None:
+            access_mode = ('ro'
+                           if volume_metadata.get('readonly') == 'True'
+                           else 'rw')
+        LOG.debug('Volume %(vol)s Access mode is: %(access)s.'
+                  % {'vol': volume['name'],
+                     'access': access_mode})
 
+        """Initializes the connection and returns connection info."""
+        @lockutils.synchronized('emc-connection-' + connector['host'],
+                                "emc-connection-", True)
+        def do_initialize_connection():
+            device_number = self.assure_host_access(
+                volume['name'], connector)
+            return device_number
+
+        if self.protocol == 'iSCSI':
+            do_initialize_connection()
+            iscsi_properties = self.vnx_get_iscsi_properties(volume,
+                                                             connector)
+            iscsi_properties['access_mode'] = access_mode
+            data = {'driver_volume_type': 'iscsi',
+                    'data': iscsi_properties}
+        elif self.protocol == 'FC':
+            device_number = do_initialize_connection()
+            fc_properties = self.vnx_get_fc_properties(connector,
+                                                       device_number)
+            fc_properties['volume_id'] = volume['id']
+            fc_properties['access_mode'] = access_mode
+            data = {'driver_volume_type': 'fibre_channel',
+                    'data': fc_properties}
+
+        return data
+
+    @log_enter_exit
     def terminate_connection(self, volume, connector):
         """Disallow connection from connector."""
-        hostname = connector['host']
-        storage_group = self.get_storage_group(hostname)
-        device_info = self.find_device_details(volume, storage_group)
-        device_number = device_info['hostlunid']
-        if device_number < 0:
-            LOG.error(_('Could not locate the attached volume.'))
-        else:
-            self._remove_lun_from_storagegroup(device_number, storage_group)
 
-    def _find_iscsi_protocol_endpoints(self, device_sp):
+        @lockutils.synchronized('emc-connection-' + connector['host'],
+                                "emc-connection-", True)
+        def do_terminate_connection():
+            hostname = connector['host']
+            volume_name = volume['name']
+            try:
+                lun_map = self.get_lun_map(hostname)
+            except EMCVnxCLICmdError as ex:
+                if ex.rc == 83:
+                    LOG.warn(_("Storage Group %s is not found. "
+                               "terminate_connection() is unnecessary."),
+                             hostname)
+                    return True
+            try:
+                lun_id = self.get_lun_id(volume)
+            except EMCVnxCLICmdError as ex:
+                if ex.rc == 9:
+                    LOG.warn(_("Volume %s is not found. "
+                               "It has probably been removed in VNX.")
+                             % volume_name)
+
+            if lun_id in lun_map:
+                self._client.remove_hlu_from_storagegroup(
+                    lun_map[lun_id], hostname)
+            else:
+                LOG.warn(_("Volume %(vol)s was not in Storage Group %(sg)s.")
+                         % {'vol': volume_name, 'sg': hostname})
+            if self.destroy_empty_sg or self.zonemanager_lookup_service:
+                try:
+                    lun_map = self.get_lun_map(hostname)
+                    if not lun_map:
+                        LOG.debug("Storage Group %s was empty.", hostname)
+                        if self.destroy_empty_sg:
+                            LOG.info(_("Storage Group %s was empty, "
+                                       "destroy it."), hostname)
+                            self._client.disconnect_host_from_storage_group(
+                                hostname, hostname)
+                            self._client.delete_storage_group(hostname)
+                        return True
+                    else:
+                        LOG.debug("Storage Group %s not empty,", hostname)
+                        return False
+                except Exception:
+                    LOG.warn(_("Failed to destroy Storage Group %s."),
+                             hostname)
+            else:
+                return False
+        return do_terminate_connection()
+
+    @log_enter_exit
+    def adjust_fc_conn_info(self, conn_info, connector, remove_zone=None):
+        target_wwns, itor_tgt_map = self.get_initiator_target_map(
+            connector['wwpns'],
+            self.get_status_up_ports(connector))
+        if target_wwns:
+            conn_info['data']['target_wwn'] = target_wwns
+        if remove_zone is None or remove_zone:
+            # Return initiator_target_map when called from
+            # initialize_connection (remove_zone is None) or from
+            # terminate_connection with no volumes left (remove_zone is True)
+            conn_info['data']['initiator_target_map'] = itor_tgt_map
+        return conn_info
+
+    @log_enter_exit
+    def manage_existing_get_size(self, volume, ref):
+        """Return size of volume to be managed by manage_existing.
+        """
+        # Check that the reference is valid
+        if 'id' not in ref:
+            reason = _('Reference must contain lun_id element.')
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=ref,
+                reason=reason)
+
+        # Check for existence of the lun
+        data = self._client.get_lun_by_id(ref['id'])
+        if data is None:
+            reason = _('Find no lun with the specified lun_id.')
+            raise exception.ManageExistingInvalidReference(existing_ref=ref,
+                                                           reason=reason)
+        return data['total_capacity_gb']
+
+    @log_enter_exit
+    def manage_existing(self, volume, ref):
+        raise NotImplementedError
+
+    def find_iscsi_protocol_endpoints(self, device_sp):
         """Returns the iSCSI initiators for a SP."""
+        return self._client.get_iscsi_protocol_endpoints(device_sp)
+
+    def get_login_ports(self, connector):
+        return self._client.get_login_ports(connector['host'],
+                                            connector['wwpns'])
+
+    def get_status_up_ports(self, connector):
+        return self._client.get_status_up_ports(connector['host'])
+
+    def get_initiator_target_map(self, fc_initiators, fc_targets):
+        target_wwns = []
+        itor_tgt_map = {}
+
+        if self.zonemanager_lookup_service:
+            mapping = \
+                self.zonemanager_lookup_service. \
+                get_device_mapping_from_network(fc_initiators, fc_targets)
+            for each in mapping:
+                map_d = mapping[each]
+                target_wwns.extend(map_d['target_port_wwn_list'])
+                for initiator in map_d['initiator_port_wwn_list']:
+                    itor_tgt_map[initiator] = map_d['target_port_wwn_list']
+        return list(set(target_wwns)), itor_tgt_map
+
+    def get_volumetype_extraspecs(self, volume):
+        specs = {}
 
-        initiator_address = []
+        type_id = volume['volume_type_id']
+        if type_id is not None:
+            specs = volume_types.get_volume_type_extra_specs(type_id)
 
-        connection_getport = ('connection', '-getport', '-sp', device_sp)
-        out, _rc = self._cli_execute(*connection_getport)
-        output = out.split('SP:  ')
+        return specs
 
-        for port in output:
-            port_info = port.split('\n')
-            if port_info[0] == device_sp:
-                port_wwn = port_info[2].split('Port WWN:')[1].strip()
-                initiator_address.append(port_wwn)
 
-        LOG.debug('WWNs found for SP %(devicesp)s '
-                  'are: %(initiator_address)s'
-                  % {'devicesp': device_sp,
-                     'initiator_address': initiator_address})
+class EMCVnxCliPool(EMCVnxCliBase):
+
+    def __init__(self, prtcl, configuration):
+        super(EMCVnxCliPool, self).__init__(prtcl, configuration=configuration)
+        self.storage_pool = configuration.storage_vnx_pool_name.strip()
+        self._client.get_pool(self.storage_pool)
+
+    def get_target_storagepool(self,
+                               volume=None,
+                               source_volume_name=None):
+        pool_spec_id = "storagetype:pool"
+        if volume is not None:
+            specs = self.get_volumetype_extraspecs(volume)
+            if specs and pool_spec_id in specs:
+                expect_pool = specs[pool_spec_id].strip()
+                if expect_pool != self.storage_pool:
+                    msg = _("Storage pool %s is not supported"
+                            " by this Cinder Volume") % expect_pool
+                    LOG.error(msg)
+                    raise exception.VolumeBackendAPIException(data=msg)
+        return self.storage_pool
 
-        return initiator_address
+    def is_pool_fastcache_enabled(self, storage_pool, no_poll=False):
+        command_check_fastcache = None
+        if no_poll:
+            command_check_fastcache = ('-np', 'storagepool', '-list', '-name',
+                                       storage_pool, '-fastcache')
+        else:
+            command_check_fastcache = ('storagepool', '-list', '-name',
+                                       storage_pool, '-fastcache')
+        out, rc = self._client.command_execute(*command_check_fastcache)
 
-    def _get_volumetype_extraspecs(self, volume):
-        specs = {}
+        if 0 != rc:
+            raise EMCVnxCLICmdError(command_check_fastcache, rc, out)
+        else:
+            re_fastcache = 'FAST Cache:\s*(.*)\s*'
+            m = re.search(re_fastcache, out)
+            if m is not None:
+                result = True if 'Enabled' == m.group(1) else False
+            else:
+                LOG.error(_("Error parsing output for FastCache Command."))
+        return result
+
+    @log_enter_exit
+    def update_volume_stats(self):
+        """Retrieve stats info."""
+        self.stats = super(EMCVnxCliPool, self).update_volume_stats()
+        data = self._client.get_pool(self.get_target_storagepool())
+        self.stats['total_capacity_gb'] = data['total_capacity_gb']
+        self.stats['free_capacity_gb'] = data['free_capacity_gb']
+
+        array_serial = self._client.get_array_serial(NO_POLL)
+        self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
+                                       {'pool_name': self.storage_pool,
+                                        'array_serial':
+                                           array_serial['array_serial']})
+        # check if this pool's fast_cache is really enabled
+        if self.stats['fast_cache_enabled'] == 'True' and \
+           not self.is_pool_fastcache_enabled(self.storage_pool, NO_POLL):
+            self.stats['fast_cache_enabled'] = 'False'
+        return self.stats
 
-        type_id = volume['volume_type_id']
-        if type_id is not None:
-            specs = volume_types.get_volume_type_extra_specs(type_id)
+    @log_enter_exit
+    def manage_existing(self, volume, ref):
+        """Manage an existing lun in the array.
 
-        return specs
+        The LUN should be in a manageable pool backend, otherwise
+        an error is returned.
+        Rename the backend storage object so that it matches the
+        volume['name'], which is how drivers traditionally map between a
+        cinder volume and the associated backend storage object.
 
-    def _get_provisioning_by_volume(self, volume):
-        # By default, the user can not create thin LUN without thin
-        # provisioning enabler.
-        thinness = 'NonThin'
-        spec_id = 'storagetype:provisioning'
-
-        specs = self._get_volumetype_extraspecs(volume)
-        if specs and spec_id in specs:
-            provisioning = specs[spec_id].lower()
-            if 'thin' == provisioning:
-                thinness = 'Thin'
-            elif 'thick' != provisioning:
-                LOG.warning(_('Invalid value of extra spec '
-                            '\'storagetype:provisioning\': %(provisioning)s')
-                            % {'provisioning': specs[spec_id]})
+        existing_ref:{
+            'id':lun_id
+        }
+        """
+
+        data = self._client.get_lun_by_id(
+            ref['id'], self._client.LUN_WITH_POOL)
+        if self.storage_pool != data['pool']:
+            reason = _('The input lun is not in a manageable pool backend '
+                       'by cinder')
+            raise exception.ManageExistingInvalidReference(existing_ref=ref,
+                                                           reason=reason)
+        self._client.lun_rename(ref['id'], volume['name'])
+
+
+class EMCVnxCliArray(EMCVnxCliBase):
+
+    def __init__(self, prtcl, configuration):
+        super(EMCVnxCliArray, self).__init__(prtcl,
+                                             configuration=configuration)
+        self._update_pool_cache()
+
+    def _update_pool_cache(self):
+        LOG.debug("Updating Pool Cache")
+        self.pool_cache = self._client.get_pool_list(NO_POLL)
+
+    def get_target_storagepool(self, volume, source_volume_name=None):
+        """Find the storage pool for given volume."""
+        pool_spec_id = "storagetype:pool"
+        specs = self.get_volumetype_extraspecs(volume)
+        if specs and pool_spec_id in specs:
+            return specs[pool_spec_id]
+        elif source_volume_name:
+            data = self._client.get_lun_by_name(source_volume_name,
+                                                [self._client.LUN_POOL])
+            if data is None:
+                msg = _("Failed to find storage pool for source volume %s") \
+                    % source_volume_name
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            return data[self._client.LUN_POOL.key]
         else:
-            LOG.info(_('No extra spec \'storagetype:provisioning\' exist'))
+            if len(self.pool_cache) > 0:
+                pools = sorted(self.pool_cache,
+                               key=lambda po: po['free_space'],
+                               reverse=True)
+                return pools[0]['name']
+
+        msg = (_("Failed to find storage pool to create volume %s.")
+               % volume['name'])
+        LOG.error(msg)
+        raise exception.VolumeBackendAPIException(data=msg)
+
+    @log_enter_exit
+    def update_volume_stats(self):
+        """Retrieve stats info."""
+        self.stats = super(EMCVnxCliArray, self).update_volume_stats()
+        self._update_pool_cache()
+        self.stats['total_capacity_gb'] = 'unknown'
+        self.stats['free_capacity_gb'] = 'unknown'
+        array_serial = self._client.get_array_serial(NO_POLL)
+        self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
+                                       {'pool_name': '',
+                                        'array_serial':
+                                        array_serial['array_serial']})
+        self.stats['fast_cache_enabled'] = 'unknown'
+        return self.stats
+
+    @log_enter_exit
+    def manage_existing(self, volume, ref):
+        """Rename the backend storage object so that it matches the,
+        volume['name'] which is how drivers traditionally map between a
+        cinder volume and the associated backend storage object.
+
+        existing_ref:{
+            'id':lun_id
+        }
+        """
+
+        self._client.lun_rename(ref['id'], volume['name'])
+
+
+def getEMCVnxCli(prtcl, configuration=None):
+    configuration.append_config_values(loc_opts)
+    pool_name = configuration.safe_get("storage_vnx_pool_name")
 
-        return thinness
+    if pool_name is None or len(pool_name.strip()) == 0:
+        return EMCVnxCliArray(prtcl, configuration=configuration)
+    else:
+        return EMCVnxCliPool(prtcl, configuration=configuration)
index 97776091a73b8fdd5eb6b4479d6d82dfcfd20ed9..3ee2451dcc8a0b3b5537ef5518e36be765525069 100644 (file)
 # Options defined in cinder.volume.drivers.emc.emc_vnx_cli
 #
 
-# Naviseccli Path (string value)
+# VNX authentication scope type. (string value)
+#storage_vnx_authentication_type=global
+
+# Directory path that contains the VNX security file. Make
+# sure the security file is generated first. (string value)
+#storage_vnx_security_file_dir=<None>
+
+# Naviseccli Path. (string value)
 #naviseccli_path=
 
-# ISCSI pool name (string value)
+# Storage pool name (string value)
 #storage_vnx_pool_name=<None>
 
-# Default Time Out For CLI operations in minutes (integer
-# value)
-#default_timeout=20
+# VNX secondary SP IP Address. (string value)
+#san_secondary_ip=<None>
 
-# Default max number of LUNs in a storage group (integer
-# value)
-#max_luns_per_storage_group=256
+# Default Time Out For CLI operations in minutes. By default,
+# it is 365 days long. (integer value)
+#default_timeout=525600
+
+# Default max number of LUNs in a storage group. By default,
+# the value is 255. (integer value)
+#max_luns_per_storage_group=255
+
+# Destroy the storage group when the last LUN is removed
+# from it. By default, the value is False. (boolean value)
+#destroy_empty_storage_group=false
+
+# Mapping between hostname and its iSCSI initiator IP
+# addresses. (string value)
+#iscsi_initiators=
+
+# Automatically register initiators. By default, the value is
+# False. (boolean value)
+#initiator_auto_registration=false
 
 
 #