review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
EMC VNX Cinder Driver Update
authorJeegn Chen <jeegn.chen@emc.com>
Sun, 14 Dec 2014 09:17:41 +0000 (17:17 +0800)
committerJeegn Chen <jeegn.chen@emc.com>
Wed, 4 Feb 2015 08:17:53 +0000 (16:17 +0800)
VNX Direct Driver was contributed in Icehouse and updated in Juno.
This commit is to continuously improve the driver with the
following enhancements in Kilo:

* Performance improvement, especially for the synchronized operations
initialize_connection and terminate_connection.
* LUN Number Threshold Support
* Initiator Auto Deregistration
* Force Deleting LUN in Storage Groups
* Code refactoring to enhance robustness

Change-Id: Id263a5d0405ba942582ce06beed09b436b80ff3c
Implements: blueprint emc-vnx-direct-driver-kilo-update

cinder/exception.py
cinder/tests/test_emc_vnxdirect.py
cinder/volume/drivers/emc/emc_cli_fc.py
cinder/volume/drivers/emc/emc_cli_iscsi.py
cinder/volume/drivers/emc/emc_vnx_cli.py

index b5458de25c67c16705c43458dde7ec4771194429..7559c8e9b6e2777ac1233270eb103b34eaca53e8 100644 (file)
@@ -844,23 +844,8 @@ class NetAppDriverException(VolumeDriverException):
 
 
 class EMCVnxCLICmdError(VolumeBackendAPIException):
-    def __init__(self, cmd=None, rc=None, out='',
-                 log_as_error=True, **kwargs):
-        self.cmd = cmd
-        self.rc = rc
-        self.out = out
-        msg = _("EMCVnxCLICmdError : %(cmd)s "
-                "(Return Code: %(rc)s) "
-                "(Output: %(out)s) ") % \
-            {'cmd': cmd,
-             'rc': rc,
-             'out': out.split('\n')}
-        kwargs["data"] = msg
-        super(EMCVnxCLICmdError, self).__init__(**kwargs)
-        if log_as_error:
-            LOG.error(msg)
-        else:
-            LOG.warn(msg)
+    message = _("EMC VNX Cinder Driver CLI exception: %(cmd)s "
+                "(Return Code: %(rc)s) (Output: %(out)s).")
 
 
 # ConsistencyGroup
index 4848b773863a3663c74f78c20f2f5b20920b0228..6a5c873d1c05a4aa7adf59c7969b3d0941e86b50 100644 (file)
@@ -12,7 +12,6 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-
 import os
 import re
 
@@ -28,7 +27,6 @@ from cinder.volume.drivers.emc.emc_cli_iscsi import EMCCLIISCSIDriver
 import cinder.volume.drivers.emc.emc_vnx_cli as emc_vnx_cli
 from cinder.volume.drivers.emc.emc_vnx_cli import CommandLineHelper
 from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCLICmdError
-from cinder.volume import volume_types
 from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
 
 SUCCEED = ("", 0)
@@ -44,6 +42,7 @@ class EMCVNXCLIDriverTestData():
         'id': '1',
         'provider_auth': None,
         'project_id': 'project',
+        'provider_location': 'system^FNM11111|type^lun|lun_id^1',
         'display_name': 'vol1',
         'display_description': 'test volume',
         'volume_type_id': None,
@@ -141,11 +140,37 @@ class EMCVNXCLIDriverTestData():
         'consistencygroup_id': None,
         'display_description': 'test failed volume',
         'volume_type_id': None}
+
+    test_volume1_in_sg = {
+        'name': 'vol1_in_sg',
+        'size': 1,
+        'volume_name': 'vol1_in_sg',
+        'id': '4',
+        'provider_auth': None,
+        'project_id': 'project',
+        'display_name': 'failed_vol',
+        'display_description': 'Volume 1 in SG',
+        'volume_type_id': None,
+        'provider_location': 'system^fakesn|type^lun|id^4'}
+
+    test_volume2_in_sg = {
+        'name': 'vol2_in_sg',
+        'size': 1,
+        'volume_name': 'vol2_in_sg',
+        'id': '5',
+        'provider_auth': None,
+        'project_id': 'project',
+        'display_name': 'failed_vol',
+        'display_description': 'Volume 2 in SG',
+        'volume_type_id': None,
+        'provider_location': 'system^fakesn|type^lun|id^3'}
+
     test_snapshot = {
         'name': 'snapshot1',
         'size': 1,
         'id': '4444',
         'volume_name': 'vol1',
+        'volume': test_volume,
         'volume_size': 1,
         'consistencygroup_id': None,
         'cgsnapshot_id': None,
@@ -155,6 +180,7 @@ class EMCVNXCLIDriverTestData():
         'size': 1,
         'id': '5555',
         'volume_name': 'vol-vol1',
+        'volume': test_volume,
         'volume_size': 1,
         'project_id': 'project'}
     test_clone = {
@@ -200,6 +226,7 @@ class EMCVNXCLIDriverTestData():
                     'attach_status': 'detached',
                     'volume_type': [],
                     'attached_host': None,
+                    'provider_location': 'system^FNM11111|type^lun|lun_id^1',
                     '_name_id': None, 'volume_metadata': []}
 
     test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
@@ -322,14 +349,15 @@ class EMCVNXCLIDriverTestData():
                 '-allowReadWrite', 'yes',
                 '-allowAutoDelete', 'no')
 
+    def SNAP_LIST_CMD(self, res_id=1, poll=True):
+        cmd = ('snap', '-list', '-res', res_id)
+        if not poll:
+            cmd = ('-np',) + cmd
+        return cmd
+
     def LUN_DELETE_CMD(self, name):
         return ('lun', '-destroy', '-name', name, '-forceDetach', '-o')
 
-    def LUN_CREATE_CMD(self, name, isthin=False):
-        return ('lun', '-create', '-type', 'Thin' if isthin else 'NonThin',
-                '-capacity', 1, '-sq', 'gb', '-poolName',
-                'unit_test_pool', '-name', name)
-
     def LUN_EXTEND_CMD(self, name, newsize):
         return ('lun', '-expand', '-name', name, '-capacity', newsize,
                 '-sq', 'gb', '-o', '-ignoreThresholds')
@@ -340,8 +368,9 @@ class EMCVNXCLIDriverTestData():
                 '-attachedSnapshot')
 
     def MIGRATION_CMD(self, src_id=1, dest_id=1):
-        return ("migrate", "-start", "-source", src_id, "-dest", dest_id,
-                "-rate", "high", "-o")
+        cmd = ("migrate", "-start", "-source", src_id, "-dest", dest_id,
+               "-rate", "high", "-o")
+        return cmd
 
     def MIGRATION_VERIFY_CMD(self, src_id):
         return ("migrate", "-list", "-source", src_id)
@@ -351,7 +380,7 @@ class EMCVNXCLIDriverTestData():
 
     def PINGNODE_CMD(self, sp, portid, vportid, ip):
         return ("connection", "-pingnode", "-sp", sp, '-portid', portid,
-                "-vportid", vportid, "-address", ip)
+                "-vportid", vportid, "-address", ip, '-count', '1')
 
     def GETFCPORT_CMD(self):
         return ('port', '-list', '-sp')
@@ -364,6 +393,16 @@ class EMCVNXCLIDriverTestData():
         return ('compression', '-on',
                 '-l', lun_id, '-ignoreThresholds', '-o')
 
+    def STORAGEGROUP_LIST_CMD(self, gname=None):
+        if gname:
+            return ('storagegroup', '-list', '-gname', gname)
+        else:
+            return ('storagegroup', '-list')
+
+    def STORAGEGROUP_REMOVEHLU_CMD(self, gname, hlu):
+        return ('storagegroup', '-removehlu',
+                '-hlu', hlu, '-gname', gname, '-o')
+
     provisioning_values = {
         'thin': ['-type', 'Thin'],
         'thick': ['-type', 'NonThin'],
@@ -386,12 +425,15 @@ class EMCVNXCLIDriverTestData():
             '-initialTier', 'optimizePool',
             '-tieringPolicy', 'noMovement']}
 
-    def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering):
+    def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering,
+                         poll=True):
         initial = ['lun', '-create',
                    '-capacity', size,
                    '-sq', 'gb',
                    '-poolName', pool,
                    '-name', name]
+        if not poll:
+            initial = ['-np'] + initial
         if provisioning:
             initial.extend(self.provisioning_values[provisioning])
         else:
@@ -401,7 +443,7 @@ class EMCVNXCLIDriverTestData():
         return tuple(initial)
 
     def CHECK_FASTCACHE_CMD(self, storage_pool):
-        return ('-np', 'storagepool', '-list', '-name',
+        return ('storagepool', '-list', '-name',
                 storage_pool, '-fastcache')
 
     def CREATE_CONSISTENCYGROUP_CMD(self, cg_name):
@@ -454,11 +496,12 @@ State:  Ready
     POOL_PROPERTY = ("""\
 Pool Name:  unit_test_pool
 Pool ID:  1
-User Capacity (Blocks):  5769501696
-User Capacity (GBs):  10000.5
-Available Capacity (Blocks):  5676521472
-Available Capacity (GBs):  1000.6
-                        """, 0)
+User Capacity (Blocks):  6881061888
+User Capacity (GBs):  3281.146
+Available Capacity (Blocks):  6832207872
+Available Capacity (GBs):  3257.851
+
+""", 0)
 
     ALL_PORTS = ("SP:  A\n" +
                  "Port ID:  4\n" +
@@ -477,7 +520,7 @@ Available Capacity (GBs):  1000.6
                   'target_discovered': True,
                   'target_iqn':
                   'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
-                  'target_lun': 1,
+                  'target_lun': 2,
                   'target_portal': '10.244.214.118:3260'},
          'driver_volume_type': 'iscsi'}
 
@@ -486,7 +529,7 @@ Available Capacity (GBs):  1000.6
                   'target_discovered': True,
                   'target_iqn':
                   'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
-                  'target_lun': 1,
+                  'target_lun': 2,
                   'target_portal': '10.244.214.118:3260'},
          'driver_volume_type': 'iscsi'}
 
@@ -631,6 +674,89 @@ Available Capacity (GBs):  1000.6
             1               1
         Shareable:             YES""" % sgname, 0)
 
+    def STORAGE_GROUP_HAS_MAP_2(self, sgname):
+
+        return ("""\
+        Storage Group Name:    %s
+        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
+        HBA/SP Pairs:
+
+          HBA UID                                          SP Name     SPPort
+          -------                                          -------     ------
+          iqn.1993-08.org.debian:01:222                     SP A         4
+
+        HLU/ALU Pairs:
+
+          HLU Number     ALU Number
+          ----------     ----------
+            1               1
+            2               3
+        Shareable:             YES""" % sgname, 0)
+
+    def POOL_FEATURE_INFO_POOL_LUNS_CMD(self):
+        cmd = ('storagepool', '-feature', '-info',
+               '-maxPoolLUNs', '-numPoolLUNs')
+        return cmd
+
+    def POOL_FEATURE_INFO_POOL_LUNS(self, max, total):
+        return (('Max. Pool LUNs:  %s\n' % max) +
+                ('Total Number of Pool LUNs:  %s\n' % total), 0)
+
+    def STORAGE_GROUPS_HAS_MAP(self, sgname1, sgname2):
+
+        return ("""
+
+        Storage Group Name:    irrelative
+        Storage Group UID:     9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
+        HBA/SP Pairs:
+
+          HBA UID                                          SP Name     SPPort
+          -------                                          -------     ------
+          iqn.1993-08.org.debian:01:5741c6307e60            SP A         6
+
+        Storage Group Name:    %(sgname1)s
+        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
+        HBA/SP Pairs:
+
+          HBA UID                                          SP Name     SPPort
+          -------                                          -------     ------
+          iqn.1993-08.org.debian:01:222                     SP A         4
+
+        HLU/ALU Pairs:
+
+          HLU Number     ALU Number
+          ----------     ----------
+            31              3
+            41              4
+        Shareable:             YES
+
+        Storage Group Name:    %(sgname2)s
+        Storage Group UID:     9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
+        HBA/SP Pairs:
+
+          HBA UID                                          SP Name     SPPort
+          -------                                          -------     ------
+          iqn.1993-08.org.debian:01:5741c6307e60            SP A         6
+
+        HLU/ALU Pairs:
+
+          HLU Number     ALU Number
+          ----------     ----------
+            32              3
+            42              4
+        Shareable:             YES""" % {'sgname1': sgname1,
+                                         'sgname2': sgname2}, 0)
+
+    def LUN_DELETE_IN_SG_ERROR(self, up_to_date=True):
+        if up_to_date:
+            return ("Cannot unbind LUN "
+                    "because it's contained in a Storage Group",
+                    156)
+        else:
+            return ("SP B: Request failed.  "
+                    "Host LUN/LUN mapping still exists.",
+                    0)
+
 
 class EMCVNXCLIDriverISCSITestCase(test.TestCase):
 
@@ -638,7 +764,7 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
         super(EMCVNXCLIDriverISCSITestCase, self).setUp()
 
         self.stubs.Set(CommandLineHelper, 'command_execute',
-                       self.succeed_fake_command_execute)
+                       self.fake_setup_command_execute)
         self.stubs.Set(CommandLineHelper, 'get_array_serial',
                        mock.Mock(return_value={'array_serial':
                                                'fakeSerial'}))
@@ -658,15 +784,13 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
         #set the timeout to 0.012s = 0.0002 * 60 = 1.2ms
         self.configuration.default_timeout = 0.0002
         self.configuration.initiator_auto_registration = True
+        self.configuration.check_max_pool_luns_threshold = False
         self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
         self.testData = EMCVNXCLIDriverTestData()
         self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
             '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
         self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
 
-    def tearDown(self):
-        super(EMCVNXCLIDriverISCSITestCase, self).tearDown()
-
     def driverSetup(self, commands=tuple(), results=tuple()):
         self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
         fake_command_execute = self.get_command_execute_simulator(
@@ -677,7 +801,6 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
 
     def get_command_execute_simulator(self, commands=tuple(),
                                       results=tuple()):
-
         assert(len(commands) == len(results))
 
         def fake_command_execute(*args, **kwargv):
@@ -728,8 +851,9 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol1', 1,
                 'unit_test_pool',
-                'thick', None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+                'thick', None, False)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                      poll=False),
             mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
 
         fake_cli.assert_has_calls(expect_cmd)
@@ -737,14 +861,16 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
     @mock.patch(
         "eventlet.event.Event.wait",
         mock.Mock(return_value=None))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
     def test_create_volume_compressed(self):
-        extra_specs = {'storagetype:provisioning': 'compressed'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                     self.testData.NDU_LIST_CMD]
         results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.LUN_PROPERTY('vol_with_type', True),
                    self.testData.NDU_LIST_RESULT]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
@@ -758,11 +884,11 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'compressed', None)),
+                'compressed', None, False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                'vol_with_type')),
+                'vol_with_type'), poll=False),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                'vol_with_type')),
+                'vol_with_type'), poll=True),
             mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                 1))]
         fake_cli.assert_has_calls(expect_cmd)
@@ -770,15 +896,17 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
     @mock.patch(
         "eventlet.event.Event.wait",
         mock.Mock(return_value=None))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'compressed',
+                                'storagetype:tiering': 'HighestAvailable'}))
     def test_create_volume_compressed_tiering_highestavailable(self):
-        extra_specs = {'storagetype:provisioning': 'compressed',
-                       'storagetype:tiering': 'HighestAvailable'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                     self.testData.NDU_LIST_CMD]
         results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.LUN_PROPERTY('vol_with_type', True),
                    self.testData.NDU_LIST_RESULT]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
@@ -793,11 +921,11 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'compressed', 'highestavailable')),
+                'compressed', 'highestavailable', False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                'vol_with_type')),
+                'vol_with_type'), poll=False),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                'vol_with_type')),
+                'vol_with_type'), poll=True),
             mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                 1))]
         fake_cli.assert_has_calls(expect_cmd)
@@ -805,14 +933,16 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
     @mock.patch(
         "eventlet.event.Event.wait",
         mock.Mock(return_value=None))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
     def test_create_volume_deduplicated(self):
-        extra_specs = {'storagetype:provisioning': 'deduplicated'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                     self.testData.NDU_LIST_CMD]
         results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.LUN_PROPERTY('vol_with_type', True),
                    self.testData.NDU_LIST_RESULT]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
@@ -827,20 +957,22 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'deduplicated', None))]
+                'deduplicated', None, False))]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch(
         "eventlet.event.Event.wait",
         mock.Mock(return_value=None))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
     def test_create_volume_tiering_auto(self):
-        extra_specs = {'storagetype:tiering': 'Auto'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                     self.testData.NDU_LIST_CMD]
         results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.LUN_PROPERTY('vol_with_type', True),
                    self.testData.NDU_LIST_RESULT]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
@@ -855,15 +987,15 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                None, 'auto'))]
+                None, 'auto', False))]
         fake_cli.assert_has_calls(expect_cmd)
 
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:tiering': 'Auto',
+                                'storagetype:provisioning': 'Deduplicated'}))
     def test_create_volume_deduplicated_tiering_auto(self):
-        extra_specs = {'storagetype:tiering': 'Auto',
-                       'storagetype:provisioning': 'Deduplicated'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                     self.testData.NDU_LIST_CMD]
         results = [self.testData.LUN_PROPERTY('vol_with_type', True),
@@ -877,11 +1009,11 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             re.match(r".*deduplicated and auto tiering can't be both enabled",
                      ex.msg))
 
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'Compressed'}))
     def test_create_volume_compressed_no_enabler(self):
-        extra_specs = {'storagetype:provisioning': 'Compressed'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                     self.testData.NDU_LIST_CMD]
         results = [self.testData.LUN_PROPERTY('vol_with_type', True),
@@ -898,6 +1030,11 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
     @mock.patch(
         "eventlet.event.Event.wait",
         mock.Mock(return_value=None))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
+                                'storagetype:pool': 'unit_test_pool'}))
     def test_create_compression_volume_on_array_backend(self):
         """Unit test for create a compression volume on array
         backend.
@@ -917,14 +1054,11 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
         self.driver = EMCCLIISCSIDriver(configuration=config)
         assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
 
-        extra_specs = {'storagetype:provisioning': 'Compressed',
-                       'storagetype:pool': 'unit_test_pool'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                     self.testData.NDU_LIST_CMD]
         results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.LUN_PROPERTY('vol_with_type', True),
                    self.testData.NDU_LIST_RESULT]
         fake_command_execute = self.get_command_execute_simulator(
             commands, results)
@@ -943,32 +1077,31 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol_with_type', 1,
                 'unit_test_pool',
-                'compressed', None)),
+                'compressed', None, False)),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                'vol_with_type')),
+                'vol_with_type'), poll=False),
             mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                'vol_with_type')),
+                'vol_with_type'), poll=True),
             mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
                 1))]
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_get_volume_stats(self):
-        #expect_result = [POOL_PROPERTY]
         self.driverSetup()
         stats = self.driver.get_volume_stats(True)
         self.assertTrue(stats['driver_version'] is not None,
-                        "dirver_version is not returned")
+                        "driver_version is not returned")
         self.assertTrue(
-            stats['free_capacity_gb'] == 1000.6,
+            stats['free_capacity_gb'] == 3257.851,
             "free_capacity_gb is not correct")
         self.assertTrue(
-            stats['reserved_percentage'] == 0,
+            stats['reserved_percentage'] == 3,
             "reserved_percentage is not correct")
         self.assertTrue(
             stats['storage_protocol'] == 'iSCSI',
             "storage_protocol is not correct")
         self.assertTrue(
-            stats['total_capacity_gb'] == 10000.5,
+            stats['total_capacity_gb'] == 3281.146,
             "total_capacity_gb is not correct")
         self.assertTrue(
             stats['vendor_name'] == "EMC",
@@ -978,12 +1111,39 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             "volume backend name is not correct")
         self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
         self.assertTrue(
-            stats['driver_version'] == "04.01.00",
+            stats['driver_version'] == "05.00.00",
             "driver version is incorrect.")
 
+    def test_get_volume_stats_too_many_luns(self):
+        commands = [self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
+        results = [self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
+        fake_cli = self.driverSetup(commands, results)
+
+        self.driver.cli.check_max_pool_luns_threshold = True
+        stats = self.driver.get_volume_stats(True)
+        self.assertTrue(
+            stats['free_capacity_gb'] == 0,
+            "free_capacity_gb is not correct")
+        expect_cmd = [
+            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
+                      poll=False)]
+        fake_cli.assert_has_calls(expect_cmd)
+        expect_cmd = [
+            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
+                      poll=False)]
+        fake_cli.assert_has_calls(expect_cmd)
+
+        self.driver.cli.check_max_pool_luns_threshold = False
+        stats = self.driver.get_volume_stats(True)
+        self.assertTrue(stats['driver_version'] is not None,
+                        "driver_version is not returned")
+        self.assertTrue(
+            stats['free_capacity_gb'] == 3257.851,
+            "free_capacity_gb is not correct")
+
     @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                 "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(return_value=True))
+                mock.Mock(return_value={'lun_id': 1}))
     @mock.patch(
         "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
         mock.Mock(
@@ -1012,10 +1172,10 @@ Percent Complete:  100
 Time Remaining:  0 second(s)
 """
         results = [(FAKE_ERROR_MSG, 255),
-                   [SUCCEED,
-                    (FAKE_MIGRATE_PROPERTY, 0),
-                    ('The specified source LUN is not currently migrating',
-                     23)]]
+                   [(FAKE_MIGRATE_PROPERTY, 0),
+                   (FAKE_MIGRATE_PROPERTY, 0),
+                   ('The specified source LUN is not currently migrating',
+                    23)]]
         fake_cli = self.driverSetup(commands, results)
         fakehost = {'capabilities': {'location_info':
                                      "unit_test_pool2|fakeSerial",
@@ -1025,15 +1185,20 @@ Time Remaining:  0 second(s)
         self.assertTrue(ret)
         #verification
         expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                                retry_disable=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+                                retry_disable=True,
+                                poll=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                                poll=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                                poll=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                                poll=False)]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                 "CommandLineHelper.create_lun_by_cmd",
                 mock.Mock(
-                    return_value=True))
+                    return_value={'lun_id': 1}))
     @mock.patch(
         "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
         mock.Mock(
@@ -1056,32 +1221,39 @@ Current State:  MIGRATED
 Percent Complete:  100
 Time Remaining:  0 second(s)
 """
-        results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0),
-                             ('The specified source LUN is not '
-                              'currently migrating',
-                              23)]]
+        results = [SUCCEED,
+                   [(FAKE_MIGRATE_PROPERTY, 0),
+                    ('The specified source LUN is not '
+                     'currently migrating', 23)]]
         fake_cli = self.driverSetup(commands, results)
-        fakehost = {'capabilities': {'location_info':
-                                     "unit_test_pool2|fakeSerial",
-                                     'storage_protocol': 'iSCSI'}}
+        fake_host = {'capabilities': {'location_info':
+                                      "unit_test_pool2|fakeSerial",
+                                      'storage_protocol': 'iSCSI'}}
         ret = self.driver.migrate_volume(None, self.testData.test_volume,
-                                         fakehost)[0]
+                                         fake_host)[0]
         self.assertTrue(ret)
         #verification
         expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
-                                retry_disable=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+                                retry_disable=True,
+                                poll=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                                poll=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                                poll=False)]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                 "CommandLineHelper.create_lun_by_cmd",
                 mock.Mock(
-                    return_value=True))
+                    return_value={'lun_id': 5}))
     @mock.patch(
         "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
         "get_lun_id_by_name",
         mock.Mock(return_value=5))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
     def test_volume_migration_02(self):
 
         commands = [self.testData.MIGRATION_CMD(5, 5),
@@ -1096,10 +1268,10 @@ Current State:  MIGRATED
 Percent Complete:  100
 Time Remaining:  0 second(s)
 """
-        results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0),
-                             ('The specified source LUN is not '
-                              'currently migrating',
-                              23)]]
+        results = [SUCCEED,
+                   [(FAKE_MIGRATE_PROPERTY, 0),
+                    ('The specified source LUN is not currently migrating',
+                     23)]]
         fake_cli = self.driverSetup(commands, results)
         fakehost = {'capabilities': {'location_info':
                                      "unit_test_pool2|fakeSerial",
@@ -1109,15 +1281,18 @@ Time Remaining:  0 second(s)
         self.assertTrue(ret)
         #verification
         expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5),
-                                retry_disable=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5)),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5))]
+                                retry_disable=True,
+                                poll=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
+                                poll=True),
+                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
+                                poll=False)]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                 "CommandLineHelper.create_lun_by_cmd",
                 mock.Mock(
-                    return_value=True))
+                    return_value={'lun_id': 1}))
     @mock.patch(
         "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
         mock.Mock(
@@ -1138,7 +1313,8 @@ Time Remaining:  0 second(s)
         self.assertFalse(ret)
         #verification
         expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
-                                retry_disable=True)]
+                                retry_disable=True,
+                                poll=True)]
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_destroy_volume_snapshot(self):
@@ -1149,9 +1325,10 @@ Time Remaining:  0 second(s)
         self.driver.delete_snapshot(self.testData.test_snapshot)
 
         #verification
-        expect_cmd = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-                      mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1')),
-                      mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'))]
+        expect_cmd = [mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1'),
+                                poll=False),
+                      mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'),
+                                poll=True)]
 
         fake_cli.assert_has_calls(expect_cmd)
 
@@ -1160,21 +1337,19 @@ Time Remaining:  0 second(s)
         mock.Mock(
             return_value=(
                 "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
-    @mock.patch("random.shuffle", mock.Mock())
+    @mock.patch('random.randint',
+                mock.Mock(return_value=0))
     def test_initialize_connection(self):
         # Test for auto registration
         self.configuration.initiator_auto_registration = True
         commands = [('storagegroup', '-list', '-gname', 'fakehost'),
-                    self.testData.GETPORT_CMD(),
                     self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
         results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
-                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
                     self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   self.testData.ALL_PORTS,
                    self.testData.PING_OK]
 
         fake_cli = self.driverSetup(commands, results)
+
         connection_info = self.driver.initialize_connection(
             self.testData.test_volume,
             self.testData.connector)
@@ -1182,22 +1357,20 @@ Time Remaining:  0 second(s)
         self.assertEqual(connection_info,
                          self.testData.iscsi_connection_info_ro)
 
-        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=False),
                     mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-list'),
-                    mock.call(*self.testData.GETPORT_CMD()),
                     mock.call('storagegroup', '-gname', 'fakehost', '-setpath',
                               '-hbauid', 'iqn.1993-08.org.debian:01:222',
                               '-sp', 'A', '-spport', 4, '-spvport', 0,
                               '-ip', '10.0.0.2', '-host', 'fakehost', '-o'),
-                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
-                              '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call(*self.testData.GETPORT_CMD()),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=True),
+                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
+                              '-gname', 'fakehost',
+                              poll=False),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                              poll=False),
                     mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
                                                           '10.0.0.2'))]
         fake_cli.assert_has_calls(expected)
@@ -1207,35 +1380,168 @@ Time Remaining:  0 second(s)
 
         commands = [('storagegroup', '-list', '-gname', 'fakehost'),
                     self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
-                    self.testData.GETPORT_CMD(),
                     self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
-        results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
-                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
-                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   ('', 0),
-                   self.testData.ALL_PORTS,
-                   self.testData.PING_OK]
+        results = [
+            [("No group", 83),
+             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
+            ('', 0),
+            self.testData.PING_OK
+        ]
         fake_cli = self.driverSetup(commands, results)
+        test_volume_rw = self.testData.test_volume_rw.copy()
+        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
         connection_info = self.driver.initialize_connection(
-            self.testData.test_volume_rw,
+            test_volume_rw,
             self.testData.connector)
 
         self.assertEqual(connection_info,
                          self.testData.iscsi_connection_info_rw)
 
-        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=False),
                     mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                     mock.call('storagegroup', '-connecthost',
                               '-host', 'fakehost', '-gname', 'fakehost', '-o'),
-                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
-                              '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('connection', '-getport', '-address', '-vlanid')]
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=True),
+                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
+                              '-gname', 'fakehost', poll=False),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                              poll=False),
+                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
+                                                          '10.0.0.2'))]
+        fake_cli.assert_has_calls(expected)
+
+    @mock.patch(
+        "oslo_concurrency.processutils.execute",
+        mock.Mock(
+            return_value=(
+                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(
+            return_value=3))
+    @mock.patch('random.randint',
+                mock.Mock(return_value=0))
+    def test_initialize_connection_exist(self):
+        """A LUN is added to the SG right before the attach,
+        it may not exist in the first SG query
+        """
+        # Test for auto registration
+        self.configuration.initiator_auto_registration = True
+        self.configuration.max_luns_per_storage_group = 2
+        commands = [('storagegroup', '-list', '-gname', 'fakehost'),
+                    ('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
+                     '-gname', 'fakehost'),
+                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
+        results = [[self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
+                    self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')],
+                   ("fakeerror", 23),
+                   self.testData.PING_OK]
+
+        fake_cli = self.driverSetup(commands, results)
+
+        iscsi_data = self.driver.initialize_connection(
+            self.testData.test_volume,
+            self.testData.connector
+        )
+        self.assertTrue(iscsi_data['data']['target_lun'] == 2,
+                        "iSCSI initialize connection returned wrong HLU")
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=False),
+                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
+                              '-gname', 'fakehost',
+                              poll=False),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=True),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                              poll=False),
+                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
+                                                          '10.0.0.2'))]
+        fake_cli.assert_has_calls(expected)
+
+    @mock.patch(
+        "oslo_concurrency.processutils.execute",
+        mock.Mock(
+            return_value=(
+                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(
+            return_value=4))
+    @mock.patch('random.randint',
+                mock.Mock(return_value=0))
+    def test_initialize_connection_no_hlu_left_1(self):
+        """No HLU is available per the first SG query,
+        but HLUs are left after the full poll.
+        """
+        # Test for auto registration
+        self.configuration.initiator_auto_registration = True
+        self.configuration.max_luns_per_storage_group = 2
+        commands = [('storagegroup', '-list', '-gname', 'fakehost'),
+                    ('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
+                     '-gname', 'fakehost'),
+                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
+        results = [[self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
+                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
+                   ("", 0),
+                   self.testData.PING_OK]
+
+        fake_cli = self.driverSetup(commands, results)
+
+        iscsi_data = self.driver.initialize_connection(
+            self.testData.test_volume,
+            self.testData.connector)
+        self.assertTrue(iscsi_data['data']['target_lun'] == 2,
+                        "iSCSI initialize connection returned wrong HLU")
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=False),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=True),
+                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
+                              '-gname', 'fakehost',
+                              poll=False),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                              poll=False),
+                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
+                                                          u'10.0.0.2'))]
+        fake_cli.assert_has_calls(expected)
+
+    @mock.patch(
+        "oslo_concurrency.processutils.execute",
+        mock.Mock(
+            return_value=(
+                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
+    @mock.patch(
+        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
+        mock.Mock(
+            return_value=4))
+    @mock.patch('random.randint',
+                mock.Mock(return_value=0))
+    def test_initialize_connection_no_hlu_left_2(self):
+        """There is no usable HLU for the SG.
+        """
+        # Test for auto registration
+        self.configuration.initiator_auto_registration = True
+        self.configuration.max_luns_per_storage_group = 2
+        commands = [('storagegroup', '-list', '-gname', 'fakehost')]
+        results = [
+            [self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
+             self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')]
+        ]
+
+        fake_cli = self.driverSetup(commands, results)
+
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.initialize_connection,
+                          self.testData.test_volume,
+                          self.testData.connector)
+        expected = [
+            mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                      poll=False),
+            mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                      poll=True),
+        ]
         fake_cli.assert_has_calls(expected)
 
     def test_terminate_connection(self):
@@ -1269,7 +1575,7 @@ Time Remaining:  0 second(s)
 
     def test_create_volume_cli_failed(self):
         commands = [self.testData.LUN_CREATION_CMD(
-            'failed_vol1', 1, 'unit_test_pool', None, None)]
+            'failed_vol1', 1, 'unit_test_pool', None, None, False)]
         results = [FAKE_ERROR_RETURN]
         fake_cli = self.driverSetup(commands, results)
 
@@ -1277,7 +1583,7 @@ Time Remaining:  0 second(s)
                           self.driver.create_volume,
                           self.testData.test_failed_volume)
         expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
-            'failed_vol1', 1, 'unit_test_pool', None, None))]
+            'failed_vol1', 1, 'unit_test_pool', None, None, False))]
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_volume_snapshot_failed(self):
@@ -1293,29 +1599,24 @@ Time Remaining:  0 second(s)
         #verification
         expect_cmd = [
             mock.call(
-                *self.testData.LUN_PROPERTY_ALL_CMD(
-                    'vol-vol1')),
-            mock.call(
-                *self.testData.SNAP_CREATE_CMD(
-                    'failed_snapshot'))]
+                *self.testData.SNAP_CREATE_CMD('failed_snapshot'),
+                poll=False)]
 
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_volume_from_snapshot(self):
         #set up
-        cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
-        output_smp = ("""LOGICAL UNIT NUMBER 1
-                     Name:  vol2
-                     Attached Snapshot:  N/A""", 0)
         cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
         output_dest = self.testData.LUN_PROPERTY("vol2_dest")
         cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
         output_migrate = ("", 0)
         cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
         output_migrate_verify = (r'The specified source LUN '
                                  'is not currently migrating', 23)
-        commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
-        results = [output_smp, output_dest, output_migrate,
+        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
+                    cmd_migrate_verify]
+        results = [output_dest, output_dest, output_migrate,
                    output_migrate_verify]
         fake_cli = self.driverSetup(commands, results)
 
@@ -1324,41 +1625,81 @@ Time Remaining:  0 second(s)
         expect_cmd = [
             mock.call(
                 *self.testData.SNAP_MP_CREATE_CMD(
-                    name='vol2', source='vol1')),
+                    name='vol2', source='vol1'),
+                poll=False),
             mock.call(
                 *self.testData.SNAP_ATTACH_CMD(
                     name='vol2', snapName='snapshot1')),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol2_dest', 1, 'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+                      poll=True),
             mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                      retry_disable=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
-
-            mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot')]
+                      retry_disable=True,
+                      poll=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                      poll=True)]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
                 new=ZeroIntervalLoopingCall)
     def test_create_volume_from_snapshot_sync_failed(self):
 
-        output_smp = ("""LOGICAL UNIT NUMBER 1
-                    Name:  vol1
-                    Attached Snapshot:  fakesnap""", 0)
-        cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
         cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
         output_dest = self.testData.LUN_PROPERTY("vol2_dest")
         cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
+        cmd_detach_lun = ('lun', '-detach', '-name', 'vol2')
         output_migrate = ("", 0)
         cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
-        results = [output_smp, output_dest, output_migrate,
-                   output_migrate_verify]
+
+        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
+                    cmd_migrate_verify]
+        results = [output_dest, output_dest, output_migrate,
+                   FAKE_ERROR_RETURN]
+        fake_cli = self.driverSetup(commands, results)
+
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume_from_snapshot,
+                          self.testData.test_volume2,
+                          self.testData.test_snapshot)
+        expect_cmd = [
+            mock.call(
+                *self.testData.SNAP_MP_CREATE_CMD(
+                    name='vol2', source='vol1'),
+                poll=False),
+            mock.call(
+                *self.testData.SNAP_ATTACH_CMD(
+                    name='vol2', snapName='snapshot1')),
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol2_dest', 1, 'unit_test_pool', None, None)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+                      poll=True),
+            mock.call(*self.testData.MIGRATION_CMD(1, 1),
+                      retry_disable=True,
+                      poll=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                      poll=True),
+            mock.call(*self.testData.LUN_DELETE_CMD('vol2_dest')),
+            mock.call(*cmd_detach_lun),
+            mock.call(*self.testData.LUN_DELETE_CMD('vol2'))]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    def test_create_vol_from_snap_failed_in_migrate_lun(self):
+        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+        output_dest = self.testData.LUN_PROPERTY("vol2_dest")
+        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
+        cmd_detach_lun = ('lun', '-detach', '-name', 'vol2')
+        commands = [cmd_dest, cmd_migrate]
+        results = [output_dest, FAKE_ERROR_RETURN]
         fake_cli = self.driverSetup(commands, results)
 
         self.assertRaises(exception.VolumeBackendAPIException,
@@ -1368,36 +1709,38 @@ Time Remaining:  0 second(s)
         expect_cmd = [
             mock.call(
                 *self.testData.SNAP_MP_CREATE_CMD(
-                    name='vol2', source='vol1')),
+                    name='vol2', source='vol1'), poll=False),
             mock.call(
                 *self.testData.SNAP_ATTACH_CMD(
                     name='vol2', snapName='snapshot1')),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol2_dest', 1, 'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'), poll=True),
             mock.call(*self.testData.MIGRATION_CMD(1, 1),
+                      poll=True,
                       retry_disable=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
+            mock.call(*self.testData.LUN_DELETE_CMD('vol2_dest')),
+            mock.call(*cmd_detach_lun),
+            mock.call(*self.testData.LUN_DELETE_CMD('vol2'))]
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_cloned_volume(self):
-        cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot')
-        output_smp = ("""LOGICAL UNIT NUMBER 1
-                     Name:  vol1
-                     Attached Snapshot:  N/A""", 0)
         cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
+        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
         output_dest = self.testData.LUN_PROPERTY("vol1_dest")
         cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
         output_migrate = ("", 0)
         cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
         output_migrate_verify = (r'The specified source LUN '
                                  'is not currently migrating', 23)
-        commands = [cmd_smp, cmd_dest, cmd_migrate,
+        commands = [cmd_dest, cmd_dest_p, cmd_migrate,
                     cmd_migrate_verify,
                     self.testData.NDU_LIST_CMD]
-        results = [output_smp, output_dest, output_migrate,
+        results = [output_dest, output_dest, output_migrate,
                    output_migrate_verify,
                    self.testData.NDU_LIST_RESULT]
         fake_cli = self.driverSetup(commands, results)
@@ -1406,26 +1749,31 @@ Time Remaining:  0 second(s)
                                          self.testData.test_snapshot)
         tmp_snap = 'tmp-snap-' + self.testData.test_volume['id']
         expect_cmd = [
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('snapshot1')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
+                      poll=True),
             mock.call(
-                *self.testData.SNAP_CREATE_CMD(tmp_snap)),
-            mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
-                                                        source='snapshot1')),
+                *self.testData.SNAP_CREATE_CMD(tmp_snap), poll=False),
+            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
+                name='vol1',
+                source='snapshot1'), poll=False),
             mock.call(
                 *self.testData.SNAP_ATTACH_CMD(
                     name='vol1', snapName=tmp_snap)),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol1_dest', 1, 'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                      poll=True),
             mock.call(*self.testData.MIGRATION_CMD(1, 1),
+                      poll=True,
                       retry_disable=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
-            mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-            mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap))]
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                      poll=True),
+            mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
+                      poll=True)]
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_delete_volume_failed(self):
@@ -1439,6 +1787,63 @@ Time Remaining:  0 second(s)
         expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))]
         fake_cli.assert_has_calls(expected)
 
+    def test_delete_volume_in_sg_failed(self):
+        commands = [self.testData.LUN_DELETE_CMD('vol1_in_sg'),
+                    self.testData.LUN_DELETE_CMD('vol2_in_sg')]
+        results = [self.testData.LUN_DELETE_IN_SG_ERROR(),
+                   self.testData.LUN_DELETE_IN_SG_ERROR(False)]
+        self.driverSetup(commands, results)
+        self.assertRaises(EMCVnxCLICmdError,
+                          self.driver.delete_volume,
+                          self.testData.test_volume1_in_sg)
+        self.assertRaises(EMCVnxCLICmdError,
+                          self.driver.delete_volume,
+                          self.testData.test_volume2_in_sg)
+
+    def test_delete_volume_in_sg_force(self):
+        commands = [self.testData.LUN_DELETE_CMD('vol1_in_sg'),
+                    self.testData.STORAGEGROUP_LIST_CMD(),
+                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
+                                                             '41'),
+                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
+                                                             '42'),
+                    self.testData.LUN_DELETE_CMD('vol2_in_sg'),
+                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
+                                                             '31'),
+                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
+                                                             '32')]
+        results = [[self.testData.LUN_DELETE_IN_SG_ERROR(),
+                    SUCCEED],
+                   self.testData.STORAGE_GROUPS_HAS_MAP('fakehost1',
+                                                        'fakehost2'),
+                   SUCCEED,
+                   SUCCEED,
+                   [self.testData.LUN_DELETE_IN_SG_ERROR(False),
+                    SUCCEED],
+                   SUCCEED,
+                   SUCCEED]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.force_delete_lun_in_sg = True
+        self.driver.delete_volume(self.testData.test_volume1_in_sg)
+        self.driver.delete_volume(self.testData.test_volume2_in_sg)
+        expected = [mock.call(*self.testData.LUN_DELETE_CMD('vol1_in_sg')),
+                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
+                              poll=True),
+                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
+                        'fakehost1', '41'), poll=False),
+                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
+                        'fakehost2', '42'), poll=False),
+                    mock.call(*self.testData.LUN_DELETE_CMD('vol1_in_sg')),
+                    mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg')),
+                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
+                              poll=True),
+                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
+                        'fakehost1', '31'), poll=False),
+                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
+                        'fakehost2', '32'), poll=False),
+                    mock.call(*self.testData.LUN_DELETE_CMD('vol2_in_sg'))]
+        fake_cli.assert_has_calls(expected)
+
     def test_extend_volume(self):
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
         results = [self.testData.LUN_PROPERTY('vol1', size=2)]
@@ -1446,9 +1851,10 @@ Time Remaining:  0 second(s)
 
         # case
         self.driver.extend_volume(self.testData.test_volume, 2)
-        expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2)),
-                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                        'vol1'))]
+        expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2),
+                              poll=False),
+                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                              poll=False)]
         fake_cli.assert_has_calls(expected)
 
     def test_extend_volume_has_snapshot(self):
@@ -1460,7 +1866,8 @@ Time Remaining:  0 second(s)
                           self.driver.extend_volume,
                           self.testData.test_failed_volume,
                           2)
-        expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2))]
+        expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2),
+                              poll=False)]
         fake_cli.assert_has_calls(expected)
 
     @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
@@ -1476,31 +1883,19 @@ Time Remaining:  0 second(s)
                           3)
         expected = [
             mock.call(
-                *self.testData.LUN_EXTEND_CMD('failed_vol1', 3)),
+                *self.testData.LUN_EXTEND_CMD('failed_vol1', 3),
+                poll=False),
             mock.call(
-                *self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'))]
-        fake_cli.assert_has_calls(expected)
-
-    def test_create_remove_export(self):
-        fake_cli = self.driverSetup()
-
-        self.driver.create_export(None, self.testData.test_volume)
-        self.driver.remove_export(None, self.testData.test_volume)
-        expected = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'))]
+                *self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'),
+                poll=False)]
         fake_cli.assert_has_calls(expected)
 
     def test_manage_existing(self):
-        """Unit test for the manage_existing function
-        of driver
-        """
-        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
-                       '-state', '-userCap', '-owner',
-                       '-attachedSnapshot', '-poolName')
         lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
                           '-newName', 'vol_with_type', '-o')
-        commands = [get_lun_cmd, lun_rename_cmd]
+        commands = [lun_rename_cmd]
 
-        results = [self.testData.LUN_PROPERTY('lun_name'), SUCCEED]
+        results = [SUCCEED]
         self.configuration.storage_vnx_pool_name = \
             self.testData.test_pool_name
         self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
@@ -1513,15 +1908,10 @@ Time Remaining:  0 second(s)
         self.driver.manage_existing(
             self.testData.test_volume_with_type,
             self.testData.test_existing_ref)
-        expected = [mock.call(*get_lun_cmd),
-                    mock.call(*lun_rename_cmd)]
+        expected = [mock.call(*lun_rename_cmd, poll=False)]
         fake_cli.assert_has_calls(expected)
 
     def test_manage_existing_lun_in_another_pool(self):
-        """Unit test for the manage_existing function
-        of driver with a invalid pool backend.
-        An exception would occur in this case
-        """
         get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
                        '-state', '-userCap', '-owner',
                        '-attachedSnapshot', '-poolName')
@@ -1539,22 +1929,19 @@ Time Remaining:  0 second(s)
         self.driver.cli._client.command_execute = fake_cli
         ex = self.assertRaises(
             exception.ManageExistingInvalidReference,
-            self.driver.manage_existing,
+            self.driver.manage_existing_get_size,
             self.testData.test_volume_with_type,
             self.testData.test_existing_ref)
         self.assertTrue(
             re.match(r'.*not in a manageable pool backend by cinder',
                      ex.msg))
-        expected = [mock.call(*get_lun_cmd)]
+        expected = [mock.call(*get_lun_cmd, poll=True)]
         fake_cli.assert_has_calls(expected)
 
-    def test_manage_existing_get_size(self):
-        """Unit test for the manage_existing_get_size
-        function of driver.
-        """
+    def test_manage_existing_get_size_pool_backend(self):
         get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
-                       '-state', '-status', '-opDetails', '-userCap', '-owner',
-                       '-attachedSnapshot')
+                       '-state', '-userCap', '-owner',
+                       '-attachedSnapshot', '-poolName')
         test_size = 2
         commands = [get_lun_cmd]
         results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
@@ -1573,7 +1960,7 @@ Time Remaining:  0 second(s)
         get_size = self.driver.manage_existing_get_size(
             self.testData.test_volume_with_type,
             self.testData.test_existing_ref)
-        expected = [mock.call(*get_lun_cmd)]
+        expected = [mock.call(*get_lun_cmd, poll=True)]
         assert get_size == test_size
         fake_cli.assert_has_calls(expected)
         #Test the function with invalid reference.
@@ -1583,6 +1970,32 @@ Time Remaining:  0 second(s)
                           self.testData.test_volume_with_type,
                           invaild_ref)
 
+    def test_manage_existing_get_size_array_backend(self):
+        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+                       '-state', '-status', '-opDetails', '-userCap', '-owner',
+                       '-attachedSnapshot',)
+        test_size = 2
+        commands = [get_lun_cmd]
+        results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
+
+        self.configuration.safe_get = mock.Mock(return_value=None)
+        self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
+
+        # Mock the command executor
+        fake_command_execute = self.get_command_execute_simulator(
+            commands, results)
+        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
+        self.driver.cli._client.command_execute = fake_cli
+
+        get_size = self.driver.manage_existing_get_size(
+            self.testData.test_volume_with_type,
+            self.testData.test_existing_ref)
+        expected = [mock.call(*get_lun_cmd, poll=True)]
+        assert get_size == test_size
+        fake_cli.assert_has_calls(expected)
+        self.configuration.safe_get = self.fake_safe_get
+
     def test_manage_existing_with_array_backend(self):
         """Unit test for the manage_existing with the
         array backend which is not support the manage
@@ -1615,7 +2028,7 @@ Time Remaining:  0 second(s)
         self.driver.manage_existing(
             self.testData.test_volume_with_type,
             self.testData.test_existing_ref)
-        expected = [mock.call(*lun_rename_cmd)]
+        expected = [mock.call(*lun_rename_cmd, poll=False)]
         fake_cli.assert_has_calls(expected)
 
     @mock.patch(
@@ -1631,6 +2044,10 @@ Time Remaining:  0 second(s)
     @mock.patch(
         "time.time",
         mock.Mock(return_value=123456))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
     def test_retype_compressed_to_deduplicated(self):
         """Unit test for retype compressed to deduplicated."""
         diff_data = {'encryption': {}, 'qos_specs': {},
@@ -1650,10 +2067,15 @@ Time Remaining:  0 second(s)
                            'volume_backend_name': 'pool_backend_1',
                            'storage_protocol': 'iSCSI'}}
 
+        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
+        output_migrate_verify = (r'The specified source LUN '
+                                 'is not currently migrating', 23)
         commands = [self.testData.NDU_LIST_CMD,
-                    ('snap', '-list', '-res', 1)]
+                    self.testData.SNAP_LIST_CMD(),
+                    cmd_migrate_verify]
         results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023)]
+                   ('No snap', 1023),
+                   output_migrate_verify]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
                                     '-Deduplication',
@@ -1662,19 +2084,19 @@ Time Remaining:  0 second(s)
         CommandLineHelper.get_array_serial = mock.Mock(
             return_value={'array_serial': "FNM00124500890"})
 
-        extra_specs = {'storagetype:provisioning': 'compressed'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
         self.driver.retype(None, self.testData.test_volume3,
                            new_type_data,
                            diff_data,
                            host_test_data)
         expect_cmd = [
-            mock.call('snap', '-list', '-res', 1),
+            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456')),
-            mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456'),
+                      poll=False),
+            mock.call(*self.testData.MIGRATION_CMD(1, None),
+                      retry_disable=True,
+                      poll=True)]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch(
@@ -1690,6 +2112,10 @@ Time Remaining:  0 second(s)
     @mock.patch(
         "time.time",
         mock.Mock(return_value=123456))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'thin'}))
     def test_retype_thin_to_compressed_auto(self):
         """Unit test for retype thin to compressed and auto tiering."""
         diff_data = {'encryption': {}, 'qos_specs': {},
@@ -1710,11 +2136,15 @@ Time Remaining:  0 second(s)
                           {'location_info': 'unit_test_pool|FNM00124500890',
                            'volume_backend_name': 'pool_backend_1',
                            'storage_protocol': 'iSCSI'}}
-
+        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
+        output_migrate_verify = (r'The specified source LUN '
+                                 'is not currently migrating', 23)
         commands = [self.testData.NDU_LIST_CMD,
-                    ('snap', '-list', '-res', 1)]
+                    self.testData.SNAP_LIST_CMD(),
+                    cmd_migrate_verify]
         results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023)]
+                   ('No snap', 1023),
+                   output_migrate_verify]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
                                     '-Deduplication',
@@ -1723,19 +2153,18 @@ Time Remaining:  0 second(s)
         CommandLineHelper.get_array_serial = mock.Mock(
             return_value={'array_serial': "FNM00124500890"})
 
-        extra_specs = {'storagetype:provisioning': 'thin'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
         self.driver.retype(None, self.testData.test_volume3,
                            new_type_data,
                            diff_data,
                            host_test_data)
         expect_cmd = [
-            mock.call('snap', '-list', '-res', 1),
+            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol3-123456', 2, 'unit_test_pool', 'compressed', 'auto')),
             mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
-            mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+            mock.call(*self.testData.MIGRATION_CMD(),
+                      retry_disable=True,
+                      poll=True)]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch(
@@ -1751,6 +2180,11 @@ Time Remaining:  0 second(s)
     @mock.patch(
         "time.time",
         mock.Mock(return_value=123456))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated',
+                                'storagetype:pool': 'unit_test_pool'}))
     def test_retype_pool_changed_dedup_to_compressed_auto(self):
         """Unit test for retype dedup to compressed and auto tiering
         and pool changed
@@ -1779,9 +2213,11 @@ Time Remaining:  0 second(s)
                            'storage_protocol': 'iSCSI'}}
 
         commands = [self.testData.NDU_LIST_CMD,
-                    ('snap', '-list', '-res', 1)]
+                    self.testData.SNAP_LIST_CMD(),
+                    self.testData.MIGRATION_VERIFY_CMD(1)]
         results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023)]
+                   ('No snap', 1023),
+                   ('The specified source LUN is not currently migrating', 23)]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
                                     '-Deduplication',
@@ -1790,20 +2226,20 @@ Time Remaining:  0 second(s)
         CommandLineHelper.get_array_serial = mock.Mock(
             return_value={'array_serial': "FNM00124500890"})
 
-        extra_specs = {'storagetype:provisioning': 'deduplicated',
-                       'storagetype:pool': 'unit_test_pool'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
         self.driver.retype(None, self.testData.test_volume3,
                            new_type_data,
                            diff_data,
                            host_test_data)
         expect_cmd = [
-            mock.call('snap', '-list', '-res', 1),
+            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol3-123456', 2, 'unit_test_pool2', 'compressed', 'auto')),
             mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
-            mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+            mock.call(*self.testData.MIGRATION_CMD(),
+                      retry_disable=True,
+                      poll=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                      poll=True)]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch(
@@ -1813,6 +2249,12 @@ Time Remaining:  0 second(s)
         "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
         "get_lun_by_name",
         mock.Mock(return_value={'lun_id': 1}))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'compressed',
+                                'storagetype:pool': 'unit_test_pool',
+                                'storagetype:tiering': 'auto'}))
     def test_retype_compressed_auto_to_compressed_nomovement(self):
         """Unit test for retype only tiering changed."""
         diff_data = {'encryption': {}, 'qos_specs': {},
@@ -1835,7 +2277,7 @@ Time Remaining:  0 second(s)
                            'storage_protocol': 'iSCSI'}}
 
         commands = [self.testData.NDU_LIST_CMD,
-                    ('snap', '-list', '-res', 1)]
+                    self.testData.SNAP_LIST_CMD(poll=False)]
         results = [self.testData.NDU_LIST_RESULT,
                    ('No snap', 1023)]
         fake_cli = self.driverSetup(commands, results)
@@ -1846,11 +2288,6 @@ Time Remaining:  0 second(s)
         CommandLineHelper.get_array_serial = mock.Mock(
             return_value={'array_serial': "FNM00124500890"})
 
-        extra_specs = {'storagetype:provisioning': 'compressed',
-                       'storagetype:pool': 'unit_test_pool',
-                       'storagetype:tiering': 'auto'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
         self.driver.retype(None, self.testData.test_volume3,
                            new_type_data,
                            diff_data,
@@ -1867,6 +2304,11 @@ Time Remaining:  0 second(s)
         "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
         "get_lun_by_name",
         mock.Mock(return_value={'lun_id': 1}))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'thin',
+                                'storagetype:pool': 'unit_test_pool'}))
     def test_retype_compressed_to_thin_cross_array(self):
         """Unit test for retype cross array."""
         diff_data = {'encryption': {}, 'qos_specs': {},
@@ -1887,7 +2329,7 @@ Time Remaining:  0 second(s)
                            'storage_protocol': 'iSCSI'}}
 
         commands = [self.testData.NDU_LIST_CMD,
-                    ('snap', '-list', '-res', 1)]
+                    self.testData.SNAP_LIST_CMD(poll=False)]
         results = [self.testData.NDU_LIST_RESULT,
                    ('No snap', 1023)]
         self.driverSetup(commands, results)
@@ -1898,10 +2340,6 @@ Time Remaining:  0 second(s)
         CommandLineHelper.get_array_serial = mock.Mock(
             return_value={'array_serial': "FNM00124500890"})
 
-        extra_specs = {'storagetype:provisioning': 'thin',
-                       'storagetype:pool': 'unit_test_pool'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
         retyped = self.driver.retype(None, self.testData.test_volume3,
                                      new_type_data, diff_data,
                                      host_test_data)
@@ -1922,6 +2360,12 @@ Time Remaining:  0 second(s)
     @mock.patch(
         "time.time",
         mock.Mock(return_value=123456))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'thin',
+                                'storagetype:tiering': 'auto',
+                                'storagetype:pool': 'unit_test_pool'}))
     def test_retype_thin_auto_to_dedup_diff_procotol(self):
         """Unit test for retype different procotol."""
         diff_data = {'encryption': {}, 'qos_specs': {},
@@ -1944,9 +2388,11 @@ Time Remaining:  0 second(s)
                            'storage_protocol': 'FC'}}
 
         commands = [self.testData.NDU_LIST_CMD,
-                    ('snap', '-list', '-res', 1)]
+                    self.testData.SNAP_LIST_CMD(),
+                    self.testData.MIGRATION_VERIFY_CMD(1)]
         results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023)]
+                   ('No snap', 1023),
+                   ('The specified source LUN is not currently migrating', 23)]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.enablers = ['-Compression',
                                     '-Deduplication',
@@ -1955,21 +2401,19 @@ Time Remaining:  0 second(s)
         CommandLineHelper.get_array_serial = mock.Mock(
             return_value={'array_serial': "FNM00124500890"})
 
-        extra_specs = {'storagetype:provisioning': 'thin',
-                       'storagetype:tiering': 'auto',
-                       'storagetype:pool': 'unit_test_pool'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         self.driver.retype(None, self.testData.test_volume3,
                            new_type_data,
                            diff_data,
                            host_test_data)
         expect_cmd = [
-            mock.call('snap', '-list', '-res', 1),
+            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
-            mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
+            mock.call(*self.testData.MIGRATION_CMD(),
+                      retry_disable=True,
+                      poll=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                      poll=True)]
         fake_cli.assert_has_calls(expect_cmd)
 
     @mock.patch(
@@ -1979,6 +2423,12 @@ Time Remaining:  0 second(s)
         "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
         "get_lun_by_name",
         mock.Mock(return_value={'lun_id': 1}))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'thin',
+                                'storagetype:tiering': 'auto',
+                                'storagetype:pool': 'unit_test_pool'}))
     def test_retype_thin_auto_has_snap_to_thick_highestavailable(self):
         """Unit test for retype volume has snap when need migration."""
         diff_data = {'encryption': {}, 'qos_specs': {},
@@ -2001,7 +2451,7 @@ Time Remaining:  0 second(s)
                            'storage_protocol': 'iSCSI'}}
 
         commands = [self.testData.NDU_LIST_CMD,
-                    ('snap', '-list', '-res', 1)]
+                    self.testData.SNAP_LIST_CMD(poll=False)]
         results = [self.testData.NDU_LIST_RESULT,
                    ('Has snap', 0)]
         self.driverSetup(commands, results)
@@ -2012,12 +2462,6 @@ Time Remaining:  0 second(s)
         CommandLineHelper.get_array_serial = mock.Mock(
             return_value={'array_serial': "FNM00124500890"})
 
-        extra_specs = {'storagetype:provisioning': 'thin',
-                       'storagetype:tiering': 'auto',
-                       'storagetype:pool': 'unit_test_pool'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         retyped = self.driver.retype(None, self.testData.test_volume3,
                                      new_type_data,
                                      diff_data,
@@ -2033,6 +2477,12 @@ Time Remaining:  0 second(s)
         "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
         "get_lun_by_name",
         mock.Mock(return_value={'lun_id': 1}))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'thin',
+                                'storagetype:tiering': 'auto',
+                                'storagetype:pool': 'unit_test_pool'}))
     def test_retype_thin_auto_to_thin_auto(self):
         """Unit test for retype volume which has no change."""
         diff_data = {'encryption': {}, 'qos_specs': {},
@@ -2062,27 +2512,24 @@ Time Remaining:  0 second(s)
         CommandLineHelper.get_array_serial = mock.Mock(
             return_value={'array_serial': "FNM00124500890"})
 
-        extra_specs = {'storagetype:provisioning': 'thin',
-                       'storagetype:tiering': 'auto',
-                       'storagetype:pool': 'unit_test_pool'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
         self.driver.retype(None, self.testData.test_volume3,
                            new_type_data,
                            diff_data,
                            host_test_data)
 
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'fast_cache_enabled': 'True'}))
     def test_create_volume_with_fastcache(self):
         '''enable fastcache when creating volume.'''
-        extra_specs = {'fast_cache_enabled': 'True'}
-        volume_types.get_volume_type_extra_specs = \
-            mock.Mock(return_value=extra_specs)
-
         commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
                     self.testData.NDU_LIST_CMD,
                     self.testData.CHECK_FASTCACHE_CMD(
                         self.testData.test_pool_name)]
         results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.LUN_PROPERTY('vol_with_type', True),
                    SUCCEED,
                    ('FAST Cache:  Enabled', 0)]
         fake_cli = self.driverSetup(commands, results)
@@ -2107,15 +2554,18 @@ Time Remaining:  0 second(s)
         cli_helper.command_execute = fake_cli
         cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
         cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
+        cli_helper.get_pool = mock.Mock(return_value={'lun_nums': 1000,
+                                                      'total_capacity_gb': 10,
+                                                      'free_capacity_gb': 5})
         self.driver.update_volume_stats()
         self.driver.create_volume(self.testData.test_volume_with_type)
         self.assertEqual(self.driver.cli.stats['fast_cache_enabled'], 'True')
         expect_cmd = [
+            mock.call('connection', '-getport', '-address', '-vlanid',
+                      poll=False),
             mock.call('storagepool', '-list', '-name',
-                      'Pool_02_SASFLASH', '-userCap', '-availableCap'),
-            mock.call('-np', 'storagepool', '-list', '-name',
-                      'Pool_02_SASFLASH', '-fastcache'),
-            mock.call('lun', '-create', '-capacity',
+                      'Pool_02_SASFLASH', '-fastcache', poll=False),
+            mock.call('-np', 'lun', '-create', '-capacity',
                       1, '-sq', 'gb', '-poolName', 'Pool_02_SASFLASH',
                       '-name', 'vol_with_type', '-type', 'NonThin')
         ]
@@ -2189,10 +2639,8 @@ Time Remaining:  0 second(s)
             mock.call(
                 *self.testData.DELETE_CONSISTENCYGROUP_CMD(
                     cg_name)),
-            mock.call(
-                *self.testData.LUN_DELETE_CMD('vol1')),
-            mock.call(
-                *self.testData.LUN_DELETE_CMD('vol1'))]
+            mock.call(*self.testData.LUN_DELETE_CMD('vol1')),
+            mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_cgsnapshot(self):
@@ -2246,20 +2694,16 @@ Time Remaining:  0 second(s)
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol1', 1,
                 'unit_test_pool',
-                None, None)),
-            mock.call('lun', '-list', '-name', 'vol1',
-                      '-state', '-status', '-opDetails',
-                      '-userCap', '-owner', '-attachedSnapshot'),
+                None, None, False)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
+                      poll=False),
             mock.call(*self.testData.ADD_LUN_TO_CG_CMD(
                 'cg_id', 1))]
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_cloned_volume_from_consistnecy_group(self):
-        cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot')
-        output_smp = ("""LOGICAL UNIT NUMBER 1
-                     Name:  vol1
-                     Attached Snapshot:  N/A""", 0)
         cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
+        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
         output_dest = self.testData.LUN_PROPERTY("vol1_dest")
         cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
         output_migrate = ("", 0)
@@ -2268,9 +2712,9 @@ Time Remaining:  0 second(s)
                                  'is not currently migrating', 23)
         cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
 
-        commands = [cmd_smp, cmd_dest, cmd_migrate,
+        commands = [cmd_dest, cmd_dest_p, cmd_migrate,
                     cmd_migrate_verify]
-        results = [output_smp, output_dest, output_migrate,
+        results = [output_dest, output_dest, output_migrate,
                    output_migrate_verify]
         fake_cli = self.driverSetup(commands, results)
 
@@ -2281,38 +2725,38 @@ Time Remaining:  0 second(s)
             mock.call(
                 *self.testData.CREATE_CG_SNAPSHOT(cg_name, tmp_cgsnapshot)),
             mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
-                                                        source='clone1')),
+                                                        source='clone1'),
+                      poll=False),
             mock.call(
                 *self.testData.SNAP_ATTACH_CMD(
                     name='vol1', snapName=tmp_cgsnapshot)),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol1_dest', 1, 'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'), poll=True),
             mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                      retry_disable=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
-            mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
+                      retry_disable=True,
+                      poll=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                      poll=True),
             mock.call(*self.testData.DELETE_CG_SNAPSHOT(tmp_cgsnapshot))]
         fake_cli.assert_has_calls(expect_cmd)
 
     def test_create_volume_from_cgsnapshot(self):
-        cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
-        output_smp = ("""LOGICAL UNIT NUMBER 1
-                     Name:  vol2
-                     Attached Snapshot:  N/A""", 0)
         cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
+        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
         output_dest = self.testData.LUN_PROPERTY("vol2_dest")
         cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
         output_migrate = ("", 0)
         cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
         output_migrate_verify = (r'The specified source LUN '
                                  'is not currently migrating', 23)
-        commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
-        results = [output_smp, output_dest, output_migrate,
+        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
+                    cmd_migrate_verify]
+        results = [output_dest, output_dest, output_migrate,
                    output_migrate_verify]
         fake_cli = self.driverSetup(commands, results)
 
@@ -2321,26 +2765,62 @@ Time Remaining:  0 second(s)
         expect_cmd = [
             mock.call(
                 *self.testData.SNAP_MP_CREATE_CMD(
-                    name='vol2', source='vol1')),
+                    name='vol2', source='vol1'),
+                poll=False),
             mock.call(
                 *self.testData.SNAP_ATTACH_CMD(
                     name='vol2', snapName='cgsnapshot_id')),
             mock.call(*self.testData.LUN_CREATION_CMD(
                 'vol2_dest', 1, 'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
+                      poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
+                      poll=True),
             mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                      retry_disable=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
-            mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot'),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'))]
+                      retry_disable=True,
+                      poll=True),
+            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
+                      poll=True)]
+        fake_cli.assert_has_calls(expect_cmd)
+
+    def test_deregister_initiator(self):
+        fake_cli = self.driverSetup()
+        self.driver.cli.destroy_empty_sg = True
+        self.driver.cli.itor_auto_dereg = True
+        cli_helper = self.driver.cli._client
+        data = {'storage_group_name': "fakehost",
+                'storage_group_uid': "2F:D4:00:00:00:00:00:"
+                "00:00:00:FF:E5:3A:03:FD:6D",
+                'lunmap': {1: 16}}
+        cli_helper.get_storage_group = mock.Mock(
+            return_value=data)
+        lun_info = {'lun_name': "unit_test_lun",
+                    'lun_id': 1,
+                    'pool': "unit_test_pool",
+                    'attached_snapshot': "N/A",
+                    'owner': "A",
+                    'total_capacity_gb': 1.0,
+                    'state': "Ready"}
+        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
+        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
+        cli_helper.disconnect_host_from_storage_group = mock.Mock()
+        cli_helper.delete_storage_group = mock.Mock()
+        self.driver.terminate_connection(self.testData.test_volume,
+                                         self.testData.connector)
+        expect_cmd = [
+            mock.call('port', '-removeHBA', '-hbauid',
+                      self.testData.connector['initiator'],
+                      '-o')]
         fake_cli.assert_has_calls(expect_cmd)
 
     def succeed_fake_command_execute(self, *command, **kwargv):
         return SUCCEED
 
+    def fake_setup_command_execute(self, *command, **kwargv):
+        return self.testData.ALL_PORTS
+
     def fake_get_pool_properties(self, filter_option, properties=None):
         pool_info = {'pool_name': "unit_test_pool0",
                      'total_capacity_gb': 1000.0,
@@ -2373,7 +2853,7 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
         super(EMCVNXCLIDriverFCTestCase, self).setUp()
 
         self.stubs.Set(CommandLineHelper, 'command_execute',
-                       self.succeed_fake_command_execute)
+                       self.fake_setup_command_execute)
         self.stubs.Set(CommandLineHelper, 'get_array_serial',
                        mock.Mock(return_value={'array_serial':
                                                "fakeSerial"}))
@@ -2393,15 +2873,14 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
         #set the timeout to 0.012s = 0.0002 * 60 = 12ms
         self.configuration.default_timeout = 0.0002
         self.configuration.initiator_auto_registration = True
+        self.configuration.check_max_pool_luns_threshold = False
         self.configuration.zoning_mode = None
+        self.configuration.max_luns_per_storage_pool = 4000
         self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
         self.testData = EMCVNXCLIDriverTestData()
         self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
             '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
 
-    def tearDown(self):
-        super(EMCVNXCLIDriverFCTestCase, self).tearDown()
-
     def driverSetup(self, commands=tuple(), results=tuple()):
         self.driver = EMCCLIFCDriver(configuration=self.configuration)
         fake_command_execute = self.get_command_execute_simulator(
@@ -2450,8 +2929,8 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
 
         return standard_default
 
-    def succeed_fake_command_execute(self, *command, **kwargv):
-        return SUCCEED
+    def fake_setup_command_execute(self, *command, **kwargv):
+        return self.testData.ALL_PORTS
 
     def fake_get_pool_properties(self, filter_option, properties=None):
         pool_info = {'pool_name': "unit_test_pool0",
@@ -2483,109 +2962,99 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
         mock.Mock(
             return_value=(
                 "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
-    @mock.patch("random.shuffle", mock.Mock())
+    @mock.patch('random.randint',
+                mock.Mock(return_value=0))
     def test_initialize_connection_fc_auto_reg(self):
         # Test for auto registration
+        test_volume = self.testData.test_volume.copy()
+        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
         self.configuration.initiator_auto_registration = True
         commands = [('storagegroup', '-list', '-gname', 'fakehost'),
-                    ('storagegroup', '-list'),
                     self.testData.GETFCPORT_CMD(),
                     ('port', '-list', '-gname', 'fakehost')]
         results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
                     self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
                    self.testData.FC_PORTS,
                    self.testData.FAKEHOST_PORTS]
 
         fake_cli = self.driverSetup(commands, results)
-        data = self.driver.initialize_connection(
-            self.testData.test_volume,
+        self.driver.initialize_connection(
+            test_volume,
             self.testData.connector)
 
-        self.assertEqual(data['data']['access_mode'], 'ro')
-
-        expected = [
-            mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-            mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-            mock.call('storagegroup', '-list'),
-            mock.call('port', '-list', '-sp'),
-            mock.call('storagegroup', '-gname', 'fakehost',
-                      '-setpath', '-hbauid',
-                      '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56',
-                      '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
-                      '-host', 'fakehost', '-o'),
-            mock.call('port', '-list', '-sp'),
-            mock.call('storagegroup', '-gname', 'fakehost',
-                      '-setpath', '-hbauid',
-                      '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16',
-                      '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
-                      '-host', 'fakehost', '-o'),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-            mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-            mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
-                      '-gname', 'fakehost'),
-            mock.call('port', '-list', '-gname', 'fakehost'),
-            mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-            mock.call('port', '-list', '-sp')]
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=False),
+                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
+                    mock.call('port', '-list', '-sp'),
+                    mock.call('storagegroup', '-gname', 'fakehost',
+                              '-setpath', '-hbauid',
+                              '22:34:56:78:90:12:34:56:12:34:56:78:'
+                              '90:12:34:56',
+                              '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
+                              '-host', 'fakehost', '-o'),
+                    mock.call('storagegroup', '-gname', 'fakehost',
+                              '-setpath', '-hbauid',
+                              '22:34:56:78:90:54:32:16:12:34:56:78:'
+                              '90:54:32:16',
+                              '-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
+                              '-host', 'fakehost', '-o'),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=True),
+                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
+                              '-gname', 'fakehost',
+                              poll=False),
+                    mock.call('port', '-list', '-gname', 'fakehost')
+                    ]
         fake_cli.assert_has_calls(expected)
 
         # Test for manual registration
         self.configuration.initiator_auto_registration = False
 
         commands = [('storagegroup', '-list', '-gname', 'fakehost'),
-                    ('storagegroup', '-list'),
                     self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
                     self.testData.GETFCPORT_CMD(),
                     ('port', '-list', '-gname', 'fakehost')]
         results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
-                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
+                    self.testData.STORAGE_GROUP_NO_MAP('fakehost')],
                    ('', 0),
                    self.testData.FC_PORTS,
                    self.testData.FAKEHOST_PORTS]
         fake_cli = self.driverSetup(commands, results)
-        data = self.driver.initialize_connection(
-            self.testData.test_volume_rw,
+        self.driver.initialize_connection(
+            test_volume,
             self.testData.connector)
 
-        self.assertEqual(data['data']['access_mode'], 'rw')
-
-        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=False),
                     mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                     mock.call('storagegroup', '-connecthost',
                               '-host', 'fakehost', '-gname', 'fakehost', '-o'),
-                    mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=True),
                     mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
-                              '-gname', 'fakehost'),
-                    mock.call('port', '-list', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
-                    mock.call('port', '-list', '-sp')]
+                              '-gname', 'fakehost', poll=False),
+                    mock.call('port', '-list', '-gname', 'fakehost')
+                    ]
         fake_cli.assert_has_calls(expected)
 
     @mock.patch(
         "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
         "get_device_mapping_from_network",
         mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
-    @mock.patch("random.shuffle", mock.Mock())
+    @mock.patch('random.randint',
+                mock.Mock(return_value=0))
     def test_initialize_connection_fc_auto_zoning(self):
         # Test for auto zoning
         self.configuration.zoning_mode = 'fabric'
         self.configuration.initiator_auto_registration = False
         commands = [('storagegroup', '-list', '-gname', 'fakehost'),
-                    ('storagegroup', '-list'),
                     self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
-                    self.testData.GETFCPORT_CMD(),
-                    ('port', '-list', '-gname', 'fakehost')]
+                    self.testData.GETFCPORT_CMD()]
         results = [[("No group", 83),
                     self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
                     self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
                    ('', 0),
-                   self.testData.FC_PORTS,
-                   self.testData.FAKEHOST_PORTS]
+                   self.testData.FC_PORTS]
         fake_cli = self.driverSetup(commands, results)
         self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
             configuration=self.configuration)
@@ -2598,18 +3067,18 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
                          EMCVNXCLIDriverTestData.i_t_map)
         self.assertEqual(conn_info['data']['target_wwn'],
                          ['1122334455667777'])
-        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+        expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=False),
                     mock.call('storagegroup', '-create', '-gname', 'fakehost'),
                     mock.call('storagegroup', '-connecthost',
                               '-host', 'fakehost', '-gname', 'fakehost', '-o'),
-                    mock.call('lun', '-list', '-name', 'vol1',
-                              '-state', '-status', '-opDetails',
-                              '-userCap', '-owner', '-attachedSnapshot'),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=True),
                     mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
-                              '-gname', 'fakehost'),
-                    mock.call('port', '-list', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost'),
+                              '-gname', 'fakehost',
+                              poll=False),
+                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
+                              poll=True),
                     mock.call('port', '-list', '-sp')]
         fake_cli.assert_has_calls(expected)
 
@@ -2626,22 +3095,14 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
                 'lunmap': {1: 16, 2: 88, 3: 47}}
         cli_helper.get_storage_group = mock.Mock(
             return_value=data)
-        lun_info = {'lun_name': "unit_test_lun",
-                    'lun_id': 1,
-                    'pool': "unit_test_pool",
-                    'attached_snapshot': "N/A",
-                    'owner': "A",
-                    'total_capacity_gb': 1.0,
-                    'state': "Ready"}
-        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
         cli_helper.remove_hlu_from_storagegroup = mock.Mock()
         self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
             configuration=self.configuration)
         connection_info = self.driver.terminate_connection(
             self.testData.test_volume,
             self.testData.connector)
-        self.assertFalse('initiator_target_map' in connection_info['data'],
-                         'initiator_target_map should not appear.')
+        self.assertFalse(connection_info['data'],
+                         'connection_info data should not be None.')
 
         cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
             16, self.testData.connector["host"])
@@ -2659,14 +3120,6 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
                 'lunmap': {}}
         cli_helper.get_storage_group = mock.Mock(
             return_value=data)
-        lun_info = {'lun_name': "unit_test_lun",
-                    'lun_id': 1,
-                    'pool': "unit_test_pool",
-                    'attached_snapshot': "N/A",
-                    'owner': "A",
-                    'total_capacity_gb': 1.0,
-                    'state': "Ready"}
-        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
         cli_helper.remove_hlu_from_storagegroup = mock.Mock()
         self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
             configuration=self.configuration)
@@ -2679,22 +3132,21 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
                          EMCVNXCLIDriverTestData.i_t_map)
 
     def test_get_volume_stats(self):
-        #expect_result = [POOL_PROPERTY]
         self.driverSetup()
         stats = self.driver.get_volume_stats(True)
         self.assertTrue(stats['driver_version'] is not None,
-                        "dirver_version is not returned")
+                        "driver_version is not returned")
         self.assertTrue(
-            stats['free_capacity_gb'] == 1000.6,
+            stats['free_capacity_gb'] == 3257.851,
             "free_capacity_gb is not correct")
         self.assertTrue(
-            stats['reserved_percentage'] == 0,
+            stats['reserved_percentage'] == 3,
             "reserved_percentage is not correct")
         self.assertTrue(
             stats['storage_protocol'] == 'FC',
             "storage_protocol is not correct")
         self.assertTrue(
-            stats['total_capacity_gb'] == 10000.5,
+            stats['total_capacity_gb'] == 3281.146,
             "total_capacity_gb is not correct")
         self.assertTrue(
             stats['vendor_name'] == "EMC",
@@ -2704,9 +3156,67 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
             "volume backend name is not correct")
         self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
         self.assertTrue(
-            stats['driver_version'] == "04.01.00",
+            stats['driver_version'] == "05.00.00",
             "driver version is incorrect.")
 
+    def test_get_volume_stats_too_many_luns(self):
+        commands = [self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
+        results = [self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
+        fake_cli = self.driverSetup(commands, results)
+
+        self.driver.cli.check_max_pool_luns_threshold = True
+        stats = self.driver.get_volume_stats(True)
+        self.assertTrue(
+            stats['free_capacity_gb'] == 0,
+            "free_capacity_gb is not correct")
+        expect_cmd = [
+            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
+                      poll=False)]
+        fake_cli.assert_has_calls(expect_cmd)
+        expect_cmd = [
+            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
+                      poll=False)]
+        fake_cli.assert_has_calls(expect_cmd)
+
+        self.driver.cli.check_max_pool_luns_threshold = False
+        stats = self.driver.get_volume_stats(True)
+        self.assertTrue(stats['driver_version'] is not None,
+                        "driver_version is not returned")
+        self.assertTrue(
+            stats['free_capacity_gb'] == 3257.851,
+            "free_capacity_gb is not correct")
+
+    def test_deregister_initiator(self):
+        fake_cli = self.driverSetup()
+        self.driver.cli.destroy_empty_sg = True
+        self.driver.cli.itor_auto_dereg = True
+        cli_helper = self.driver.cli._client
+        data = {'storage_group_name': "fakehost",
+                'storage_group_uid': "2F:D4:00:00:00:00:00:"
+                "00:00:00:FF:E5:3A:03:FD:6D",
+                'lunmap': {1: 16}}
+        cli_helper.get_storage_group = mock.Mock(
+            return_value=data)
+        lun_info = {'lun_name': "unit_test_lun",
+                    'lun_id': 1,
+                    'pool': "unit_test_pool",
+                    'attached_snapshot': "N/A",
+                    'owner': "A",
+                    'total_capacity_gb': 1.0,
+                    'state': "Ready"}
+        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
+        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
+        cli_helper.disconnect_host_from_storage_group = mock.Mock()
+        cli_helper.delete_storage_group = mock.Mock()
+        self.driver.terminate_connection(self.testData.test_volume,
+                                         self.testData.connector)
+        fc_itor_1 = '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56'
+        fc_itor_2 = '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16'
+        expect_cmd = [
+            mock.call('port', '-removeHBA', '-hbauid', fc_itor_1, '-o'),
+            mock.call('port', '-removeHBA', '-hbauid', fc_itor_2, '-o')]
+        fake_cli.assert_has_calls(expect_cmd)
+
 
 class EMCVNXCLIToggleSPTestData():
     def FAKE_COMMAND_PREFIX(self, sp_address):
@@ -2737,25 +3247,19 @@ class EMCVNXCLIToggleSPTestCase(test.TestCase):
             configuration=self.configuration)
         self.test_data = EMCVNXCLIToggleSPTestData()
 
-    def tearDown(self):
-        super(EMCVNXCLIToggleSPTestCase, self).tearDown()
-
     def test_no_sp_toggle(self):
         self.cli_client.active_storage_ip = '10.10.10.10'
         FAKE_SUCCESS_RETURN = ('success', 0)
         FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN, FAKE_SUCCESS_RETURN]
+        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN]
 
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
             self.cli_client.command_execute(*FAKE_COMMAND)
             self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.10")
-            expected = [mock.call(*('ping', '-c', 1, '10.10.10.10'),
-                                  check_exit_code=True),
-                        mock.call(
-                            *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
-                              + FAKE_COMMAND),
-                            check_exit_code=True)]
+            expected = [
+                mock.call(*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+                          + FAKE_COMMAND), check_exit_code=True)]
             mock_utils.assert_has_calls(expected)
 
     def test_toggle_sp_with_server_unavailabe(self):
@@ -2765,10 +3269,9 @@ Error occurred during HTTP request/response from the target: '10.244.213.142'.
 Message : HTTP/1.1 503 Service Unavailable"""
         FAKE_SUCCESS_RETURN = ('success', 0)
         FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
-                        processutils.ProcessExecutionError(
-                            exit_code=255, stdout=FAKE_ERROR_MSG),
-                        FAKE_SUCCESS_RETURN]
+        SIDE_EFFECTS = [processutils.ProcessExecutionError(
+            exit_code=255, stdout=FAKE_ERROR_MSG),
+            FAKE_SUCCESS_RETURN]
 
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
@@ -2792,10 +3295,9 @@ Error occurred during HTTP request/response from the target: '10.244.213.142'.
 Message : End of data stream"""
         FAKE_SUCCESS_RETURN = ('success', 0)
         FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
-                        processutils.ProcessExecutionError(
-                            exit_code=255, stdout=FAKE_ERROR_MSG),
-                        FAKE_SUCCESS_RETURN]
+        SIDE_EFFECTS = [processutils.ProcessExecutionError(
+            exit_code=255, stdout=FAKE_ERROR_MSG),
+            FAKE_SUCCESS_RETURN]
 
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
@@ -2821,10 +3323,35 @@ Unable to establish a secure connection to the Management Server.
 """
         FAKE_SUCCESS_RETURN = ('success', 0)
         FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
-                        processutils.ProcessExecutionError(
-                            exit_code=255, stdout=FAKE_ERROR_MSG),
-                        FAKE_SUCCESS_RETURN]
+        SIDE_EFFECTS = [processutils.ProcessExecutionError(
+            exit_code=255, stdout=FAKE_ERROR_MSG),
+            FAKE_SUCCESS_RETURN]
+
+        with mock.patch('cinder.utils.execute') as mock_utils:
+            mock_utils.side_effect = SIDE_EFFECTS
+            self.cli_client.command_execute(*FAKE_COMMAND)
+            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+            expected = [
+                mock.call(
+                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+                        + FAKE_COMMAND),
+                    check_exit_code=True),
+                mock.call(
+                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
+                        + FAKE_COMMAND),
+                    check_exit_code=True)]
+            mock_utils.assert_has_calls(expected)
+
+    def test_toggle_sp_with_connection_error(self):
+        self.cli_client.active_storage_ip = '10.10.10.10'
+        FAKE_ERROR_MSG = """\
+A network error occurred while trying to connect: '192.168.1.56'.
+Message : Error occurred because of time out"""
+        FAKE_SUCCESS_RETURN = ('success', 0)
+        FAKE_COMMAND = ('list', 'pool')
+        SIDE_EFFECTS = [processutils.ProcessExecutionError(
+            exit_code=255, stdout=FAKE_ERROR_MSG),
+            FAKE_SUCCESS_RETURN]
 
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
index d4c66f40722bf15e80765a652bddecf978ca998e..40580492ba10dd81165abe76335e14562ff2f54d 100644 (file)
@@ -47,6 +47,10 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
                 External Volume Management, Read-only Volume,
                 FC Auto Zoning
         4.1.0 - Consistency group support
+        5.0.0 - Performance enhancement, LUN Number Threshold Support,
+                Initiator Auto Deregistration,
+                Force Deleting LUN in Storage Groups,
+                robustness enhancement
     """
 
     def __init__(self, *args, **kwargs):
@@ -158,25 +162,18 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
         """
         conn_info = self.cli.initialize_connection(volume,
                                                    connector)
-        conn_info = self.cli.adjust_fc_conn_info(conn_info, connector)
         LOG.debug("Exit initialize_connection"
                   " - Returning FC connection info: %(conn_info)s."
                   % {'conn_info': conn_info})
-
         return conn_info
 
     @RemoveFCZone
     def terminate_connection(self, volume, connector, **kwargs):
         """Disallow connection from connector."""
-        remove_zone = self.cli.terminate_connection(volume, connector)
-        conn_info = {'driver_volume_type': 'fibre_channel',
-                     'data': {}}
-        conn_info = self.cli.adjust_fc_conn_info(conn_info, connector,
-                                                 remove_zone)
+        conn_info = self.cli.terminate_connection(volume, connector)
         LOG.debug("Exit terminate_connection"
                   " - Returning FC connection info: %(conn_info)s."
                   % {'conn_info': conn_info})
-
         return conn_info
 
     def get_volume_stats(self, refresh=False):
@@ -235,4 +232,4 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
 
     def delete_cgsnapshot(self, context, cgsnapshot):
         """Deletes a cgsnapshot."""
-        return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
\ No newline at end of file
+        return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
index 6c69c768cdbfd3e7264ddf42d4fd1528380381f9..43b0819f5db3eaee8659979099c235b94e2cea45 100644 (file)
@@ -44,6 +44,10 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
                 External Volume Management, Read-only Volume,
                 FC Auto Zoning
         4.1.0 - Consistency group support
+        5.0.0 - Performance enhancement, LUN Number Threshold Support,
+                Initiator Auto Deregistration,
+                Force Deleting LUN in Storage Groups,
+                robustness enhancement
     """
 
     def __init__(self, *args, **kwargs):
@@ -98,7 +102,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
 
     def create_export(self, context, volume):
         """Driver entry point to get the export info for a new volume."""
-        self.cli.create_export(context, volume)
+        pass
 
     def remove_export(self, context, volume):
         """Driver entry point to remove an export for a volume."""
@@ -192,4 +196,4 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
 
     def delete_cgsnapshot(self, context, cgsnapshot):
         """Deletes a cgsnapshot."""
-        return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
\ No newline at end of file
+        return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
index 5389b99f7c2eb959218bf0070b8e180d83d4d08e..41c0acd392f87481d203b0ce3660897c6bcbe0ca 100644 (file)
 """
 VNX CLI
 """
-
+import math
 import os
 import random
 import re
 import time
+import types
 
+import eventlet
 from oslo_concurrency import lockutils
 from oslo_concurrency import processutils
 from oslo_config import cfg
@@ -28,6 +30,10 @@ from oslo_serialization import jsonutils as json
 from oslo_utils import excutils
 from oslo_utils import timeutils
 import six
+import taskflow.engines
+from taskflow.patterns import linear_flow
+from taskflow import task
+from taskflow.types import failure
 
 from cinder import exception
 from cinder.exception import EMCVnxCLICmdError
@@ -44,10 +50,11 @@ CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 INTERVAL_5_SEC = 5
+INTERVAL_20_SEC = 20
 INTERVAL_30_SEC = 30
 INTERVAL_60_SEC = 60
 
-NO_POLL = True
+ENABLE_TRACE = False
 
 loc_opts = [
     cfg.StrOpt('storage_vnx_authentication_type',
@@ -92,14 +99,50 @@ loc_opts = [
                 default=False,
                 help='Automatically register initiators. '
                 'By default, the value is False.'),
+    cfg.BoolOpt('initiator_auto_deregistration',
+                default=False,
+                help='Automatically deregister initiators after the related '
+                'storage group is destroyed. '
+                'By default, the value is False.'),
+    cfg.BoolOpt('check_max_pool_luns_threshold',
+                default=False,
+                help='Report free_capacity_gb as 0 when the limit to '
+                'maximum number of pool LUNs is reached. '
+                'By default, the value is False.'),
+    cfg.BoolOpt('force_delete_lun_in_storagegroup',
+                default=False,
+                help='Delete a LUN even if it is in Storage Groups.')
 ]
 
 CONF.register_opts(loc_opts)
 
 
+def decorate_all_methods(method_decorator):
+    """Applies decorator on the methods of a class.
+
+    This is a class decorator, which will apply method decorator referred
+    by method_decorator to all the public methods (without underscore as
+    the prefix) in a class.
+    """
+    if not ENABLE_TRACE:
+        return lambda cls: cls
+
+    def _decorate_all_methods(cls):
+        for attr_name, attr_val in cls.__dict__.items():
+            if (isinstance(attr_val, types.FunctionType) and
+                    not attr_name.startswith("_")):
+                setattr(cls, attr_name, method_decorator(attr_val))
+        return cls
+
+    return _decorate_all_methods
+
+
 def log_enter_exit(func):
+    if not CONF.debug:
+        return func
+
     def inner(self, *args, **kwargs):
-        LOG.debug("Entering %(cls)s.%(method)s" %
+        LOG.debug("Entering %(cls)s.%(method)s",
                   {'cls': self.__class__.__name__,
                    'method': func.__name__})
         start = timeutils.utcnow()
@@ -107,7 +150,7 @@ def log_enter_exit(func):
         end = timeutils.utcnow()
         LOG.debug("Exiting %(cls)s.%(method)s. "
                   "Spent %(duration)s sec. "
-                  "Return %(return)s" %
+                  "Return %(return)s",
                   {'cls': self.__class__.__name__,
                    'duration': timeutils.delta_seconds(start, end),
                    'method': func.__name__,
@@ -124,6 +167,7 @@ class PropertyDescriptor(object):
         self.converter = converter
 
 
+@decorate_all_methods(log_enter_exit)
 class CommandLineHelper(object):
 
     LUN_STATE = PropertyDescriptor(
@@ -188,8 +232,27 @@ class CommandLineHelper(object):
 
     POOL_ALL = [POOL_TOTAL_CAPACITY, POOL_FREE_CAPACITY]
 
+    MAX_POOL_LUNS = PropertyDescriptor(
+        '-maxPoolLUNs',
+        'Max. Pool LUNs:\s*(.*)\s*',
+        'max_pool_luns',
+        int)
+    TOTAL_POOL_LUNS = PropertyDescriptor(
+        '-numPoolLUNs',
+        'Total Number of Pool LUNs:\s*(.*)\s*',
+        'total_pool_luns',
+        int)
+
+    POOL_FEATURE_DEFAULT = (MAX_POOL_LUNS, TOTAL_POOL_LUNS)
+
     CLI_RESP_PATTERN_CG_NOT_FOUND = 'Cannot find'
     CLI_RESP_PATTERN_SNAP_NOT_FOUND = 'The specified snapshot does not exist'
+    CLI_RESP_PATTERN_LUN_NOT_EXIST = 'The (pool lun) may not exist'
+    CLI_RESP_PATTERN_SMP_NOT_ATTACHED = ('The specified Snapshot mount point '
+                                         'is not currently attached.')
+    CLI_RESP_PATTERN_SG_NAME_IN_USE = "Storage Group name already in use"
+    CLI_RESP_PATTERN_LUN_IN_SG_1 = "contained in a Storage Group"
+    CLI_RESP_PATTERN_LUN_IN_SG_2 = "Host LUN/LUN mapping still exists"
 
     def __init__(self, configuration):
         configuration.append_config_values(san.san_opts)
@@ -210,8 +273,8 @@ class CommandLineHelper(object):
         self.primary_storage_ip = self.active_storage_ip
         self.secondary_storage_ip = configuration.san_secondary_ip
         if self.secondary_storage_ip == self.primary_storage_ip:
-            LOG.warn(_LE("san_secondary_ip is configured as "
-                         "the same value as san_ip."))
+            LOG.warning(_LE("san_secondary_ip is configured as "
+                            "the same value as san_ip."))
             self.secondary_storage_ip = None
         if not configuration.san_ip:
             err_msg = _('san_ip: Mandatory field configuration. '
@@ -235,7 +298,7 @@ class CommandLineHelper(object):
         # if there is security file path provided, use this security file
         if storage_vnx_security_file:
             self.credentials = ('-secfilepath', storage_vnx_security_file)
-            LOG.info(_LI("Using security file in %s for authentication") %
+            LOG.info(_LI("Using security file in %s for authentication"),
                      storage_vnx_security_file)
         # if there is a username/password provided, use those in the cmd line
         elif storage_username is not None and len(storage_username) > 0 and\
@@ -283,15 +346,23 @@ class CommandLineHelper(object):
                 '-initialTier', 'optimizePool',
                 '-tieringPolicy', 'noMovement']}
 
-    @log_enter_exit
+    def _raise_cli_error(self, cmd=None, rc=None, out='', **kwargs):
+        raise EMCVnxCLICmdError(cmd=cmd,
+                                rc=rc,
+                                out=out.split('\n'),
+                                **kwargs)
+
     def create_lun_with_advance_feature(self, pool, name, size,
                                         provisioning, tiering,
-                                        consistencygroup_id=None):
+                                        consistencygroup_id=None,
+                                        poll=True):
         command_create_lun = ['lun', '-create',
                               '-capacity', size,
                               '-sq', 'gb',
                               '-poolName', pool,
                               '-name', name]
+        if not poll:
+            command_create_lun = ['-np'] + command_create_lun
         # provisioning
         if provisioning:
             command_create_lun.extend(self.provisioning_values[provisioning])
@@ -310,8 +381,8 @@ class CommandLineHelper(object):
         except EMCVnxCLICmdError as ex:
             with excutils.save_and_reraise_exception():
                 self.delete_lun(name)
-                LOG.error(_LE("Error on enable compression on lun %s.")
-                          six.text_type(ex))
+                LOG.error(_LE("Error on enable compression on lun %s."),
+                          six.text_type(ex))
 
         # handle consistency group
         try:
@@ -322,32 +393,40 @@ class CommandLineHelper(object):
             with excutils.save_and_reraise_exception():
                 self.delete_lun(name)
                 LOG.error(_LE("Error on adding lun to consistency"
-                              " group. %s") % six.text_type(ex))
+                              " group. %s"), six.text_type(ex))
         return data
 
-    @log_enter_exit
     def create_lun_by_cmd(self, cmd, name):
         out, rc = self.command_execute(*cmd)
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 4 and out.find('(0x712d8d04)') >= 0:
-                LOG.warn(_LW('LUN already exists, LUN name %(name)s. '
-                             'Message: %(msg)s') %
-                         {'name': name, 'msg': out})
+                LOG.warning(_LW('LUN already exists, LUN name %(name)s. '
+                                'Message: %(msg)s'),
+                            {'name': name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(cmd, rc, out)
+                self._raise_cli_error(cmd, rc, out)
 
         def lun_is_ready():
-            data = self.get_lun_by_name(name)
-            return data[self.LUN_STATE.key] == 'Ready' and \
-                data[self.LUN_STATUS.key] == 'OK(0x0)' and \
-                data[self.LUN_OPERATION.key] == 'None'
+            try:
+                data = self.get_lun_by_name(name, self.LUN_ALL, False)
+                return (data[self.LUN_STATE.key] == 'Ready' and
+                        data[self.LUN_STATUS.key] == 'OK(0x0)' and
+                        data[self.LUN_OPERATION.key] == 'None')
+            except EMCVnxCLICmdError as ex:
+                orig_out = "\n".join(ex.kwargs["out"])
+                if orig_out.find(
+                        self.CLI_RESP_PATTERN_LUN_NOT_EXIST) >= 0:
+                    return False
+                else:
+                    raise ex
 
-        self._wait_for_a_condition(lun_is_ready)
-        lun = self.get_lun_by_name(name)
+        self._wait_for_a_condition(lun_is_ready,
+                                   None,
+                                   INTERVAL_5_SEC)
+        lun = self.get_lun_by_name(name, self.LUN_ALL, False)
         return lun
 
-    @log_enter_exit
     def delete_lun(self, name):
 
         command_delete_lun = ['lun', '-destroy',
@@ -356,14 +435,37 @@ class CommandLineHelper(object):
                               '-o']
         # executing cli command to delete volume
         out, rc = self.command_execute(*command_delete_lun)
-        if rc != 0:
+        if rc != 0 or out.strip():
             # Ignore the error that due to retry
-            if rc == 9 and out.find("not exist") >= 0:
-                LOG.warn(_LW("LUN is already deleted, LUN name %(name)s. "
-                             "Message: %(msg)s") %
-                         {'name': name, 'msg': out})
+            if rc == 9 and self.CLI_RESP_PATTERN_LUN_NOT_EXIST in out:
+                LOG.warning(_LW("LUN is already deleted, LUN name %(name)s. "
+                                "Message: %(msg)s"),
+                            {'name': name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_delete_lun, rc, out)
+                self._raise_cli_error(command_delete_lun, rc, out)
+
+    def get_hlus(self, lun_id, poll=True):
+        hlus = list()
+        command_storage_group_list = ('storagegroup', '-list')
+        out, rc = self.command_execute(*command_storage_group_list,
+                                       poll=poll)
+        if rc != 0:
+            self._raise_cli_error(command_storage_group_list, rc, out)
+        sg_name_p = re.compile(r'^\s*(?P<sg_name>[^\n\r]+)')
+        hlu_alu_p = re.compile(r'HLU/ALU Pairs:'
+                               r'\s*HLU Number\s*ALU Number'
+                               r'\s*[-\s]*'
+                               r'(\d|\s)*'
+                               r'\s+(?P<hlu>\d+)( |\t)+%s' % lun_id)
+        for sg_info in out.split('Storage Group Name:'):
+            hlu_alu_m = hlu_alu_p.search(sg_info)
+            if hlu_alu_m is None:
+                continue
+            sg_name_m = sg_name_p.search(sg_info)
+            if sg_name_m:
+                hlus.append((hlu_alu_m.group('hlu'),
+                             sg_name_m.group('sg_name')))
+        return hlus
 
     def _wait_for_a_condition(self, testmethod, timeout=None,
                               interval=INTERVAL_5_SEC):
@@ -378,9 +480,9 @@ class CommandLineHelper(object):
                 testValue = False
                 LOG.debug('CommandLineHelper.'
                           '_wait_for_condition: %(method_name)s '
-                          'execution failed for %(exception)s'
-                          {'method_name': testmethod.__name__,
-                             'exception': ex.message})
+                          'execution failed for %(exception)s',
+                          {'method_name': testmethod.__name__,
+                           'exception': six.text_type(ex)})
             if testValue:
                 raise loopingcall.LoopingCallDone()
 
@@ -393,8 +495,7 @@ class CommandLineHelper(object):
         timer = loopingcall.FixedIntervalLoopingCall(_inner)
         timer.start(interval=interval).wait()
 
-    @log_enter_exit
-    def expand_lun(self, name, new_size):
+    def expand_lun(self, name, new_size, poll=True):
 
         command_expand_lun = ('lun', '-expand',
                               '-name', name,
@@ -402,28 +503,27 @@ class CommandLineHelper(object):
                               '-sq', 'gb',
                               '-o',
                               '-ignoreThresholds')
-        out, rc = self.command_execute(*command_expand_lun)
+        out, rc = self.command_execute(*command_expand_lun,
+                                       poll=poll)
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 4 and out.find("(0x712d8e04)") >= 0:
-                LOG.warn(_LW("LUN %(name)s is already expanded. "
-                             "Message: %(msg)s") %
-                         {'name': name, 'msg': out})
+                LOG.warning(_LW("LUN %(name)s is already expanded. "
+                                "Message: %(msg)s"),
+                            {'name': name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_expand_lun, rc, out)
+                self._raise_cli_error(command_expand_lun, rc, out)
 
-    @log_enter_exit
     def expand_lun_and_wait(self, name, new_size):
-        self.expand_lun(name, new_size)
+        self.expand_lun(name, new_size, poll=False)
 
         def lun_is_extented():
-            data = self.get_lun_by_name(name)
+            data = self.get_lun_by_name(name, poll=False)
             return new_size == data[self.LUN_CAPACITY.key]
 
         self._wait_for_a_condition(lun_is_extented)
 
-    @log_enter_exit
-    def lun_rename(self, lun_id, new_name):
+    def lun_rename(self, lun_id, new_name, poll=False):
         """This function used to rename a lun to match
         the expected name for the volume.
         """
@@ -432,11 +532,11 @@ class CommandLineHelper(object):
                               '-newName', new_name,
                               '-o')
 
-        out, rc = self.command_execute(*command_lun_rename)
+        out, rc = self.command_execute(*command_lun_rename,
+                                       poll=poll)
         if rc != 0:
-            raise EMCVnxCLICmdError(command_lun_rename, rc, out)
+            self._raise_cli_error(command_lun_rename, rc, out)
 
-    @log_enter_exit
     def modify_lun_tiering(self, name, tiering):
         """This function used to modify a lun's tiering policy."""
         command_modify_lun = ['lun', '-modify',
@@ -447,9 +547,8 @@ class CommandLineHelper(object):
 
             out, rc = self.command_execute(*command_modify_lun)
             if rc != 0:
-                raise EMCVnxCLICmdError(command_modify_lun, rc, out)
+                self._raise_cli_error(command_modify_lun, rc, out)
 
-    @log_enter_exit
     def create_consistencygroup(self, context, group):
         """create the consistency group."""
         cg_name = group['id']
@@ -463,13 +562,12 @@ class CommandLineHelper(object):
             # Ignore the error if consistency group already exists
             if (rc == 33 and
                     out.find("(0x716d8021)") >= 0):
-                LOG.warn(_LW('Consistency group %(name)s already '
-                             'exists. Message: %(msg)s') %
-                         {'name': cg_name, 'msg': out})
+                LOG.warning(_LW('Consistency group %(name)s already '
+                                'exists. Message: %(msg)s'),
+                            {'name': cg_name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_create_cg, rc, out)
+                self._raise_cli_error(command_create_cg, rc, out)
 
-    @log_enter_exit
     def get_consistency_group_by_name(self, cg_name):
         cmd = ('snap', '-group', '-list', '-id', cg_name)
         data = {
@@ -490,11 +588,10 @@ class CommandLineHelper(object):
                 luns_of_cg = m.groups()[3].split(',')
                 if luns_of_cg:
                     data['Luns'] = [lun.strip() for lun in luns_of_cg]
-                LOG.debug("Found consistent group %s." % data['Name'])
+                LOG.debug("Found consistent group %s.", data['Name'])
 
         return data
 
-    @log_enter_exit
     def add_lun_to_consistency_group(self, cg_name, lun_id):
         add_lun_to_cg_cmd = ('-np', 'snap', '-group',
                              '-addmember', '-id',
@@ -506,25 +603,24 @@ class CommandLineHelper(object):
                    "group %(cg_name)s.") % {'lun': lun_id,
                                             'cg_name': cg_name})
             LOG.error(msg)
-            raise EMCVnxCLICmdError(add_lun_to_cg_cmd, rc, out)
+            self._raise_cli_error(add_lun_to_cg_cmd, rc, out)
 
         def add_lun_to_consistency_success():
             data = self.get_consistency_group_by_name(cg_name)
             if str(lun_id) in data['Luns']:
                 LOG.debug(("Add lun %(lun)s to consistency "
-                           "group %(cg_name)s successfully.") %
+                           "group %(cg_name)s successfully."),
                           {'lun': lun_id, 'cg_name': cg_name})
                 return True
             else:
                 LOG.debug(("Adding lun %(lun)s to consistency "
-                           "group %(cg_name)s.") %
+                           "group %(cg_name)s."),
                           {'lun': lun_id, 'cg_name': cg_name})
                 return False
 
         self._wait_for_a_condition(add_lun_to_consistency_success,
                                    interval=INTERVAL_30_SEC)
 
-    @log_enter_exit
     def delete_consistencygroup(self, cg_name):
         delete_cg_cmd = ('-np', 'snap', '-group',
                          '-destroy', '-id', cg_name)
@@ -532,20 +628,19 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error if CG doesn't exist
             if rc == 13 and out.find(self.CLI_RESP_PATTERN_CG_NOT_FOUND) >= 0:
-                LOG.warn(_LW("CG %(cg_name)s does not exist. "
-                             "Message: %(msg)s") %
-                         {'cg_name': cg_name, 'msg': out})
+                LOG.warning(_LW("CG %(cg_name)s does not exist. "
+                                "Message: %(msg)s"),
+                            {'cg_name': cg_name, 'msg': out})
             elif rc == 1 and out.find("0x712d8801") >= 0:
-                LOG.warn(_LW("CG %(cg_name)s is deleting. "
-                             "Message: %(msg)s") %
-                         {'cg_name': cg_name, 'msg': out})
+                LOG.warning(_LW("CG %(cg_name)s is deleting. "
+                                "Message: %(msg)s"),
+                            {'cg_name': cg_name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(delete_cg_cmd, rc, out)
+                self._raise_cli_error(delete_cg_cmd, rc, out)
         else:
             LOG.info(_LI('Consistency group %s was deleted '
-                         'successfully.') % cg_name)
+                         'successfully.'), cg_name)
 
-    @log_enter_exit
     def create_cgsnapshot(self, cgsnapshot):
         """Create a cgsnapshot (snap group)."""
         cg_name = cgsnapshot['consistencygroup_id']
@@ -562,13 +657,12 @@ class CommandLineHelper(object):
             # Ignore the error if cgsnapshot already exists
             if (rc == 5 and
                     out.find("(0x716d8005)") >= 0):
-                LOG.warn(_LW('Cgsnapshot name %(name)s already '
-                             'exists. Message: %(msg)s') %
-                         {'name': snap_name, 'msg': out})
+                LOG.warning(_LW('Cgsnapshot name %(name)s already '
+                                'exists. Message: %(msg)s'),
+                            {'name': snap_name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(create_cg_snap_cmd, rc, out)
+                self._raise_cli_error(create_cg_snap_cmd, rc, out)
 
-    @log_enter_exit
     def delete_cgsnapshot(self, cgsnapshot):
         """Delete a cgsnapshot (snap group)."""
         snap_name = cgsnapshot['id']
@@ -580,62 +674,61 @@ class CommandLineHelper(object):
             # Ignore the error if cgsnapshot does not exist.
             if (rc == 5 and
                     out.find(self.CLI_RESP_PATTERN_SNAP_NOT_FOUND) >= 0):
-                LOG.warn(_LW('Snapshot %(name)s for consistency group '
-                             'does not exist. Message: %(msg)s') %
-                         {'name': snap_name, 'msg': out})
+                LOG.warning(_LW('Snapshot %(name)s for consistency group '
+                                'does not exist. Message: %(msg)s'),
+                            {'name': snap_name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(delete_cg_snap_cmd, rc, out)
+                self._raise_cli_error(delete_cg_snap_cmd, rc, out)
 
-    @log_enter_exit
-    def create_snapshot(self, volume_name, name):
-        data = self.get_lun_by_name(volume_name)
-        if data[self.LUN_ID.key] is not None:
+    def create_snapshot(self, lun_id, name):
+        if lun_id is not None:
             command_create_snapshot = ('snap', '-create',
-                                       '-res', data[self.LUN_ID.key],
+                                       '-res', lun_id,
                                        '-name', name,
                                        '-allowReadWrite', 'yes',
                                        '-allowAutoDelete', 'no')
 
-            out, rc = self.command_execute(*command_create_snapshot)
+            out, rc = self.command_execute(*command_create_snapshot,
+                                           poll=False)
             if rc != 0:
                 # Ignore the error that due to retry
                 if (rc == 5 and
                         out.find("(0x716d8005)") >= 0):
-                    LOG.warn(_LW('Snapshot %(name)s already exists. '
-                                 'Message: %(msg)s') %
-                             {'name': name, 'msg': out})
+                    LOG.warning(_LW('Snapshot %(name)s already exists. '
+                                    'Message: %(msg)s'),
+                                {'name': name, 'msg': out})
                 else:
-                    raise EMCVnxCLICmdError(command_create_snapshot, rc, out)
+                    self._raise_cli_error(command_create_snapshot, rc, out)
         else:
-            msg = _('Failed to get LUN ID for volume %s.') % volume_name
+            msg = _('Failed to create snapshot as no LUN ID is specified')
             raise exception.VolumeBackendAPIException(data=msg)
 
-    @log_enter_exit
     def delete_snapshot(self, name):
 
         def delete_snapshot_success():
             command_delete_snapshot = ('snap', '-destroy',
                                        '-id', name,
                                        '-o')
-            out, rc = self.command_execute(*command_delete_snapshot)
+            out, rc = self.command_execute(*command_delete_snapshot,
+                                           poll=True)
             if rc != 0:
                 # Ignore the error that due to retry
                 if rc == 5 and out.find("not exist") >= 0:
-                    LOG.warn(_LW("Snapshot %(name)s may deleted already. "
-                                 "Message: %(msg)s") %
-                             {'name': name, 'msg': out})
+                    LOG.warning(_LW("Snapshot %(name)s may deleted already. "
+                                    "Message: %(msg)s"),
+                                {'name': name, 'msg': out})
                     return True
                 # The snapshot cannot be destroyed because it is
                 # attached to a snapshot mount point. Wait
                 elif rc == 3 and out.find("(0x716d8003)") >= 0:
-                    LOG.warn(_LW("Snapshot %(name)s is in use, retry. "
-                                 "Message: %(msg)s") %
-                             {'name': name, 'msg': out})
+                    LOG.warning(_LW("Snapshot %(name)s is in use, retry. "
+                                    "Message: %(msg)s"),
+                                {'name': name, 'msg': out})
                     return False
                 else:
-                    raise EMCVnxCLICmdError(command_delete_snapshot, rc, out)
+                    self._raise_cli_error(command_delete_snapshot, rc, out)
             else:
-                LOG.info(_LI('Snapshot %s was deleted successfully.') %
+                LOG.info(_LI('Snapshot %s was deleted successfully.'),
                          name)
                 return True
 
@@ -643,7 +736,6 @@ class CommandLineHelper(object):
                                    interval=INTERVAL_30_SEC,
                                    timeout=INTERVAL_30_SEC * 3)
 
-    @log_enter_exit
     def create_mount_point(self, primary_lun_name, name):
 
         command_create_mount_point = ('lun', '-create',
@@ -651,19 +743,19 @@ class CommandLineHelper(object):
                                       '-primaryLunName', primary_lun_name,
                                       '-name', name)
 
-        out, rc = self.command_execute(*command_create_mount_point)
+        out, rc = self.command_execute(*command_create_mount_point,
+                                       poll=False)
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 4 and out.find("(0x712d8d04)") >= 0:
-                LOG.warn(_LW("Mount point %(name)s already exists. "
-                             "Message: %(msg)s") %
-                         {'name': name, 'msg': out})
+                LOG.warning(_LW("Mount point %(name)s already exists. "
+                                "Message: %(msg)s"),
+                            {'name': name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_create_mount_point, rc, out)
+                self._raise_cli_error(command_create_mount_point, rc, out)
 
         return rc
 
-    @log_enter_exit
     def attach_mount_point(self, name, snapshot_name):
 
         command_attach_mount_point = ('lun', '-attach',
@@ -674,36 +766,35 @@ class CommandLineHelper(object):
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 85 and out.find('(0x716d8055)') >= 0:
-                LOG.warn(_LW("Snapshot %(snapname)s is attached to snapshot "
-                             "mount point %(mpname)s already. "
-                             "Message: %(msg)s") %
-                         {'snapname': snapshot_name,
-                          'mpname': name,
-                          'msg': out})
+                LOG.warning(_LW("Snapshot %(snapname)s is attached to "
+                                "snapshot mount point %(mpname)s already. "
+                                "Message: %(msg)s"),
+                            {'snapname': snapshot_name,
+                             'mpname': name,
+                             'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_attach_mount_point, rc, out)
+                self._raise_cli_error(command_attach_mount_point, rc, out)
 
         return rc
 
-    @log_enter_exit
-    def check_smp_not_attached(self, smp_name):
-        """Ensure a snap mount point with snap become a LUN."""
+    def detach_mount_point(self, smp_name):
 
-        def _wait_for_sync_status():
-            lun_list = ('lun', '-list', '-name', smp_name,
-                        '-attachedSnapshot')
-            out, rc = self.command_execute(*lun_list)
-            if rc == 0:
-                vol_details = out.split('\n')
-                snap_name = vol_details[2].split(':')[1].strip()
-            if (snap_name == 'N/A'):
-                return True
-            return False
+        command_detach_mount_point = ('lun', '-detach',
+                                      '-name', smp_name)
+
+        out, rc = self.command_execute(*command_detach_mount_point)
+        if rc != 0:
+            # Ignore the error that due to retry
+            if (rc == 162 and
+                    out.find(self.CLI_RESP_PATTERN_SMP_NOT_ATTACHED) >= 0):
+                LOG.warning(_LW("The specified Snapshot mount point %s is not "
+                                "currently attached."), smp_name)
+            else:
+                self._raise_cli_error(command_detach_mount_point, rc, out)
 
-        self._wait_for_a_condition(_wait_for_sync_status)
+        return rc
 
-    @log_enter_exit
-    def migrate_lun(self, src_id, dst_id, log_failure_as_error=True):
+    def migrate_lun(self, src_id, dst_id):
         command_migrate_lun = ('migrate', '-start',
                                '-source', src_id,
                                '-dest', dst_id,
@@ -711,49 +802,50 @@ class CommandLineHelper(object):
                                '-o')
         # SP HA is not supported by LUN migration
         out, rc = self.command_execute(*command_migrate_lun,
-                                       retry_disable=True)
+                                       retry_disable=True,
+                                       poll=True)
 
         if 0 != rc:
-            raise EMCVnxCLICmdError(command_migrate_lun, rc, out,
-                                    log_failure_as_error)
+            self._raise_cli_error(command_migrate_lun, rc, out)
 
         return rc
 
-    @log_enter_exit
     def migrate_lun_with_verification(self, src_id,
                                       dst_id=None,
                                       dst_name=None):
         try:
-            self.migrate_lun(src_id, dst_id, log_failure_as_error=False)
+            self.migrate_lun(src_id, dst_id)
         except EMCVnxCLICmdError as ex:
             migration_succeed = False
-            if self._is_sp_unavailable_error(ex.out):
-                LOG.warn(_LW("Migration command may get network timeout. "
-                             "Double check whether migration in fact "
-                             "started successfully. Message: %(msg)s") %
-                         {'msg': ex.out})
+            orig_out = "\n".join(ex.kwargs["out"])
+            if self._is_sp_unavailable_error(orig_out):
+                LOG.warning(_LW("Migration command may get network timeout. "
+                                "Double check whether migration in fact "
+                                "started successfully. Message: %(msg)s"),
+                            {'msg': ex.kwargs["out"]})
                 command_migrate_list = ('migrate', '-list',
                                         '-source', src_id)
-                rc = self.command_execute(*command_migrate_list)[1]
+                rc = self.command_execute(*command_migrate_list,
+                                          poll=True)[1]
                 if rc == 0:
                     migration_succeed = True
 
             if not migration_succeed:
-                LOG.warn(_LW("Start migration failed. Message: %s") %
-                         ex.out)
-                LOG.debug("Delete temp LUN after migration "
-                          "start failed. LUN: %s" % dst_name)
-                if(dst_name is not None):
+                LOG.warning(_LW("Start migration failed. Message: %s"),
+                            ex.kwargs["out"])
+                if dst_name is not None:
+                    LOG.warning(_LW("Delete temp LUN after migration "
+                                    "start failed. LUN: %s"), dst_name)
                     self.delete_lun(dst_name)
                 return False
 
         # Set the proper interval to verify the migration status
-        def migration_is_ready():
+        def migration_is_ready(poll=False):
             mig_ready = False
-            command_migrate_list = ('migrate', '-list',
-                                    '-source', src_id)
-            out, rc = self.command_execute(*command_migrate_list)
-            LOG.debug("Migration output: %s" % out)
+            cmd_migrate_list = ('migrate', '-list', '-source', src_id)
+            out, rc = self.command_execute(*cmd_migrate_list,
+                                           poll=poll)
+            LOG.debug("Migration output: %s", out)
             if rc == 0:
                 # parse the percentage
                 out = re.split(r'\n', out)
@@ -762,7 +854,7 @@ class CommandLineHelper(object):
             else:
                 if re.search(r'The specified source LUN '
                              'is not currently migrating', out):
-                    LOG.debug("Migration of LUN %s is finished." % src_id)
+                    LOG.debug("Migration of LUN %s is finished.", src_id)
                     mig_ready = True
                 else:
                     reason = _("Querying migrating status error.")
@@ -772,28 +864,33 @@ class CommandLineHelper(object):
                         {'reason': reason, 'output': out})
             return mig_ready
 
+        eventlet.sleep(INTERVAL_30_SEC)
+        if migration_is_ready(True):
+            return True
         self._wait_for_a_condition(migration_is_ready,
                                    interval=INTERVAL_30_SEC)
 
         return True
 
-    @log_enter_exit
-    def get_storage_group(self, name):
+    def get_storage_group(self, name, poll=True):
 
         # ALU/HLU as key/value map
         lun_map = {}
 
         data = {'storage_group_name': name,
                 'storage_group_uid': None,
-                'lunmap': lun_map}
+                'lunmap': lun_map,
+                'raw_output': ''}
 
         command_get_storage_group = ('storagegroup', '-list',
                                      '-gname', name)
 
-        out, rc = self.command_execute(*command_get_storage_group)
+        out, rc = self.command_execute(*command_get_storage_group,
+                                       poll=poll)
         if rc != 0:
-            raise EMCVnxCLICmdError(command_get_storage_group, rc, out)
+            self._raise_cli_error(command_get_storage_group, rc, out)
 
+        data['raw_output'] = out
         re_stroage_group_id = 'Storage Group UID:\s*(.*)\s*'
         m = re.search(re_stroage_group_id, out)
         if m is not None:
@@ -812,7 +909,6 @@ class CommandLineHelper(object):
 
         return data
 
-    @log_enter_exit
     def create_storage_group(self, name):
 
         command_create_storage_group = ('storagegroup', '-create',
@@ -821,14 +917,13 @@ class CommandLineHelper(object):
         out, rc = self.command_execute(*command_create_storage_group)
         if rc != 0:
             # Ignore the error that due to retry
-            if rc == 66 and out.find("name already in use") >= 0:
-                LOG.warn(_LW('Storage group %(name)s already exists. '
-                             'Message: %(msg)s') %
-                         {'name': name, 'msg': out})
+            if rc == 66 and self.CLI_RESP_PATTERN_SG_NAME_IN_USE in out:
+                LOG.warning(_LW('Storage group %(name)s already exists. '
+                                'Message: %(msg)s'),
+                            {'name': name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_create_storage_group, rc, out)
+                self._raise_cli_error(command_create_storage_group, rc, out)
 
-    @log_enter_exit
     def delete_storage_group(self, name):
 
         command_delete_storage_group = ('storagegroup', '-destroy',
@@ -839,14 +934,13 @@ class CommandLineHelper(object):
             # Ignore the error that due to retry
             if rc == 83 and out.find("group name or UID does not "
                                      "match any storage groups") >= 0:
-                LOG.warn(_LW("Storage group %(name)s doesn't exist, "
-                             "may have already been deleted. "
-                             "Message: %(msg)s") %
-                         {'name': name, 'msg': out})
+                LOG.warning(_LW("Storage group %(name)s doesn't exist, "
+                                "may have already been deleted. "
+                                "Message: %(msg)s"),
+                            {'name': name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_delete_storage_group, rc, out)
+                self._raise_cli_error(command_delete_storage_group, rc, out)
 
-    @log_enter_exit
     def connect_host_to_storage_group(self, hostname, sg_name):
 
         command_host_connect = ('storagegroup', '-connecthost',
@@ -856,9 +950,8 @@ class CommandLineHelper(object):
 
         out, rc = self.command_execute(*command_host_connect)
         if rc != 0:
-            raise EMCVnxCLICmdError(command_host_connect, rc, out)
+            self._raise_cli_error(command_host_connect, rc, out)
 
-    @log_enter_exit
     def disconnect_host_from_storage_group(self, hostname, sg_name):
         command_host_disconnect = ('storagegroup', '-disconnecthost',
                                    '-host', hostname,
@@ -871,53 +964,49 @@ class CommandLineHelper(object):
             if rc == 116 and \
                 re.search("host is not.*connected to.*storage group",
                           out) is not None:
-                LOG.warn(_LW("Host %(host)s has already disconnected from "
-                             "storage group %(sgname)s. Message: %(msg)s") %
-                         {'host': hostname, 'sgname': sg_name, 'msg': out})
+                LOG.warning(_LW("Host %(host)s has already disconnected from "
+                                "storage group %(sgname)s. Message: %(msg)s"),
+                            {'host': hostname, 'sgname': sg_name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_host_disconnect, rc, out)
+                self._raise_cli_error(command_host_disconnect, rc, out)
 
-    @log_enter_exit
     def add_hlu_to_storage_group(self, hlu, alu, sg_name):
+        """Adds a lun into storage group as specified hlu number.
+
+        Return True if the hlu is as specified, otherwise False.
+        """
 
         command_add_hlu = ('storagegroup', '-addhlu',
                            '-hlu', hlu,
                            '-alu', alu,
                            '-gname', sg_name)
 
-        out, rc = self.command_execute(*command_add_hlu)
+        out, rc = self.command_execute(*command_add_hlu, poll=False)
         if rc != 0:
-            # Ignore the error that due to retry
-            if rc == 66 and \
-                    re.search("LUN.*already.*added to.*Storage Group",
-                              out) is not None:
-                LOG.warn(_LW("LUN %(lun)s has already added to "
-                             "Storage Group %(sgname)s. "
-                             "Message: %(msg)s") %
-                         {'lun': alu, 'sgname': sg_name, 'msg': out})
-            else:
-                raise EMCVnxCLICmdError(command_add_hlu, rc, out)
+            # Do not need to consider the retry for add hlu
+            # Retry is handled in the caller
+            self._raise_cli_error(command_add_hlu, rc, out)
 
-    @log_enter_exit
-    def remove_hlu_from_storagegroup(self, hlu, sg_name):
+        return True
+
+    def remove_hlu_from_storagegroup(self, hlu, sg_name, poll=False):
 
         command_remove_hlu = ('storagegroup', '-removehlu',
                               '-hlu', hlu,
                               '-gname', sg_name,
                               '-o')
 
-        out, rc = self.command_execute(*command_remove_hlu)
+        out, rc = self.command_execute(*command_remove_hlu, poll=poll)
         if rc != 0:
             # Ignore the error that due to retry
             if rc == 66 and\
                     out.find("No such Host LUN in this Storage Group") >= 0:
-                LOG.warn(_LW("HLU %(hlu)s has already been removed from "
-                             "%(sgname)s. Message: %(msg)s") %
-                         {'hlu': hlu, 'sgname': sg_name, 'msg': out})
+                LOG.warning(_LW("HLU %(hlu)s has already been removed from "
+                                "%(sgname)s. Message: %(msg)s"),
+                            {'hlu': hlu, 'sgname': sg_name, 'msg': out})
             else:
-                raise EMCVnxCLICmdError(command_remove_hlu, rc, out)
+                self._raise_cli_error(command_remove_hlu, rc, out)
 
-    @log_enter_exit
     def get_iscsi_protocol_endpoints(self, device_sp):
 
         command_get_port = ('connection', '-getport',
@@ -925,64 +1014,77 @@ class CommandLineHelper(object):
 
         out, rc = self.command_execute(*command_get_port)
         if rc != 0:
-            raise EMCVnxCLICmdError(command_get_port, rc, out)
+            self._raise_cli_error(command_get_port, rc, out)
 
         re_port_wwn = 'Port WWN:\s*(.*)\s*'
         initiator_address = re.findall(re_port_wwn, out)
 
         return initiator_address
 
-    @log_enter_exit
-    def get_pool_name_of_lun(self, lun_name):
+    def get_pool_name_of_lun(self, lun_name, poll=True):
         data = self.get_lun_properties(
-            ('-name', lun_name), self.LUN_WITH_POOL)
+            ('-name', lun_name), self.LUN_WITH_POOL, poll=poll)
         return data.get('pool', '')
 
-    @log_enter_exit
-    def get_lun_by_name(self, name, properties=LUN_ALL):
-        data = self.get_lun_properties(('-name', name), properties)
+    def get_lun_by_name(self, name, properties=LUN_ALL, poll=True):
+        data = self.get_lun_properties(('-name', name),
+                                       properties,
+                                       poll=poll)
         return data
 
-    @log_enter_exit
-    def get_lun_by_id(self, lunid, properties=LUN_ALL):
-        data = self.get_lun_properties(('-l', lunid), properties)
+    def get_lun_by_id(self, lunid, properties=LUN_ALL, poll=True):
+        data = self.get_lun_properties(('-l', lunid),
+                                       properties, poll=poll)
         return data
 
-    @log_enter_exit
-    def get_pool(self, name):
-        data = self.get_pool_properties(('-name', name))
+    def get_pool(self, name, poll=True):
+        data = self.get_pool_properties(('-name', name),
+                                        poll=poll)
         return data
 
-    @log_enter_exit
-    def get_pool_properties(self, filter_option, properties=POOL_ALL):
+    def get_pool_properties(self, filter_option, properties=POOL_ALL,
+                            poll=True):
         module_list = ('storagepool', '-list')
-        data = self._get_lun_or_pool_properties(
+        data = self._get_obj_properties(
             module_list, filter_option,
             base_properties=[self.POOL_NAME],
-            adv_properties=properties)
+            adv_properties=properties,
+            poll=poll)
         return data
 
-    @log_enter_exit
-    def get_lun_properties(self, filter_option, properties=LUN_ALL):
+    def get_lun_properties(self, filter_option, properties=LUN_ALL,
+                           poll=True):
         module_list = ('lun', '-list')
-        data = self._get_lun_or_pool_properties(
+        data = self._get_obj_properties(
             module_list, filter_option,
             base_properties=[self.LUN_NAME, self.LUN_ID],
-            adv_properties=properties)
+            adv_properties=properties,
+            poll=poll)
+        return data
+
+    def get_pool_feature_properties(self, properties=POOL_FEATURE_DEFAULT,
+                                    poll=True):
+        module_list = ("storagepool", '-feature', '-info')
+        data = self._get_obj_properties(
+            module_list, tuple(),
+            base_properties=[],
+            adv_properties=properties,
+            poll=poll)
         return data
 
-    def _get_lun_or_pool_properties(self, module_list,
-                                    filter_option,
-                                    base_properties=tuple(),
-                                    adv_properties=tuple()):
+    def _get_obj_properties(self, module_list,
+                            filter_option,
+                            base_properties=tuple(),
+                            adv_properties=tuple(),
+                            poll=True):
         # to do instance check
-        command_get_lun = module_list + filter_option
+        command_get = module_list + filter_option
         for prop in adv_properties:
-            command_get_lun += (prop.option, )
-        out, rc = self.command_execute(*command_get_lun)
+            command_get += (prop.option, )
+        out, rc = self.command_execute(*command_get, poll=poll)
 
         if rc != 0:
-            raise EMCVnxCLICmdError(command_get_lun, rc, out)
+            self._raise_cli_error(command_get, rc, out)
 
         data = {}
         for baseprop in base_properties:
@@ -991,7 +1093,7 @@ class CommandLineHelper(object):
         for prop in adv_properties:
             data[prop.key] = self._get_property_value(out, prop)
 
-        LOG.debug('Return LUN or Pool properties. Data: %s' % data)
+        LOG.debug('Return Object properties. Data: %s', data)
         return data
 
     def _get_property_value(self, out, propertyDescriptor):
@@ -1003,37 +1105,32 @@ class CommandLineHelper(object):
                     return propertyDescriptor.converter(m.group(1))
                 except ValueError:
                     LOG.error(_LE("Invalid value for %(key)s, "
-                                  "value is %(value)s.") %
+                                  "value is %(value)s."),
                               {'key': propertyDescriptor.key,
                                'value': m.group(1)})
                     return None
             else:
                 return m.group(1)
         else:
-            LOG.debug('%s value is not found in the output.'
-                      propertyDescriptor.label)
+            LOG.debug('%s value is not found in the output.',
+                      propertyDescriptor.label)
             return None
 
-    @log_enter_exit
     def check_lun_has_snap(self, lun_id):
         cmd = ('snap', '-list', '-res', lun_id)
-        rc = self.command_execute(*cmd)[1]
+        rc = self.command_execute(*cmd, poll=False)[1]
         if rc == 0:
-            LOG.debug("Find snapshots for %s." % lun_id)
+            LOG.debug("Find snapshots for %s.", lun_id)
             return True
         else:
             return False
 
-    # Return a pool list
-    @log_enter_exit
-    def get_pool_list(self, no_poll=False):
+    def get_pool_list(self, poll=True):
         temp_cache = []
-        cmd = ('-np', 'storagepool', '-list', '-availableCap', '-state') \
-            if no_poll \
-            else ('storagepool', '-list', '-availableCap', '-state')
-        out, rc = self.command_execute(*cmd)
+        cmd = ('storagepool', '-list', '-availableCap', '-state')
+        out, rc = self.command_execute(*cmd, poll=poll)
         if rc != 0:
-            raise EMCVnxCLICmdError(cmd, rc, out)
+            self._raise_cli_error(cmd, rc, out)
 
         try:
             for pool in out.split('\n\n'):
@@ -1045,39 +1142,37 @@ class CommandLineHelper(object):
                     pool, self.POOL_FREE_CAPACITY)
                 temp_cache.append(obj)
         except Exception as ex:
-            LOG.error(_LE("Error happened during storage pool querying, %s.")
-                      ex)
+            LOG.error(_LE("Error happened during storage pool querying, %s."),
+                      ex)
             # NOTE: Do not want to continue raise the exception
             # as the pools may temporarly unavailable
             pass
         return temp_cache
 
-    @log_enter_exit
-    def get_array_serial(self, no_poll=False):
+    def get_array_serial(self, poll=False):
         """return array Serial No for pool backend."""
         data = {'array_serial': 'unknown'}
 
-        command_get_array_serial = ('-np', 'getagent', '-serial') \
-            if no_poll else ('getagent', '-serial')
+        command_get_array_serial = ('getagent', '-serial')
         # Set the property timeout to get array serial
-        out, rc = self.command_execute(*command_get_array_serial)
+        out, rc = self.command_execute(*command_get_array_serial,
+                                       poll=poll)
         if 0 == rc:
             m = re.search(r'Serial No:\s+(\w+)', out)
             if m:
                 data['array_serial'] = m.group(1)
             else:
-                LOG.warn(_LW("No array serial number returned, "
-                             "set as unknown."))
+                LOG.warning(_LW("No array serial number returned, "
+                                "set as unknown."))
         else:
-            raise EMCVnxCLICmdError(command_get_array_serial, rc, out)
+            self._raise_cli_error(command_get_array_serial, rc, out)
 
         return data
 
-    @log_enter_exit
-    def get_status_up_ports(self, storage_group_name):
+    def get_status_up_ports(self, storage_group_name, poll=True):
         """Function to get ports whose status are up."""
         cmd_get_hba = ('storagegroup', '-list', '-gname', storage_group_name)
-        out, rc = self.command_execute(*cmd_get_hba)
+        out, rc = self.command_execute(*cmd_get_hba, poll=poll)
         wwns = []
         if 0 == rc:
             _re_hba_sp_pair = re.compile('((\w\w:){15}(\w\w)\s*' +
@@ -1088,19 +1183,21 @@ class CommandLineHelper(object):
             cmd_get_port = ('port', '-list', '-sp')
             out, rc = self.command_execute(*cmd_get_port)
             if 0 != rc:
-                raise EMCVnxCLICmdError(cmd_get_port, rc, out)
+                self._raise_cli_error(cmd_get_port, rc, out)
             for i, sp in enumerate(sps):
                 wwn = self.get_port_wwn(sp, portid[i], out)
                 if (wwn is not None) and (wwn not in wwns):
-                    LOG.debug('Add wwn:%(wwn)s for sg:%(sg)s.'
-                              {'wwn': wwn,
-                                 'sg': storage_group_name})
+                    LOG.debug('Add wwn:%(wwn)s for sg:%(sg)s.',
+                              {'wwn': wwn,
+                               'sg': storage_group_name})
                     wwns.append(wwn)
+        elif 83 == rc:
+            LOG.warning(_LW("Storage Group %s is not found."),
+                        storage_group_name)
         else:
-            raise EMCVnxCLICmdError(cmd_get_hba, rc, out)
+            self._raise_cli_error(cmd_get_hba, rc, out)
         return wwns
 
-    @log_enter_exit
     def get_login_ports(self, storage_group_name, connector_wwpns):
 
         cmd_list_hba = ('port', '-list', '-gname', storage_group_name)
@@ -1131,17 +1228,16 @@ class CommandLineHelper(object):
                 if wwn:
                     wwns.append(wwn)
         else:
-            raise EMCVnxCLICmdError(cmd_list_hba, rc, out)
+            self._raise_cli_error(cmd_list_hba, rc, out)
         return wwns
 
-    @log_enter_exit
     def get_port_wwn(self, sp, port_id, allports=None):
         wwn = None
         if allports is None:
             cmd_get_port = ('port', '-list', '-sp')
             out, rc = self.command_execute(*cmd_get_port)
             if 0 != rc:
-                raise EMCVnxCLICmdError(cmd_get_port, rc, out)
+                self._raise_cli_error(cmd_get_port, rc, out)
             else:
                 allports = out
         _re_port_wwn = re.compile('SP Name:\s*' + sp +
@@ -1154,12 +1250,11 @@ class CommandLineHelper(object):
             wwn = _obj_search.group(1).replace(':', '')[16:]
         return wwn
 
-    @log_enter_exit
     def get_fc_targets(self):
         fc_getport = ('port', '-list', '-sp')
         out, rc = self.command_execute(*fc_getport)
         if rc != 0:
-            raise EMCVnxCLICmdError(fc_getport, rc, out)
+            self._raise_cli_error(fc_getport, rc, out)
 
         fc_target_dict = {'A': [], 'B': []}
 
@@ -1176,12 +1271,11 @@ class CommandLineHelper(object):
                                        'Port ID': sp_port_id})
         return fc_target_dict
 
-    @log_enter_exit
-    def get_iscsi_targets(self):
+    def get_iscsi_targets(self, poll=True):
         cmd_getport = ('connection', '-getport', '-address', '-vlanid')
-        out, rc = self.command_execute(*cmd_getport)
+        out, rc = self.command_execute(*cmd_getport, poll=poll)
         if rc != 0:
-            raise EMCVnxCLICmdError(cmd_getport, rc, out)
+            self._raise_cli_error(cmd_getport, rc, out)
 
         iscsi_target_dict = {'A': [], 'B': []}
         iscsi_spport_pat = r'(A|B)\s*' + \
@@ -1213,31 +1307,25 @@ class CommandLineHelper(object):
 
         return iscsi_target_dict
 
-    @log_enter_exit
-    def get_registered_spport_set(self, initiator_iqn, sgname):
-        sg_list = ('storagegroup', '-list', '-gname', sgname)
-        out, rc = self.command_execute(*sg_list)
+    def get_registered_spport_set(self, initiator_iqn, sgname, sg_raw_out):
         spport_set = set()
-        if rc == 0:
-            for m_spport in re.finditer(r'\n\s+%s\s+SP\s(A|B)\s+(\d+)' %
-                                        initiator_iqn,
-                                        out,
-                                        flags=re.IGNORECASE):
-                spport_set.add((m_spport.group(1), int(m_spport.group(2))))
-                LOG.debug('See path %(path)s in %(sg)s'
-                          % ({'path': m_spport.group(0),
-                              'sg': sgname}))
-        else:
-            raise EMCVnxCLICmdError(sg_list, rc, out)
+        for m_spport in re.finditer(r'\n\s+%s\s+SP\s(A|B)\s+(\d+)' %
+                                    initiator_iqn,
+                                    sg_raw_out,
+                                    flags=re.IGNORECASE):
+            spport_set.add((m_spport.group(1), int(m_spport.group(2))))
+            LOG.debug('See path %(path)s in %(sg)s',
+                      {'path': m_spport.group(0),
+                       'sg': sgname})
         return spport_set
 
-    @log_enter_exit
     def ping_node(self, target_portal, initiator_ip):
         connection_pingnode = ('connection', '-pingnode', '-sp',
                                target_portal['SP'], '-portid',
                                target_portal['Port ID'], '-vportid',
                                target_portal['Virtual Port ID'],
-                               '-address', initiator_ip)
+                               '-address', initiator_ip,
+                               '-count', '1')
         out, rc = self.command_execute(*connection_pingnode)
         if rc == 0:
             ping_ok = re.compile(r'Reply from %s' % initiator_ip)
@@ -1245,13 +1333,14 @@ class CommandLineHelper(object):
                 LOG.debug("See available iSCSI target: %s",
                           connection_pingnode)
                 return True
-        LOG.warn(_LW("See unavailable iSCSI target: %s"), connection_pingnode)
+        LOG.warning(_LW("See unavailable iSCSI target: %s"),
+                    connection_pingnode)
         return False
 
-    @log_enter_exit
     def find_avaialable_iscsi_target_one(self, hostname,
                                          preferred_sp,
-                                         registered_spport_set):
+                                         registered_spport_set,
+                                         all_iscsi_targets):
         if self.iscsi_initiator_map and hostname in self.iscsi_initiator_map:
             iscsi_initiator_ips = list(self.iscsi_initiator_map[hostname])
             random.shuffle(iscsi_initiator_ips)
@@ -1263,17 +1352,16 @@ class CommandLineHelper(object):
         else:
             target_sps = ('B', 'A')
 
-        iscsi_targets = self.get_iscsi_targets()
         for target_sp in target_sps:
-            target_portals = list(iscsi_targets[target_sp])
+            target_portals = list(all_iscsi_targets[target_sp])
             random.shuffle(target_portals)
             for target_portal in target_portals:
                 spport = (target_portal['SP'], target_portal['Port ID'])
                 if spport not in registered_spport_set:
                     LOG.debug("Skip SP Port %(port)s since "
-                              "no path from %(host)s is through it"
-                              {'port': spport,
-                                 'host': hostname})
+                              "no path from %(host)s is through it",
+                              {'port': spport,
+                               'host': hostname})
                     continue
                 if iscsi_initiator_ips is not None:
                     for initiator_ip in iscsi_initiator_ips:
@@ -1281,9 +1369,9 @@ class CommandLineHelper(object):
                             return target_portal
                 else:
                     LOG.debug("No iSCSI IP address of %(hostname)s is known. "
-                              "Return a random iSCSI target portal %(portal)s."
-                              %
-                              {'hostname': hostname, 'portal': target_portal})
+                              "Return a random target portal %(portal)s.",
+                              {'hostname': hostname,
+                               'portal': target_portal})
                     return target_portal
 
         return None
@@ -1291,38 +1379,33 @@ class CommandLineHelper(object):
     def _is_sp_unavailable_error(self, out):
         error_pattern = '(^Error.*Message.*End of data stream.*)|'\
                         '(.*Message.*connection refused.*)|'\
-                        '(^Error.*Message.*Service Unavailable.*)'
+                        '(^Error.*Message.*Service Unavailable.*)|'\
+                        '(^A network error occurred while trying to'\
+                        ' connect.* )|'\
+                        '(^Exception: Error occurred because of time out\s*)'
         pattern = re.compile(error_pattern)
         return pattern.match(out)
 
-    @log_enter_exit
     def command_execute(self, *command, **kwargv):
+        """Executes command against the VNX array.
+
+        When there is named parameter poll=False, the command will be sent
+        alone with option -np.
+        """
         # NOTE: retry_disable need to be removed from kwargv
         # before it pass to utils.execute, otherwise exception will thrown
         retry_disable = kwargv.pop('retry_disable', False)
-        if self._is_sp_alive(self.active_storage_ip):
-            out, rc = self._command_execute_on_active_ip(*command, **kwargv)
-            if not retry_disable and self._is_sp_unavailable_error(out):
-                # When active sp is unavailble, swith to another sp
-                # and set it to active
-                if self._toggle_sp():
-                    LOG.debug('EMC: Command Exception: %(rc) %(result)s. '
-                              'Retry on another SP.' % {'rc': rc,
-                                                        'result': out})
-                    out, rc = self._command_execute_on_active_ip(*command,
-                                                                 **kwargv)
-        elif self._toggle_sp() and not retry_disable:
-            # If active ip is not accessible, toggled to another sp
-            out, rc = self._command_execute_on_active_ip(*command, **kwargv)
-        else:
-            # Active IP is inaccessible, and cannot toggle to another SP,
-            # return Error
-            out, rc = "Server Unavailable", 255
-
-        LOG.debug('EMC: Command: %(command)s.'
-                  % {'command': self.command + command})
-        LOG.debug('EMC: Command Result: %(result)s.' %
-                  {'result': out.replace('\n', '\\n')})
+        out, rc = self._command_execute_on_active_ip(*command, **kwargv)
+        if not retry_disable and self._is_sp_unavailable_error(out):
+            # When active sp is unavailble, swith to another sp
+            # and set it to active and force a poll
+            if self._toggle_sp():
+                LOG.debug('EMC: Command Exception: %(rc)s %(result)s. '
+                          'Retry on another SP.', {'rc': rc,
+                                                   'result': out})
+                kwargv['poll'] = True
+                out, rc = self._command_execute_on_active_ip(*command,
+                                                             **kwargv)
 
         return out, rc
 
@@ -1331,6 +1414,10 @@ class CommandLineHelper(object):
             kwargv["check_exit_code"] = True
         rc = 0
         out = ""
+        need_poll = kwargv.pop('poll', True)
+        if "-np" not in command and not need_poll:
+            command = ("-np",) + command
+
         try:
             active_ip = (self.active_storage_ip,)
             out, err = utils.execute(
@@ -1343,6 +1430,11 @@ class CommandLineHelper(object):
             rc = pe.exit_code
             out = pe.stdout
             out = out.replace('\n', '\\n')
+
+        LOG.debug('EMC: Command: %(command)s. Result: %(result)s.',
+                  {'command': self.command + active_ip + command,
+                   'result': out.replace('\n', '\\n')})
+
         return out, rc
 
     def _is_sp_alive(self, ipaddr):
@@ -1354,9 +1446,9 @@ class CommandLineHelper(object):
             out = pe.stdout
             rc = pe.exit_code
             if rc != 0:
-                LOG.debug('%s is unavaialbe' % ipaddr)
+                LOG.debug('%s is unavailable', ipaddr)
                 return False
-        LOG.debug('Ping SP %(spip)s Command Result: %(result)s.' %
+        LOG.debug('Ping SP %(spip)s Command Result: %(result)s.',
                   {'spip': self.active_storage_ip, 'result': out})
         return True
 
@@ -1373,32 +1465,29 @@ class CommandLineHelper(object):
             self.primary_storage_ip
 
         LOG.info(_LI('Toggle storage_vnx_ip_address from %(old)s to '
-                     '%(new)s.') %
+                     '%(new)s.'),
                  {'old': old_ip,
-                  'new': self.primary_storage_ip})
+                  'new': self.active_storage_ip})
         return True
 
-    @log_enter_exit
-    def get_enablers_on_array(self, no_poll=False):
+    def get_enablers_on_array(self, poll=False):
         """The function would get all the enabler installed
         on array.
         """
         enablers = []
-        cmd_list = ('-np', 'ndu', '-list') \
-            if no_poll else ('ndu', '-list')
-        out, rc = self.command_execute(*cmd_list)
+        cmd_list = ('ndu', '-list')
+        out, rc = self.command_execute(*cmd_list, poll=poll)
 
         if rc != 0:
-            raise EMCVnxCLICmdError(cmd_list, rc, out)
+            self._raise_cli_error(cmd_list, rc, out)
         else:
             enabler_pat = r'Name of the software package:\s*(\S+)\s*'
             for m in re.finditer(enabler_pat, out):
                 enablers.append(m.groups()[0])
 
-        LOG.debug('Enablers on array %s.' % enablers)
+        LOG.debug('Enablers on array %s.', enablers)
         return enablers
 
-    @log_enter_exit
     def enable_or_disable_compression_on_lun(self, volumename, compression):
         """The function will enable or disable the compression
         on lun
@@ -1412,14 +1501,39 @@ class CommandLineHelper(object):
         out, rc = self.command_execute(*command_compression_cmd)
 
         if 0 != rc:
-            raise EMCVnxCLICmdError(command_compression_cmd, rc, out)
+            self._raise_cli_error(command_compression_cmd, rc, out)
         return rc, out
 
+    def deregister_initiator(self, initiator_uid):
+        """This function tries to deregister initiators on VNX."""
+        command_deregister = ('port', '-removeHBA',
+                              '-hbauid', initiator_uid,
+                              '-o')
+        out, rc = self.command_execute(*command_deregister)
+        return rc, out
 
+    def is_pool_fastcache_enabled(self, storage_pool, poll=False):
+        command_check_fastcache = ('storagepool', '-list', '-name',
+                                   storage_pool, '-fastcache')
+        out, rc = self.command_execute(*command_check_fastcache, poll=poll)
+
+        if 0 != rc:
+            self._raise_cli_error(command_check_fastcache, rc, out)
+        else:
+            re_fastcache = 'FAST Cache:\s*(.*)\s*'
+            m = re.search(re_fastcache, out)
+            result = m is not None and 'Enabled' == m.group(1)
+            if m is None:
+                LOG.error(_LE("Error parsing output "
+                              "for FastCache Command."))
+        return result
+
+
+@decorate_all_methods(log_enter_exit)
 class EMCVnxCliBase(object):
     """This class defines the functions to use the native CLI functionality."""
 
-    VERSION = '04.01.00'
+    VERSION = '05.00.00'
     stats = {'driver_version': VERSION,
              'free_capacity_gb': 'unknown',
              'reserved_percentage': 0,
@@ -1436,10 +1550,12 @@ class EMCVnxCliBase(object):
     def __init__(self, prtcl, configuration=None):
         self.protocol = prtcl
         self.configuration = configuration
-        self.timeout = self.configuration.default_timeout * 60
         self.max_luns_per_sg = self.configuration.max_luns_per_storage_group
         self.destroy_empty_sg = self.configuration.destroy_empty_storage_group
         self.itor_auto_reg = self.configuration.initiator_auto_registration
+        self.itor_auto_dereg = self.configuration.initiator_auto_deregistration
+        self.check_max_pool_luns_threshold = (
+            self.configuration.check_max_pool_luns_threshold)
         # if zoning_mode is fabric, use lookup service to build itor_tgt_map
         self.zonemanager_lookup_service = None
         zm_conf = Configuration(manager.volume_manager_opts)
@@ -1451,9 +1567,9 @@ class EMCVnxCliBase(object):
                 FCSanLookupService(configuration=configuration)
         self.max_retries = 5
         if self.destroy_empty_sg:
-            LOG.warn(_LW("destroy_empty_storage_group: True. "
-                         "Empty storage group will be deleted "
-                         "after volume is detached."))
+            LOG.warning(_LW("destroy_empty_storage_group: True. "
+                            "Empty storage group will be deleted "
+                            "after volume is detached."))
         if not self.itor_auto_reg:
             LOG.info(_LI("initiator_auto_registration: False. "
                          "Initiator auto registration is not enabled. "
@@ -1461,6 +1577,13 @@ class EMCVnxCliBase(object):
         self.hlu_set = set(xrange(1, self.max_luns_per_sg + 1))
         self._client = CommandLineHelper(self.configuration)
         self.array_serial = None
+        if self.protocol == 'iSCSI':
+            self.iscsi_targets = self._client.get_iscsi_targets(poll=True)
+        self.hlu_cache = {}
+        self.force_delete_lun_in_sg = (
+            self.configuration.force_delete_lun_in_storagegroup)
+        if self.force_delete_lun_in_sg:
+            LOG.warning(_LW("force_delete_lun_in_storagegroup=True"))
 
     def get_target_storagepool(self, volume, source_volume_name=None):
         raise NotImplementedError
@@ -1473,17 +1596,42 @@ class EMCVnxCliBase(object):
             self.array_serial = self._client.get_array_serial()
         return self.array_serial['array_serial']
 
-    @log_enter_exit
+    def _construct_store_spec(self, volume, snapshot):
+            if snapshot['cgsnapshot_id']:
+                snapshot_name = snapshot['cgsnapshot_id']
+            else:
+                snapshot_name = snapshot['name']
+            source_volume_name = snapshot['volume_name']
+            volume_name = volume['name']
+            volume_size = snapshot['volume_size']
+            dest_volume_name = volume_name + '_dest'
+
+            pool_name = self.get_target_storagepool(volume, source_volume_name)
+            specs = self.get_volumetype_extraspecs(volume)
+            provisioning, tiering = self._get_extra_spec_value(specs)
+            store_spec = {
+                'source_vol_name': source_volume_name,
+                'volume': volume,
+                'snap_name': snapshot_name,
+                'dest_vol_name': dest_volume_name,
+                'pool_name': pool_name,
+                'provisioning': provisioning,
+                'tiering': tiering,
+                'volume_size': volume_size,
+                'client': self._client
+            }
+            return store_spec
+
     def create_volume(self, volume):
         """Creates a EMC volume."""
-        volumesize = volume['size']
-        volumename = volume['name']
+        volume_size = volume['size']
+        volume_name = volume['name']
 
         self._volume_creation_check(volume)
         # defining CLI command
         specs = self.get_volumetype_extraspecs(volume)
         pool = self.get_target_storagepool(volume)
-        provisioning, tiering = self.get_extra_spec_value(specs)
+        provisioning, tiering = self._get_extra_spec_value(specs)
 
         if not provisioning:
             provisioning = 'thick'
@@ -1491,16 +1639,16 @@ class EMCVnxCliBase(object):
         LOG.info(_LI('Create Volume: %(volume)s  Size: %(size)s '
                      'pool: %(pool)s '
                      'provisioning: %(provisioning)s '
-                     'tiering: %(tiering)s.')
-                 % {'volume': volumename,
-                    'size': volumesize,
-                    'pool': pool,
-                    'provisioning': provisioning,
-                    'tiering': tiering})
+                     'tiering: %(tiering)s.'),
+                 {'volume': volume_name,
+                  'size': volume_size,
+                  'pool': pool,
+                  'provisioning': provisioning,
+                  'tiering': tiering})
 
         data = self._client.create_lun_with_advance_feature(
-            pool, volumename, volumesize,
-            provisioning, tiering, volume['consistencygroup_id'])
+            pool, volume_name, volume_size,
+            provisioning, tiering, volume['consistencygroup_id'], False)
         pl_dict = {'system': self.get_array_serial(),
                    'type': 'lun',
                    'id': str(data['lun_id'])}
@@ -1517,23 +1665,23 @@ class EMCVnxCliBase(object):
         """
 
         specs = self.get_volumetype_extraspecs(volume)
-        provisioning, tiering = self.get_extra_spec_value(specs)
+        provisioning, tiering = self._get_extra_spec_value(specs)
 
         # step 1: check extra spec value
         if provisioning:
-            self.check_extra_spec_value(
+            self._check_extra_spec_value(
                 provisioning,
                 self._client.provisioning_values.keys())
         if tiering:
-            self.check_extra_spec_value(
+            self._check_extra_spec_value(
                 tiering,
                 self._client.tiering_values.keys())
 
         # step 2: check extra spec combination
-        self.check_extra_spec_combination(specs)
+        self._check_extra_spec_combination(specs)
 
-    def check_extra_spec_value(self, extra_spec, valid_values):
-        """check whether an extra spec's value is valid."""
+    def _check_extra_spec_value(self, extra_spec, valid_values):
+        """Checks whether an extra spec's value is valid."""
 
         if not extra_spec or not valid_values:
             LOG.error(_LE('The given extra_spec or valid_values is None.'))
@@ -1543,8 +1691,8 @@ class EMCVnxCliBase(object):
             raise exception.VolumeBackendAPIException(data=msg)
         return
 
-    def get_extra_spec_value(self, extra_specs):
-        """get EMC extra spec values."""
+    def _get_extra_spec_value(self, extra_specs):
+        """Gets EMC extra spec values."""
         provisioning = 'thick'
         tiering = None
 
@@ -1555,10 +1703,10 @@ class EMCVnxCliBase(object):
 
         return provisioning, tiering
 
-    def check_extra_spec_combination(self, extra_specs):
-        """check whether extra spec combination is valid."""
+    def _check_extra_spec_combination(self, extra_specs):
+        """Checks whether extra spec combination is valid."""
 
-        provisioning, tiering = self.get_extra_spec_value(extra_specs)
+        provisioning, tiering = self._get_extra_spec_value(extra_specs)
         enablers = self.enablers
 
         # check provisioning and tiering
@@ -1591,12 +1739,29 @@ class EMCVnxCliBase(object):
             raise exception.VolumeBackendAPIException(data=msg)
         return
 
-    @log_enter_exit
     def delete_volume(self, volume):
         """Deletes an EMC volume."""
-        self._client.delete_lun(volume['name'])
+        try:
+            self._client.delete_lun(volume['name'])
+        except EMCVnxCLICmdError as ex:
+            orig_out = "\n".join(ex.kwargs["out"])
+            if (self.force_delete_lun_in_sg and
+                    (self._client.CLI_RESP_PATTERN_LUN_IN_SG_1 in orig_out or
+                     self._client.CLI_RESP_PATTERN_LUN_IN_SG_2 in orig_out)):
+                LOG.warning(_LW('LUN corresponding to %s is still '
+                                'in some Storage Groups.'
+                                'Try to bring the LUN out of Storage Groups '
+                                'and retry the deletion.'),
+                            volume['name'])
+                lun_id = self.get_lun_id(volume)
+                for hlu, sg in self._client.get_hlus(lun_id):
+                    self._client.remove_hlu_from_storagegroup(hlu, sg)
+                self._client.delete_lun(volume['name'])
+            else:
+                with excutils.save_and_reraise_exception():
+                    # Reraise the original exception
+                    pass
 
-    @log_enter_exit
     def extend_volume(self, volume, new_size):
         """Extends an EMC volume."""
         self._client.expand_lun_and_wait(volume['name'], new_size)
@@ -1614,21 +1779,21 @@ class EMCVnxCliBase(object):
         false_ret = (False, None)
 
         if 'location_info' not in host['capabilities']:
-            LOG.warn(_LW("Failed to get target_pool_name and "
-                         "target_array_serial. 'location_info' "
-                         "is not in host['capabilities']."))
+            LOG.warning(_LW("Failed to get target_pool_name and "
+                            "target_array_serial. 'location_info' "
+                            "is not in host['capabilities']."))
             return false_ret
 
         # mandatory info should be ok
         info = host['capabilities']['location_info']
-        LOG.debug("Host for migration is %s." % info)
+        LOG.debug("Host for migration is %s.", info)
         try:
             info_detail = info.split('|')
             target_pool_name = info_detail[0]
             target_array_serial = info_detail[1]
         except AttributeError:
-            LOG.warn(_LW("Error on parsing target_pool_name/"
-                         "target_array_serial."))
+            LOG.warning(_LW("Error on parsing target_pool_name/"
+                            "target_array_serial."))
             return false_ret
 
         if len(target_pool_name) == 0:
@@ -1649,8 +1814,8 @@ class EMCVnxCliBase(object):
                       "it doesn't support array backend .")
             return false_ret
         # source and destination should be on same array
-        array_serial = self._client.get_array_serial()
-        if target_array_serial != array_serial['array_serial']:
+        array_serial = self.get_array_serial()
+        if target_array_serial != array_serial:
             LOG.debug('Skip storage-assisted migration because '
                       'target and source backend are not managing'
                       'the same array.')
@@ -1660,12 +1825,11 @@ class EMCVnxCliBase(object):
                 and self._get_original_status(volume) == 'in-use':
             LOG.debug('Skip storage-assisted migration because '
                       'in-use volume can not be '
-                      'migrate between diff protocol.')
+                      'migrate between different protocols.')
             return false_ret
 
         return (True, target_pool_name)
 
-    @log_enter_exit
     def migrate_volume(self, ctxt, volume, host, new_type=None):
         """Leverage the VNX on-array migration functionality.
 
@@ -1691,38 +1855,37 @@ class EMCVnxCliBase(object):
         provisioning = 'thick'
         tiering = None
         if new_type:
-            provisioning, tiering = self.get_extra_spec_value(
+            provisioning, tiering = self._get_extra_spec_value(
                 new_type['extra_specs'])
         else:
-            provisioning, tiering = self.get_extra_spec_value(
+            provisioning, tiering = self._get_extra_spec_value(
                 self.get_volumetype_extraspecs(volume))
 
-        self._client.create_lun_with_advance_feature(
+        data = self._client.create_lun_with_advance_feature(
             target_pool_name, new_volume_name, volume['size'],
             provisioning, tiering)
 
-        dst_id = self.get_lun_id_by_name(new_volume_name)
+        dst_id = data['lun_id']
         moved = self._client.migrate_lun_with_verification(
             src_id, dst_id, new_volume_name)
 
         return moved, {}
 
-    @log_enter_exit
     def retype(self, ctxt, volume, new_type, diff, host):
         new_specs = new_type['extra_specs']
-        new_provisioning, new_tiering = self.get_extra_spec_value(
+        new_provisioning, new_tiering = self._get_extra_spec_value(
             new_specs)
 
         # validate new_type
         if new_provisioning:
-            self.check_extra_spec_value(
+            self._check_extra_spec_value(
                 new_provisioning,
                 self._client.provisioning_values.keys())
         if new_tiering:
-            self.check_extra_spec_value(
+            self._check_extra_spec_value(
                 new_tiering,
                 self._client.tiering_values.keys())
-        self.check_extra_spec_combination(new_specs)
+        self._check_extra_spec_combination(new_specs)
 
         # check what changes are needed
         migration, tiering_change = self.determine_changes_when_retype(
@@ -1745,14 +1908,14 @@ class EMCVnxCliBase(object):
                         volume, target_pool_name, new_type)[0]:
                     return True
                 else:
-                    LOG.warn(_LW('Storage-assisted migration failed during '
-                                 'retype.'))
+                    LOG.warning(_LW('Storage-assisted migration failed during '
+                                    'retype.'))
                     return False
             else:
                 # migration is invalid
                 LOG.debug('Driver is not able to do retype due to '
                           'storage-assisted migration is not valid '
-                          'in this stuation.')
+                          'in this situation.')
                 return False
         elif not migration and tiering_change:
             # modify lun to change tiering policy
@@ -1766,14 +1929,14 @@ class EMCVnxCliBase(object):
         tiering_change = False
 
         old_specs = self.get_volumetype_extraspecs(volume)
-        old_provisioning, old_tiering = self.get_extra_spec_value(
+        old_provisioning, old_tiering = self._get_extra_spec_value(
             old_specs)
         old_pool = self.get_specific_extra_spec(
             old_specs,
             self._client.pool_spec)
 
         new_specs = new_type['extra_specs']
-        new_provisioning, new_tiering = self.get_extra_spec_value(
+        new_provisioning, new_tiering = self._get_extra_spec_value(
             new_specs)
         new_pool = self.get_specific_extra_spec(
             new_specs,
@@ -1803,13 +1966,12 @@ class EMCVnxCliBase(object):
                 return False
         return True
 
-    @log_enter_exit
     def update_volume_stats(self):
         """Update the common status share with pool and
         array backend.
         """
         if not self.determine_all_enablers_exist(self.enablers):
-            self.enablers = self._client.get_enablers_on_array(NO_POLL)
+            self.enablers = self._client.get_enablers_on_array()
         if '-Compression' in self.enablers:
             self.stats['compression_support'] = 'True'
         else:
@@ -1835,103 +1997,70 @@ class EMCVnxCliBase(object):
         else:
             self.stats['consistencygroup_support'] = 'False'
 
-        return self.stats
-
-    @log_enter_exit
-    def create_export(self, context, volume):
-        """Driver entry point to get the export info for a new volume."""
-        volumename = volume['name']
-
-        data = self._client.get_lun_by_name(volumename)
-
-        device_id = data['lun_id']
-
-        LOG.debug('Exiting EMCVnxCliBase.create_export: Volume: %(volume)s '
-                  'Device ID: %(device_id)s'
-                  % {'volume': volumename,
-                     'device_id': device_id})
+        if self.protocol == 'iSCSI':
+            self.iscsi_targets = self._client.get_iscsi_targets(poll=False)
 
-        return {'provider_location': device_id}
+        return self.stats
 
-    @log_enter_exit
     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
 
-        snapshotname = snapshot['name']
-        volumename = snapshot['volume_name']
-
-        LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s')
-                 % {'snapshot': snapshotname,
-                    'volume': volumename})
-
-        self._client.create_snapshot(volumename, snapshotname)
+        snapshot_name = snapshot['name']
+        volume_name = snapshot['volume_name']
+        volume = snapshot['volume']
+        LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s'),
+                 {'snapshot': snapshot_name,
+                  'volume': volume_name})
+        lun_id = self.get_lun_id(volume)
+        self._client.create_snapshot(lun_id, snapshot_name)
 
-    @log_enter_exit
     def delete_snapshot(self, snapshot):
         """Deletes a snapshot."""
 
-        snapshotname = snapshot['name']
+        snapshot_name = snapshot['name']
 
-        LOG.info(_LI('Delete Snapshot: %(snapshot)s')
-                 % {'snapshot': snapshotname})
+        LOG.info(_LI('Delete Snapshot: %(snapshot)s'),
+                 {'snapshot': snapshot_name})
 
-        self._client.delete_snapshot(snapshotname)
+        self._client.delete_snapshot(snapshot_name)
 
-    @log_enter_exit
     def create_volume_from_snapshot(self, volume, snapshot):
-        """Creates a volume from a snapshot."""
-        if snapshot['cgsnapshot_id']:
-            snapshot_name = snapshot['cgsnapshot_id']
-        else:
-            snapshot_name = snapshot['name']
-        source_volume_name = snapshot['volume_name']
-        volume_name = volume['name']
-        volume_size = snapshot['volume_size']
-
-        # defining CLI command
-        self._client.create_mount_point(source_volume_name, volume_name)
-
-        # defining CLI command
-        self._client.attach_mount_point(volume_name, snapshot_name)
-
-        dest_volume_name = volume_name + '_dest'
-
-        LOG.debug('Creating Temporary Volume: %s ' % dest_volume_name)
-        pool_name = self.get_target_storagepool(volume, source_volume_name)
-        try:
-            self._volume_creation_check(volume)
-            specs = self.get_volumetype_extraspecs(volume)
-            provisioning, tiering = self.get_extra_spec_value(specs)
-            self._client.create_lun_with_advance_feature(
-                pool_name, dest_volume_name, volume_size,
-                provisioning, tiering)
-        except exception.VolumeBackendAPIException as ex:
-            msg = (_('Command to create the temporary Volume %s failed')
-                   % dest_volume_name)
-            LOG.error(msg)
-            raise ex
+        """Constructs a work flow to create a volume from snapshot.
 
-        source_vol_lun_id = self.get_lun_id(volume)
-        temp_vol_lun_id = self.get_lun_id_by_name(dest_volume_name)
+        This flow will do the following:
 
-        LOG.debug('Migrating Mount Point Volume: %s ' % volume_name)
-        self._client.migrate_lun_with_verification(source_vol_lun_id,
-                                                   temp_vol_lun_id,
-                                                   dest_volume_name)
-        self._client.check_smp_not_attached(volume_name)
-        data = self._client.get_lun_by_name(volume_name)
-        pl_dict = {'system': self.get_array_serial(),
+        1. Create a snap mount point (SMP) for the snapshot.
+        2. Attach the snapshot to the SMP created in the first step.
+        3. Create a temporary lun to prepare for migration.
+        4. Start a migration between the SMP and the temp lun.
+        """
+        self._volume_creation_check(volume)
+        array_serial = self.get_array_serial()
+        flow_name = 'create_volume_from_snapshot'
+        work_flow = linear_flow.Flow(flow_name)
+        store_spec = self._construct_store_spec(volume, snapshot)
+        work_flow.add(CreateSMPTask(),
+                      AttachSnapTask(),
+                      CreateDestLunTask(),
+                      MigrateLunTask())
+        flow_engine = taskflow.engines.load(work_flow,
+                                            store=store_spec)
+        flow_engine.run()
+        new_lun_id = flow_engine.storage.fetch('new_lun_id')
+        pl_dict = {'system': array_serial,
                    'type': 'lun',
-                   'id': str(data['lun_id'])}
+                   'id': str(new_lun_id)}
         model_update = {'provider_location':
                         self.dumps_provider_location(pl_dict)}
         volume['provider_location'] = model_update['provider_location']
         return model_update
 
-    @log_enter_exit
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
+        self._volume_creation_check(volume)
+        array_serial = self.get_array_serial()
         source_volume_name = src_vref['name']
+        source_lun_id = self.get_lun_id(src_vref)
         volume_size = src_vref['size']
         consistencygroup_id = src_vref['consistencygroup_id']
         snapshot_name = 'tmp-snap-%s' % volume['id']
@@ -1943,30 +2072,42 @@ class EMCVnxCliBase(object):
             'name': snapshot_name,
             'volume_name': source_volume_name,
             'volume_size': volume_size,
+            'volume': src_vref,
             'cgsnapshot_id': tmp_cgsnapshot_name,
             'consistencygroup_id': consistencygroup_id,
             'id': tmp_cgsnapshot_name
         }
-        # Create temp Snapshot
-        if consistencygroup_id:
-            self._client.create_cgsnapshot(snapshot)
-        else:
-            self.create_snapshot(snapshot)
-
-        # Create volume
-        model_update = self.create_volume_from_snapshot(volume, snapshot)
+        store_spec = self._construct_store_spec(volume, snapshot)
+        flow_name = 'create_cloned_volume'
+        work_flow = linear_flow.Flow(flow_name)
+        store_spec.update({'snapshot': snapshot})
+        store_spec.update({'source_lun_id': source_lun_id})
+        work_flow.add(CreateSnapshotTask(),
+                      CreateSMPTask(),
+                      AttachSnapTask(),
+                      CreateDestLunTask(),
+                      MigrateLunTask())
+        flow_engine = taskflow.engines.load(work_flow,
+                                            store=store_spec)
+        flow_engine.run()
+        new_lun_id = flow_engine.storage.fetch('new_lun_id')
         # Delete temp Snapshot
         if consistencygroup_id:
             self._client.delete_cgsnapshot(snapshot)
         else:
             self.delete_snapshot(snapshot)
+
+        pl_dict = {'system': array_serial,
+                   'type': 'lun',
+                   'id': str(new_lun_id)}
+        model_update = {'provider_location':
+                        self.dumps_provider_location(pl_dict)}
         return model_update
 
-    @log_enter_exit
     def create_consistencygroup(self, context, group):
-        """Create a consistency group."""
+        """Creates a consistency group."""
         LOG.info(_LI('Start to create consistency group: %(group_name)s '
-                     'id: %(id)s') %
+                     'id: %(id)s'),
                  {'group_name': group['name'], 'id': group['id']})
 
         model_update = {'status': 'available'}
@@ -1974,22 +2115,20 @@ class EMCVnxCliBase(object):
             self._client.create_consistencygroup(context, group)
         except Exception:
             with excutils.save_and_reraise_exception():
-                msg = (_('Create consistency group %s failed.')
-                       % group['id'])
-                LOG.error(msg)
+                LOG.error(_LE('Create consistency group %s failed.'),
+                          group['id'])
 
         return model_update
 
-    @log_enter_exit
     def delete_consistencygroup(self, driver, context, group):
-        """Delete a consistency group."""
+        """Deletes a consistency group."""
         cg_name = group['id']
         volumes = driver.db.volume_get_all_by_group(context, group['id'])
 
         model_update = {}
         model_update['status'] = group['status']
-        LOG.info(_LI('Start to delete consistency group: %(cg_name)s')
-                 {'cg_name': cg_name})
+        LOG.info(_LI('Start to delete consistency group: %(cg_name)s'),
+                 {'cg_name': cg_name})
         try:
             self._client.delete_consistencygroup(cg_name)
         except Exception:
@@ -2008,16 +2147,15 @@ class EMCVnxCliBase(object):
 
         return model_update, volumes
 
-    @log_enter_exit
     def create_cgsnapshot(self, driver, context, cgsnapshot):
-        """Create a cgsnapshot (snap group)."""
+        """Creates a cgsnapshot (snap group)."""
         cgsnapshot_id = cgsnapshot['id']
         snapshots = driver.db.snapshot_get_all_for_cgsnapshot(
             context, cgsnapshot_id)
 
         model_update = {}
         LOG.info(_LI('Start to create cgsnapshot for consistency group'
-                     ': %(group_name)s') %
+                     ': %(group_name)s'),
                  {'group_name': cgsnapshot['consistencygroup_id']})
 
         try:
@@ -2026,17 +2164,15 @@ class EMCVnxCliBase(object):
                 snapshot['status'] = 'available'
         except Exception:
             with excutils.save_and_reraise_exception():
-                msg = (_('Create cg snapshot %s failed.')
-                       % cgsnapshot_id)
-                LOG.error(msg)
+                LOG.error(_LE('Create cg snapshot %s failed.'),
+                          cgsnapshot_id)
 
         model_update['status'] = 'available'
 
         return model_update, snapshots
 
-    @log_enter_exit
     def delete_cgsnapshot(self, driver, context, cgsnapshot):
-        """delete a cgsnapshot (snap group)."""
+        """Deletes a cgsnapshot (snap group)."""
         cgsnapshot_id = cgsnapshot['id']
         snapshots = driver.db.snapshot_get_all_for_cgsnapshot(
             context, cgsnapshot_id)
@@ -2044,7 +2180,7 @@ class EMCVnxCliBase(object):
         model_update = {}
         model_update['status'] = cgsnapshot['status']
         LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
-                     '%(group_name)s') % {'snap_name': cgsnapshot['id'],
+                     '%(group_name)s'), {'snap_name': cgsnapshot['id'],
                  'group_name': cgsnapshot['consistencygroup_id']})
 
         try:
@@ -2053,9 +2189,8 @@ class EMCVnxCliBase(object):
                 snapshot['status'] = 'deleted'
         except Exception:
             with excutils.save_and_reraise_exception():
-                msg = (_('Delete cgsnapshot %s failed.')
-                       % cgsnapshot_id)
-                LOG.error(msg)
+                LOG.error(_LE('Delete cgsnapshot %s failed.'),
+                          cgsnapshot_id)
 
         return model_update, snapshots
 
@@ -2074,9 +2209,9 @@ class EMCVnxCliBase(object):
                           'query it.')
                 lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
         except Exception as ex:
-            LOG.debug('Exception when getting lun id: %s.' % (ex))
+            LOG.debug('Exception when getting lun id: %s.', six.text_type(ex))
             lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
-        LOG.debug('Get lun_id: %s.' % (lun_id))
+        LOG.debug('Get lun_id: %s.', lun_id)
         return lun_id
 
     def get_lun_map(self, storage_group):
@@ -2088,22 +2223,18 @@ class EMCVnxCliBase(object):
         return data['storage_group_uid']
 
     def assure_storage_group(self, storage_group):
-        try:
-            self._client.create_storage_group(storage_group)
-        except EMCVnxCLICmdError as ex:
-            if ex.out.find("Storage Group name already in use") == -1:
-                raise ex
+        self._client.create_storage_group(storage_group)
 
     def assure_host_in_storage_group(self, hostname, storage_group):
         try:
             self._client.connect_host_to_storage_group(hostname, storage_group)
         except EMCVnxCLICmdError as ex:
-            if ex.rc == 83:
+            if ex.kwargs["rc"] == 83:
                 # SG was not created or was destroyed by another concurrent
                 # operation before connected.
                 # Create SG and try to connect again
-                LOG.warn(_LW('Storage Group %s is not found. Create it.'),
-                         storage_group)
+                LOG.warning(_LW('Storage Group %s is not found. Create it.'),
+                            storage_group)
                 self.assure_storage_group(storage_group)
                 self._client.connect_host_to_storage_group(
                     hostname, storage_group)
@@ -2111,31 +2242,13 @@ class EMCVnxCliBase(object):
                 raise ex
         return hostname
 
-    def find_device_details(self, volume, storage_group):
-        """Returns the Host Device number for the volume."""
-
-        host_lun_id = -1
-
-        data = self._client.get_storage_group(storage_group)
-        lun_map = data['lunmap']
-        data = self._client.get_lun_by_name(volume['name'])
-        allocated_lun_id = data['lun_id']
+    def get_lun_owner(self, volume):
+        """Returns SP owner of the volume."""
+        data = self._client.get_lun_by_name(volume['name'],
+                                            poll=False)
         owner_sp = data['owner']
-
-        for lun in lun_map.iterkeys():
-            if lun == int(allocated_lun_id):
-                host_lun_id = lun_map[lun]
-                LOG.debug('Host Lun Id : %s' % (host_lun_id))
-                break
-
-        LOG.debug('Owner SP : %s' % (owner_sp))
-
-        device = {
-            'hostlunid': host_lun_id,
-            'ownersp': owner_sp,
-            'lunmap': lun_map,
-        }
-        return device
+        LOG.debug('Owner SP : %s', owner_sp)
+        return owner_sp
 
     def filter_available_hlu_set(self, used_hlus):
         used_hlu_set = set(used_hlus)
@@ -2178,24 +2291,27 @@ class EMCVnxCliBase(object):
                                  '-spport', port_id, '-spvport', vport_id,
                                  '-ip', ip, '-host', host, '-o')
             out, rc = self._client.command_execute(*cmd_iscsi_setpath)
-            if rc != 0:
-                raise EMCVnxCLICmdError(cmd_iscsi_setpath, rc, out)
         else:
             cmd_fc_setpath = ('storagegroup', '-gname', gname, '-setpath',
                               '-hbauid', initiator_uid, '-sp', sp,
                               '-spport', port_id,
                               '-ip', ip, '-host', host, '-o')
             out, rc = self._client.command_execute(*cmd_fc_setpath)
-            if rc != 0:
-                raise EMCVnxCLICmdError(cmd_fc_setpath, rc, out)
+        if rc != 0:
+            LOG.warning(_LW("Failed to register %(itor)s to SP%(sp)s "
+                            "port %(portid)s because: %(msg)s."),
+                        {'itor': initiator_uid,
+                         'sp': sp,
+                         'portid': port_id,
+                         'msg': out})
 
     def _register_iscsi_initiator(self, ip, host, initiator_uids):
+        iscsi_targets = self.iscsi_targets
         for initiator_uid in initiator_uids:
-            iscsi_targets = self._client.get_iscsi_targets()
             LOG.info(_LI('Get ISCSI targets %(tg)s to register '
-                         'initiator %(in)s.')
-                     % ({'tg': iscsi_targets,
-                         'in': initiator_uid}))
+                         'initiator %(in)s.'),
+                     {'tg': iscsi_targets,
+                      'in': initiator_uid})
 
             target_portals_SPA = list(iscsi_targets['A'])
             target_portals_SPB = list(iscsi_targets['B'])
@@ -2215,11 +2331,12 @@ class EMCVnxCliBase(object):
                                            ip, host, vport_id)
 
     def _register_fc_initiator(self, ip, host, initiator_uids):
+        fc_targets = self._client.get_fc_targets()
         for initiator_uid in initiator_uids:
-            fc_targets = self._client.get_fc_targets()
-            LOG.info(_LI('Get FC targets %(tg)s to register initiator %(in)s.')
-                     % ({'tg': fc_targets,
-                         'in': initiator_uid}))
+            LOG.info(_LI('Get FC targets %(tg)s to register '
+                         'initiator %(in)s.'),
+                     {'tg': fc_targets,
+                      'in': initiator_uid})
 
             target_portals_SPA = list(fc_targets['A'])
             target_portals_SPB = list(fc_targets['B'])
@@ -2236,16 +2353,32 @@ class EMCVnxCliBase(object):
                 self._exec_command_setpath(initiator_uid, sp, port_id,
                                            ip, host)
 
-    def _filter_unregistered_initiators(self, initiator_uids=tuple()):
+    def _deregister_initiators(self, connector):
+        initiator_uids = []
+        try:
+            if self.protocol == 'iSCSI':
+                initiator_uids = self._extract_iscsi_uids(connector)
+            elif self.protocol == 'FC':
+                initiator_uids = self._extract_fc_uids(connector)
+        except exception.VolumeBackendAPIException:
+            LOG.warning(_LW("Failed to extract initiators of %s, so ignore "
+                            "deregistration operation."),
+                        connector['host'])
+        if initiator_uids:
+            for initiator_uid in initiator_uids:
+                rc, out = self._client.deregister_initiator(initiator_uid)
+                if rc != 0:
+                    LOG.warning(_LW("Failed to deregister %(itor)s "
+                                    "because: %(msg)s."),
+                                {'itor': initiator_uid,
+                                 'msg': out})
+
+    def _filter_unregistered_initiators(self, initiator_uids, sgdata):
         unregistered_initiators = []
         if not initiator_uids:
             return unregistered_initiators
 
-        command_get_storage_group = ('storagegroup', '-list')
-        out, rc = self._client.command_execute(*command_get_storage_group)
-
-        if rc != 0:
-            raise EMCVnxCLICmdError(command_get_storage_group, rc, out)
+        out = sgdata['raw_output']
 
         for initiator_uid in initiator_uids:
             m = re.search(initiator_uid, out)
@@ -2253,103 +2386,144 @@ class EMCVnxCliBase(object):
                 unregistered_initiators.append(initiator_uid)
         return unregistered_initiators
 
-    def auto_register_initiator(self, connector):
-        """Automatically register available initiators."""
+    def auto_register_initiator(self, connector, sgdata):
+        """Automatically registers available initiators.
+
+        Returns True if has registered initiator otherwise returns False.
+        """
         initiator_uids = []
         ip = connector['ip']
         host = connector['host']
         if self.protocol == 'iSCSI':
             initiator_uids = self._extract_iscsi_uids(connector)
-            itors_toReg = self._filter_unregistered_initiators(initiator_uids)
-            LOG.debug('iSCSI Initiators %(in)s of %(ins)s need registration.'
-                      % ({'in': itors_toReg,
-                         'ins': initiator_uids}))
-            if not itors_toReg:
-                LOG.debug('Initiators %s are already registered'
-                          % initiator_uids)
-                return
+            if sgdata is not None:
+                itors_toReg = self._filter_unregistered_initiators(
+                    initiator_uids,
+                    sgdata)
+            else:
+                itors_toReg = initiator_uids
+
+            if len(itors_toReg) == 0:
+                return False
+
+            LOG.info(_LI('iSCSI Initiators %(in)s of %(ins)s '
+                         'need registration.'),
+                     {'in': itors_toReg,
+                      'ins': initiator_uids})
             self._register_iscsi_initiator(ip, host, itors_toReg)
+            return True
 
         elif self.protocol == 'FC':
             initiator_uids = self._extract_fc_uids(connector)
-            itors_toReg = self._filter_unregistered_initiators(initiator_uids)
-            LOG.debug('FC Initiators %(in)s of %(ins)s need registration.'
-                      % ({'in': itors_toReg,
-                         'ins': initiator_uids}))
-            if not itors_toReg:
-                LOG.debug('Initiators %s are already registered.'
-                          % initiator_uids)
-                return
+            if sgdata is not None:
+                itors_toReg = self._filter_unregistered_initiators(
+                    initiator_uids,
+                    sgdata)
+            else:
+                itors_toReg = initiator_uids
+
+            if len(itors_toReg) == 0:
+                return False
+
+            LOG.info(_LI('FC Initiators %(in)s of %(ins)s need registration'),
+                     {'in': itors_toReg,
+                      'ins': initiator_uids})
             self._register_fc_initiator(ip, host, itors_toReg)
+            return True
 
-    def assure_host_access(self, volumename, connector):
+    def assure_host_access(self, volume, connector):
         hostname = connector['host']
+        volumename = volume['name']
         auto_registration_done = False
         try:
-            self.get_storage_group_uid(hostname)
+            sgdata = self._client.get_storage_group(hostname,
+                                                    poll=False)
         except EMCVnxCLICmdError as ex:
-            if ex.rc != 83:
+            if ex.kwargs["rc"] != 83:
                 raise ex
             # Storage Group has not existed yet
             self.assure_storage_group(hostname)
             if self.itor_auto_reg:
-                self.auto_register_initiator(connector)
+                self.auto_register_initiator(connector, None)
                 auto_registration_done = True
             else:
                 self._client.connect_host_to_storage_group(hostname, hostname)
 
+            sgdata = self._client.get_storage_group(hostname,
+                                                    poll=True)
+
         if self.itor_auto_reg and not auto_registration_done:
-            self.auto_register_initiator(connector)
-            auto_registration_done = True
-
-        lun_id = self.get_lun_id_by_name(volumename)
-        lun_map = self.get_lun_map(hostname)
-        if lun_id in lun_map:
-            return lun_map[lun_id]
-        used_hlus = lun_map.values()
-        if len(used_hlus) >= self.max_luns_per_sg:
-            msg = (_('Reach limitation set by configuration '
-                     'option max_luns_per_storage_group. '
-                     'Operation to add %(vol)s into '
-                     'Storage Group %(sg)s is rejected.')
-                   % {'vol': volumename, 'sg': hostname})
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
+            new_registered = self.auto_register_initiator(connector, sgdata)
+            if new_registered:
+                sgdata = self._client.get_storage_group(hostname,
+                                                        poll=True)
+
+        lun_id = self.get_lun_id(volume)
+        tried = 0
+        while tried < self.max_retries:
+            tried += 1
+            lun_map = sgdata['lunmap']
+            used_hlus = lun_map.values()
+            candidate_hlus = self.filter_available_hlu_set(used_hlus)
+            candidate_hlus = list(candidate_hlus)
+
+            if len(candidate_hlus) != 0:
+                hlu = candidate_hlus[random.randint(0,
+                                                    len(candidate_hlus) - 1)]
+                try:
+                    self._client.add_hlu_to_storage_group(
+                        hlu,
+                        lun_id,
+                        hostname)
+
+                    if hostname not in self.hlu_cache:
+                        self.hlu_cache[hostname] = {}
+                    self.hlu_cache[hostname][lun_id] = hlu
+                    return hlu, sgdata
+                except EMCVnxCLICmdError as ex:
+                    LOG.debug("Add HLU to storagegroup failed, retry %s",
+                              tried)
+            elif tried == 1:
+                # The first try didn't get the in time data,
+                # so we need a retry
+                LOG.debug("Did not find candidate HLUs, retry %s",
+                          tried)
+            else:
+                msg = (_('Reach limitation set by configuration '
+                         'option max_luns_per_storage_group. '
+                         'Operation to add %(vol)s into '
+                         'Storage Group %(sg)s is rejected.')
+                       % {'vol': volumename, 'sg': hostname})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
 
-        candidate_hlus = self.filter_available_hlu_set(used_hlus)
-        candidate_hlus = list(candidate_hlus)
-        random.shuffle(candidate_hlus)
-        for i, hlu in enumerate(candidate_hlus):
-            if i >= self.max_retries:
-                break
-            try:
-                self._client.add_hlu_to_storage_group(
-                    hlu,
-                    lun_id,
-                    hostname)
-                return hlu
-            except EMCVnxCLICmdError as ex:
-                # Retry
-                continue
+            # Need a full poll to get the real in time data
+            # Query storage group with poll for retry
+            sgdata = self._client.get_storage_group(hostname, poll=True)
+            self.hlu_cache[hostname] = sgdata['lunmap']
+            if lun_id in sgdata['lunmap']:
+                hlu = sgdata['lunmap'][lun_id]
+                return hlu, sgdata
 
         msg = _("Failed to add %(vol)s into %(sg)s "
                 "after %(retries)s tries.") % \
             {'vol': volumename,
              'sg': hostname,
-             'retries': min(self.max_retries, len(candidate_hlus))}
+             'retries': tried}
         LOG.error(msg)
         raise exception.VolumeBackendAPIException(data=msg)
 
-    def vnx_get_iscsi_properties(self, volume, connector):
+    def vnx_get_iscsi_properties(self, volume, connector, hlu, sg_raw_output):
         storage_group = connector['host']
-        device_info = self.find_device_details(volume, storage_group)
-        owner_sp = device_info['ownersp']
+        owner_sp = self.get_lun_owner(volume)
         registered_spports = self._client.get_registered_spport_set(
             connector['initiator'],
-            storage_group)
+            storage_group,
+            sg_raw_output)
         target = self._client.find_avaialable_iscsi_target_one(
             storage_group, owner_sp,
-            registered_spports)
+            registered_spports,
+            self.iscsi_targets)
         properties = {'target_discovered': True,
                       'target_iqn': 'unknown',
                       'target_portal': 'unknown',
@@ -2359,7 +2533,7 @@ class EMCVnxCliBase(object):
             properties = {'target_discovered': True,
                           'target_iqn': target['Port WWN'],
                           'target_portal': "%s:3260" % target['IP Address'],
-                          'target_lun': device_info['hostlunid']}
+                          'target_lun': hlu}
             LOG.debug("iSCSI Properties: %s", properties)
             auth = volume['provider_auth']
             if auth:
@@ -2368,19 +2542,25 @@ class EMCVnxCliBase(object):
                 properties['auth_username'] = auth_username
                 properties['auth_password'] = auth_secret
         else:
-            LOG.error(_LE('Failed to find an available '
-                          'iSCSI targets for %s.'),
+            LOG.error(_LE('Failed to find an available iSCSI target for %s.'),
                       storage_group)
 
         return properties
 
     def vnx_get_fc_properties(self, connector, device_number):
-        ports = self.get_login_ports(connector)
-        return {'target_lun': device_number,
-                'target_discovered': True,
-                'target_wwn': ports}
+        fc_properties = {'target_lun': device_number,
+                         'target_discovered': True,
+                         'target_wwn': None}
+        if self.zonemanager_lookup_service is None:
+            fc_properties['target_wwn'] = self.get_login_ports(connector)
+        else:
+            target_wwns, itor_tgt_map = self.get_initiator_target_map(
+                connector['wwpns'],
+                self.get_status_up_ports(connector))
+            fc_properties['target_wwn'] = target_wwns
+            fc_properties['initiator_target_map'] = itor_tgt_map
+        return fc_properties
 
-    @log_enter_exit
     def initialize_connection(self, volume, connector):
         volume_metadata = {}
         for metadata in volume['volume_admin_metadata']:
@@ -2390,27 +2570,30 @@ class EMCVnxCliBase(object):
             access_mode = ('ro'
                            if volume_metadata.get('readonly') == 'True'
                            else 'rw')
-        LOG.debug('Volume %(vol)s Access mode is: %(access)s.'
-                  {'vol': volume['name'],
-                     'access': access_mode})
+        LOG.debug('Volume %(vol)s Access mode is: %(access)s.',
+                  {'vol': volume['name'],
+                   'access': access_mode})
 
         """Initializes the connection and returns connection info."""
         @lockutils.synchronized('emc-connection-' + connector['host'],
                                 "emc-connection-", True)
         def do_initialize_connection():
-            device_number = self.assure_host_access(
-                volume['name'], connector)
-            return device_number
+            return self.assure_host_access(
+                volume, connector)
 
         if self.protocol == 'iSCSI':
-            do_initialize_connection()
-            iscsi_properties = self.vnx_get_iscsi_properties(volume,
-                                                             connector)
+            (device_number, sg_data) = do_initialize_connection()
+            iscsi_properties = self.vnx_get_iscsi_properties(
+                volume,
+                connector,
+                device_number,
+                sg_data['raw_output']
+            )
             iscsi_properties['access_mode'] = access_mode
             data = {'driver_volume_type': 'iscsi',
                     'data': iscsi_properties}
         elif self.protocol == 'FC':
-            device_number = do_initialize_connection()
+            (device_number, sg_data) = do_initialize_connection()
             fc_properties = self.vnx_get_fc_properties(connector,
                                                        device_number)
             fc_properties['volume_id'] = volume['id']
@@ -2420,77 +2603,76 @@ class EMCVnxCliBase(object):
 
         return data
 
-    @log_enter_exit
     def terminate_connection(self, volume, connector):
         """Disallow connection from connector."""
-
         @lockutils.synchronized('emc-connection-' + connector['host'],
                                 "emc-connection-", True)
         def do_terminate_connection():
             hostname = connector['host']
             volume_name = volume['name']
-            try:
-                lun_map = self.get_lun_map(hostname)
-            except EMCVnxCLICmdError as ex:
-                if ex.rc == 83:
-                    LOG.warn(_LW("Storage Group %s is not found. "
-                                 "terminate_connection() is unnecessary."),
-                             hostname)
-                    return True
-            try:
-                lun_id = self.get_lun_id(volume)
-            except EMCVnxCLICmdError as ex:
-                if ex.rc == 9:
-                    LOG.warn(_LW("Volume %s is not found. "
-                                 "It has probably been removed in VNX.")
-                             % volume_name)
-
-            if lun_id in lun_map:
-                self._client.remove_hlu_from_storagegroup(
-                    lun_map[lun_id], hostname)
+            lun_id = self.get_lun_id(volume)
+            lun_map = None
+            conn_info = None
+            if (hostname in self.hlu_cache and
+                    lun_id in self.hlu_cache[hostname] and
+                    not self.destroy_empty_sg and
+                    not self.zonemanager_lookup_service):
+                hlu = self.hlu_cache[hostname][lun_id]
+                self._client.remove_hlu_from_storagegroup(hlu, hostname,
+                                                          poll=True)
+                self.hlu_cache[hostname].pop(lun_id)
             else:
-                LOG.warn(_LW("Volume %(vol)s was not in Storage Group %(sg)s.")
-                         % {'vol': volume_name, 'sg': hostname})
-            if self.destroy_empty_sg or self.zonemanager_lookup_service:
                 try:
                     lun_map = self.get_lun_map(hostname)
-                    if not lun_map:
-                        LOG.debug("Storage Group %s was empty.", hostname)
-                        if self.destroy_empty_sg:
-                            LOG.info(_LI("Storage Group %s was empty, "
-                                         "destroy it."), hostname)
-                            self._client.disconnect_host_from_storage_group(
-                                hostname, hostname)
-                            self._client.delete_storage_group(hostname)
-                        return True
-                    else:
-                        LOG.debug("Storage Group %s not empty,", hostname)
-                        return False
+                    self.hlu_cache[hostname] = lun_map
+                except EMCVnxCLICmdError as ex:
+                    # rc 83: SG already gone -> nothing to terminate.
+                    if ex.kwargs["rc"] != 83:
+                        raise
+                    LOG.warning(_LW("Storage Group %s is not found."), hostname)
+                    return None
+                if lun_id in lun_map:
+                    self._client.remove_hlu_from_storagegroup(
+                        lun_map[lun_id], hostname)
+                    lun_map.pop(lun_id)
+                else:
+                    LOG.warning(_LW("Volume %(vol)s was not in Storage Group"
+                                    " %(sg)s."),
+                                {'vol': volume_name, 'sg': hostname})
+
+            if self.protocol == 'FC':
+                conn_info = {'driver_volume_type': 'fibre_channel',
+                             'data': {}}
+                if self.zonemanager_lookup_service and not lun_map:
+                    target_wwns, itor_tgt_map = self.get_initiator_target_map(
+                        connector['wwpns'],
+                        self.get_status_up_ports(connector))
+                    conn_info['data']['initiator_target_map'] = itor_tgt_map
+
+            if self.destroy_empty_sg and not lun_map:
+                try:
+                    LOG.info(_LI("Storage Group %s was empty."), hostname)
+                    self._client.disconnect_host_from_storage_group(
+                        hostname, hostname)
+                    self._client.delete_storage_group(hostname)
+                    if self.itor_auto_dereg:
+                        self._deregister_initiators(connector)
                 except Exception:
-                    LOG.warn(_LW("Failed to destroy Storage Group %s."),
-                             hostname)
-            else:
-                return False
+                    LOG.warning(_LW("Failed to destroy Storage Group %s."),
+                                hostname)
+                    try:
+                        self._client.connect_host_to_storage_group(
+                            hostname, hostname)
+                    except Exception:
+                        LOG.warning(_LW("Fail to connect host %(host)s "
+                                        "back to storage group %(sg)s."),
+                                    {'host': hostname, 'sg': hostname})
+            return conn_info
         return do_terminate_connection()
 
-    @log_enter_exit
-    def adjust_fc_conn_info(self, conn_info, connector, remove_zone=None):
-        target_wwns, itor_tgt_map = self.get_initiator_target_map(
-            connector['wwpns'],
-            self.get_status_up_ports(connector))
-        if target_wwns:
-            conn_info['data']['target_wwn'] = target_wwns
-        if remove_zone is None or remove_zone:
-            # Return initiator_target_map for initialize_connection (None)
-            # Return initiator_target_map for terminate_connection when (True)
-            # no volumes are in the storagegroup for host to use
-            conn_info['data']['initiator_target_map'] = itor_tgt_map
-        return conn_info
-
-    @log_enter_exit
     def manage_existing_get_size(self, volume, ref):
-        """Return size of volume to be managed by manage_existing.
-        """
+        """Return size of volume to be managed by manage_existing."""
+
         # Check that the reference is valid
         if 'id' not in ref:
             reason = _('Reference must contain lun_id element.')
@@ -2506,9 +2688,19 @@ class EMCVnxCliBase(object):
                                                            reason=reason)
         return data['total_capacity_gb']
 
-    @log_enter_exit
     def manage_existing(self, volume, ref):
-        raise NotImplementedError
+        """Imports the existing backend storage object as a volume.
+
+        Renames the backend storage object so that it matches the,
+        volume['name'] which is how drivers traditionally map between a
+        cinder volume and the associated backend storage object.
+
+        existing_ref:{
+            'id':lun_id
+        }
+        """
+
+        self._client.lun_rename(ref['id'], volume['name'])
 
     def find_iscsi_protocol_endpoints(self, device_sp):
         """Returns the iSCSI initiators for a SP."""
@@ -2546,6 +2738,7 @@ class EMCVnxCliBase(object):
         return specs
 
 
+@decorate_all_methods(log_enter_exit)
 class EMCVnxCliPool(EMCVnxCliBase):
 
     def __init__(self, prtcl, configuration):
@@ -2568,71 +2761,68 @@ class EMCVnxCliPool(EMCVnxCliBase):
                     raise exception.VolumeBackendAPIException(data=msg)
         return self.storage_pool
 
-    def is_pool_fastcache_enabled(self, storage_pool, no_poll=False):
-        command_check_fastcache = None
-        if no_poll:
-            command_check_fastcache = ('-np', 'storagepool', '-list', '-name',
-                                       storage_pool, '-fastcache')
-        else:
-            command_check_fastcache = ('storagepool', '-list', '-name',
-                                       storage_pool, '-fastcache')
-        out, rc = self._client.command_execute(*command_check_fastcache)
-
-        if 0 != rc:
-            raise EMCVnxCLICmdError(command_check_fastcache, rc, out)
-        else:
-            re_fastcache = 'FAST Cache:\s*(.*)\s*'
-            m = re.search(re_fastcache, out)
-            if m is not None:
-                result = True if 'Enabled' == m.group(1) else False
-            else:
-                LOG.error(_LE("Error parsing output for FastCache Command."))
-        return result
-
-    @log_enter_exit
     def update_volume_stats(self):
-        """Retrieve stats info."""
+        """Retrieves stats info."""
         self.stats = super(EMCVnxCliPool, self).update_volume_stats()
-        data = self._client.get_pool(self.get_target_storagepool())
-        self.stats['total_capacity_gb'] = data['total_capacity_gb']
-        self.stats['free_capacity_gb'] = data['free_capacity_gb']
-
-        array_serial = self._client.get_array_serial(NO_POLL)
+        pool = self._client.get_pool(self.get_target_storagepool(),
+                                     poll=False)
+        self.stats['total_capacity_gb'] = pool['total_capacity_gb']
+        self.stats['free_capacity_gb'] = pool['free_capacity_gb']
+        # Some extra capacity will be used by meta data of pool LUNs.
+        # The overhead is about LUN_Capacity * 0.02 + 3 GB
+        # reserved_percentage will be used to make sure the scheduler
+        # takes the overhead into consideration
+        # Assume that all the remaining capacity is to be used to create
+        # a thick LUN, reserved_percentage is estimated as follows:
+        reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
+                     (1.02 * pool['total_capacity_gb'])) * 100)
+        self.stats['reserved_percentage'] = int(math.ceil(min(reserved, 100)))
+        if self.check_max_pool_luns_threshold:
+            pool_feature = self._client.get_pool_feature_properties(poll=False)
+            if (pool_feature['max_pool_luns']
+                    <= pool_feature['total_pool_luns']):
+                LOG.warning(_LW("Maximum number of Pool LUNs, %s, "
+                                "have been created. "
+                                "No more LUN creation can be done."),
+                            pool_feature['max_pool_luns'])
+                self.stats['free_capacity_gb'] = 0
+        array_serial = self._client.get_array_serial()
         self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
                                        {'pool_name': self.storage_pool,
                                         'array_serial':
                                            array_serial['array_serial']})
         # check if this pool's fast_cache is really enabled
         if self.stats['fast_cache_enabled'] == 'True' and \
-           not self.is_pool_fastcache_enabled(self.storage_pool, NO_POLL):
+           not self._client.is_pool_fastcache_enabled(self.storage_pool):
             self.stats['fast_cache_enabled'] = 'False'
         return self.stats
 
-    @log_enter_exit
-    def manage_existing(self, volume, ref):
-        """Manage an existing lun in the array.
-
-        The lun should be in a manageable pool backend, otherwise
-        error would return.
-        Rename the backend storage object so that it matches the,
-        volume['name'] which is how drivers traditionally map between a
-        cinder volume and the associated backend storage object.
-
-        existing_ref:{
-            'id':lun_id
-        }
-        """
+    def manage_existing_get_size(self, volume, ref):
+        """Returns size of volume to be managed by manage_existing."""
 
+        # Check that the reference is valid
+        if 'id' not in ref:
+            reason = _('Reference must contain lun_id element.')
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=ref,
+                reason=reason)
+        # Check for existence of the lun
         data = self._client.get_lun_by_id(
-            ref['id'], self._client.LUN_WITH_POOL)
-        if self.storage_pool != data['pool']:
+            ref['id'],
+            properties=self._client.LUN_WITH_POOL)
+        if data is None:
+            reason = _('Cannot find the lun with LUN id %s.') % ref['id']
+            raise exception.ManageExistingInvalidReference(existing_ref=ref,
+                                                           reason=reason)
+        if data['pool'] != self.storage_pool:
             reason = _('The input lun is not in a manageable pool backend '
                        'by cinder')
             raise exception.ManageExistingInvalidReference(existing_ref=ref,
                                                            reason=reason)
-        self._client.lun_rename(ref['id'], volume['name'])
+        return data['total_capacity_gb']
 
 
+@decorate_all_methods(log_enter_exit)
 class EMCVnxCliArray(EMCVnxCliBase):
 
     def __init__(self, prtcl, configuration):
@@ -2642,7 +2832,7 @@ class EMCVnxCliArray(EMCVnxCliBase):
 
     def _update_pool_cache(self):
         LOG.debug("Updating Pool Cache")
-        self.pool_cache = self._client.get_pool_list(NO_POLL)
+        self.pool_cache = self._client.get_pool_list(poll=False)
 
     def get_target_storagepool(self, volume, source_volume_name=None):
         """Find the storage pool for given volume."""
@@ -2671,14 +2861,13 @@ class EMCVnxCliArray(EMCVnxCliBase):
         LOG.error(msg)
         raise exception.VolumeBackendAPIException(data=msg)
 
-    @log_enter_exit
     def update_volume_stats(self):
         """Retrieve stats info."""
         self.stats = super(EMCVnxCliArray, self).update_volume_stats()
         self._update_pool_cache()
         self.stats['total_capacity_gb'] = 'unknown'
         self.stats['free_capacity_gb'] = 'unknown'
-        array_serial = self._client.get_array_serial(NO_POLL)
+        array_serial = self._client.get_array_serial()
         self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
                                        {'pool_name': '',
                                         'array_serial':
@@ -2686,19 +2875,6 @@ class EMCVnxCliArray(EMCVnxCliBase):
         self.stats['fast_cache_enabled'] = 'unknown'
         return self.stats
 
-    @log_enter_exit
-    def manage_existing(self, volume, ref):
-        """Rename the backend storage object so that it matches the,
-        volume['name'] which is how drivers traditionally map between a
-        cinder volume and the associated backend storage object.
-
-        existing_ref:{
-            'id':lun_id
-        }
-        """
-
-        self._client.lun_rename(ref['id'], volume['name'])
-
 
 def getEMCVnxCli(prtcl, configuration=None):
     configuration.append_config_values(loc_opts)
@@ -2708,3 +2884,135 @@ def getEMCVnxCli(prtcl, configuration=None):
         return EMCVnxCliArray(prtcl, configuration=configuration)
     else:
         return EMCVnxCliPool(prtcl, configuration=configuration)
+
+
+class CreateSMPTask(task.Task):
+    """Creates a snap mount point (SMP) for the source snapshot.
+
+    An SMP is a LUN-like object the snapshot can later be attached to,
+    named after the new cinder volume.
+
+    Reversion strategy: Delete the SMP.
+    """
+    def execute(self, client, volume, source_vol_name, *args, **kwargs):
+        LOG.debug('CreateSMPTask.execute')
+        # Create the mount point on top of the source LUN, named after
+        # the destination volume so later tasks can find it by name.
+        client.create_mount_point(source_vol_name, volume['name'])
+
+    def revert(self, result, client, volume, *args, **kwargs):
+        LOG.debug('CreateSMPTask.revert')
+        # A Failure result means execute() itself did not complete, so
+        # no SMP was created and there is nothing to clean up.
+        if isinstance(result, failure.Failure):
+            return
+        else:
+            LOG.warning(_LW('CreateSMPTask.revert: delete mount point %s'),
+                        volume['name'])
+            client.delete_lun(volume['name'])
+
+
+class AttachSnapTask(task.Task):
+    """Attaches the snapshot to the SMP created before.
+
+    Runs after CreateSMPTask in the flow; the SMP is looked up by the
+    destination volume's name.
+
+    Reversion strategy: Detach the SMP.
+    """
+    def execute(self, client, volume, snap_name, *args, **kwargs):
+        LOG.debug('AttachSnapTask.execute')
+        client.attach_mount_point(volume['name'], snap_name)
+
+    def revert(self, result, client, volume, *args, **kwargs):
+        LOG.debug('AttachSnapTask.revert')
+        # A Failure result means the attach never happened; detaching
+        # would be both unnecessary and an error.
+        if isinstance(result, failure.Failure):
+            return
+        else:
+            LOG.warning(_LW('AttachSnapTask.revert: detach mount point %s'),
+                        volume['name'])
+            client.detach_mount_point(volume['name'])
+
+
+class CreateDestLunTask(task.Task):
+    """Creates a destination lun for migration.
+
+    Reversion strategy: Delete the temp lun.
+    """
+    def __init__(self):
+        # Publish the created LUN's metadata ('lun_data') to downstream
+        # tasks in the flow (consumed by MigrateLunTask).
+        super(CreateDestLunTask, self).__init__(provides='lun_data')
+
+    def execute(self, client, pool_name, dest_vol_name, volume_size,
+                provisioning, tiering, *args, **kwargs):
+        LOG.debug('CreateDestLunTask.execute')
+        data = client.create_lun_with_advance_feature(
+            pool_name, dest_vol_name, volume_size,
+            provisioning, tiering)
+        return data
+
+    def revert(self, result, client, dest_vol_name, *args, **kwargs):
+        LOG.debug('CreateDestLunTask.revert')
+        # A Failure result means the LUN was never created; nothing to
+        # delete in that case.
+        if isinstance(result, failure.Failure):
+            return
+        else:
+            LOG.warning(_LW('CreateDestLunTask.revert: delete temp lun %s'),
+                        dest_vol_name)
+            client.delete_lun(dest_vol_name)
+
+
+class MigrateLunTask(task.Task):
+    """Starts a migration between the SMP and the temp lun.
+
+    Reversion strategy: None — an in-array migration cannot be safely
+    undone once started; cleanup of the source objects is handled by
+    the earlier tasks' reverts.
+    """
+    def __init__(self):
+        # Publish the migrated LUN's id to downstream tasks in the flow.
+        super(MigrateLunTask, self).__init__(provides='new_lun_id')
+
+    def execute(self, client, dest_vol_name, volume, lun_data,
+                *args, **kwargs):
+        LOG.debug('MigrateLunTask.execute')
+        new_vol_name = volume['name']
+        new_vol_lun_id = client.get_lun_by_name(new_vol_name)['lun_id']
+        dest_vol_lun_id = lun_data['lun_id']
+
+        LOG.info(_LI('Migrating Mount Point Volume: %s'), new_vol_name)
+
+        migrated = client.migrate_lun_with_verification(new_vol_lun_id,
+                                                        dest_vol_lun_id,
+                                                        None)
+        if not migrated:
+            # Interpolate with '%' rather than building a (format, args)
+            # tuple — the previous comma produced a tuple that was logged
+            # and raised verbatim, without substitution.
+            msg = _LE("Migrate volume failed between source vol %(src)s"
+                      " and dest vol %(dst)s.") % {'src': new_vol_name,
+                                                   'dst': dest_vol_name}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return new_vol_lun_id
+
+    def revert(self, *args, **kwargs):
+        # Intentionally a no-op; see the class docstring.
+        pass
+
+
+class CreateSnapshotTask(task.Task):
+    """Creates a snapshot/cgsnapshot of a volume.
+
+    A cgsnapshot is taken when the snapshot belongs to a consistency
+    group; otherwise a plain LUN snapshot is taken.
+
+    Reversion Strategy: Delete the created snapshot/cgsnapshot.
+    """
+    def execute(self, client, snapshot, source_lun_id, *args, **kwargs):
+        LOG.debug('CreateSnapshotTask.execute')
+        # Create temp Snapshot
+        if snapshot['consistencygroup_id']:
+            client.create_cgsnapshot(snapshot)
+        else:
+            snapshot_name = snapshot['name']
+            volume_name = snapshot['volume_name']
+            LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s'),
+                     {'snapshot': snapshot_name,
+                      'volume': volume_name})
+            client.create_snapshot(source_lun_id, snapshot_name)
+
+    def revert(self, result, client, snapshot, *args, **kwargs):
+        LOG.debug('CreateSnapshotTask.revert')
+        # A Failure result means execute() did not complete, so no
+        # snapshot/cgsnapshot exists to delete.
+        if isinstance(result, failure.Failure):
+            return
+        else:
+            # Mirror the branch taken in execute(): cgsnapshot deletion
+            # for consistency-group members, plain snapshot otherwise.
+            if snapshot['consistencygroup_id']:
+                LOG.warning(_LW('CreateSnapshotTask.revert: '
+                                'delete temp cgsnapshot %s'),
+                            snapshot['consistencygroup_id'])
+                client.delete_cgsnapshot(snapshot)
+            else:
+                LOG.warning(_LW('CreateSnapshotTask.revert: '
+                                'delete temp snapshot %s'),
+                            snapshot['name'])
+                client.delete_snapshot(snapshot['name'])