review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Pool-aware scheduler support in EMC VNX Cinder driver
author: Jeegn Chen <jeegn.chen@emc.com>
Thu, 15 Jan 2015 01:35:09 +0000 (20:35 -0500)
committer: Jeegn Chen <jeegn.chen@emc.com>
Wed, 18 Feb 2015 02:50:25 +0000 (10:50 +0800)
Changed the EMC VNX driver to report pool information in
update_volume_stats and removed the original pool selection
logic from the VNX driver.
In addition, the driver-defined extra spec key
"storagetype:pool" is deprecated, since the "pool_name" key
introduced by the pool-aware scheduler feature now specifies
which pool the volume should be placed in.

Change-Id: Idd2b0008a3844ad9db72768e6016c63e7e4c5e98
Implements: blueprint pool-aware-cinder-scheduler-vnx

cinder/tests/test_emc_vnxdirect.py
cinder/volume/drivers/emc/emc_cli_fc.py
cinder/volume/drivers/emc/emc_cli_iscsi.py
cinder/volume/drivers/emc/emc_vnx_cli.py

index 405e466fc95f80d387a3ebf5bbc730930e22be2b..56a557ace675d098eac67509d099e3a2713054f5 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
+# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -31,6 +31,7 @@ from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
 
 SUCCEED = ("", 0)
 FAKE_ERROR_RETURN = ("FAKE ERROR", 255)
+VERSION = emc_vnx_cli.EMCVnxCliBase.VERSION
 
 
 class EMCVNXCLIDriverTestData():
@@ -41,8 +42,25 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol1',
         'id': '1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
-        'provider_location': 'system^FNM11111|type^lun|lun_id^1',
+        'provider_location': 'system^FNM11111|type^lun|id^1|version^05.02.00',
+        'display_name': 'vol1',
+        'display_description': 'test volume',
+        'volume_type_id': None,
+        'consistencygroup_id': None,
+        'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]
+    }
+
+    test_legacy_volume = {
+        'name': 'vol1',
+        'size': 1,
+        'volume_name': 'vol1',
+        'id': '1',
+        'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
+        'project_id': 'project',
+        'provider_location': 'system^FNM11111|type^lun|id^1',
         'display_name': 'vol1',
         'display_description': 'test volume',
         'volume_type_id': None,
@@ -56,6 +74,7 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol1',
         'id': '1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'vol1',
         'display_description': 'test volume',
@@ -70,6 +89,7 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol1',
         'id': '1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'vol1',
         'display_description': 'test volume',
@@ -84,13 +104,15 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol1',
         'id': '1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'vol1',
         'display_description': 'test volume',
         'volume_type_id': None,
         'consistencygroup_id': None,
         'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'},
-                                  {'key': 'readonly', 'value': 'False'}]
+                                  {'key': 'readonly', 'value': 'False'}],
+        'provider_location': 'system^FNM11111|type^lun|id^1|version^05.02.00',
     }
 
     test_volume2 = {
@@ -99,6 +121,7 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol2',
         'id': '1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'vol2',
         'consistencygroup_id': None,
@@ -111,6 +134,7 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol2',
         'id': '1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'vol2',
         'consistencygroup_id': None,
@@ -123,6 +147,7 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol_with_type',
         'id': '1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'thin_vol',
         'consistencygroup_id': None,
@@ -135,6 +160,7 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'failed_vol1',
         'id': '4',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'failed_vol',
         'consistencygroup_id': None,
@@ -147,11 +173,12 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol1_in_sg',
         'id': '4',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'failed_vol',
         'display_description': 'Volume 1 in SG',
         'volume_type_id': None,
-        'provider_location': 'system^fakesn|type^lun|id^4'}
+        'provider_location': 'system^fakesn|type^lun|id^4|version^05.02.00'}
 
     test_volume2_in_sg = {
         'name': 'vol2_in_sg',
@@ -159,11 +186,12 @@ class EMCVNXCLIDriverTestData():
         'volume_name': 'vol2_in_sg',
         'id': '5',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'failed_vol',
         'display_description': 'Volume 2 in SG',
         'volume_type_id': None,
-        'provider_location': 'system^fakesn|type^lun|id^3'}
+        'provider_location': 'system^fakesn|type^lun|id^3|version^05.02.00'}
 
     test_snapshot = {
         'name': 'snapshot1',
@@ -189,6 +217,7 @@ class EMCVNXCLIDriverTestData():
         'id': '2',
         'volume_name': 'vol1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'clone1',
         'consistencygroup_id': None,
@@ -200,6 +229,7 @@ class EMCVNXCLIDriverTestData():
         'id': '2',
         'volume_name': 'vol1',
         'provider_auth': None,
+        'host': "host@backendsec#unit_test_pool",
         'project_id': 'project',
         'display_name': 'clone1',
         'consistencygroup_id': 'consistencygroup_id',
@@ -211,23 +241,25 @@ class EMCVNXCLIDriverTestData():
         'wwpns': ["1234567890123456", "1234567890543216"],
         'wwnns': ["2234567890123456", "2234567890543216"],
         'host': 'fakehost'}
-    test_volume3 = {'migration_status': None, 'availability_zone': 'nova',
-                    'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
-                    'name': 'vol3',
-                    'size': 2,
-                    'volume_admin_metadata': [],
-                    'status': 'available',
-                    'volume_type_id':
-                    '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
-                    'deleted': False, 'provider_location': None,
-                    'host': 'ubuntu-server12@pool_backend_1',
-                    'source_volid': None, 'provider_auth': None,
-                    'display_name': 'vol-test02', 'instance_uuid': None,
-                    'attach_status': 'detached',
-                    'volume_type': [],
-                    'attached_host': None,
-                    'provider_location': 'system^FNM11111|type^lun|lun_id^1',
-                    '_name_id': None, 'volume_metadata': []}
+    test_volume3 = {
+        'migration_status': None, 'availability_zone': 'nova',
+        'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
+        'name': 'vol3',
+        'size': 2,
+        'volume_admin_metadata': [],
+        'status': 'available',
+        'volume_type_id':
+        '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
+        'deleted': False,
+        'host': "host@backendsec#unit_test_pool",
+        'source_volid': None, 'provider_auth': None,
+        'display_name': 'vol-test02', 'instance_uuid': None,
+        'attach_status': 'detached',
+        'volume_type': [],
+        'attached_host': None,
+        'provider_location':
+        'system^FNM11111|type^lun|id^1|version^05.02.00',
+        '_name_id': None, 'volume_metadata': []}
 
     test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
                      'deleted': False,
@@ -238,28 +270,13 @@ class EMCVNXCLIDriverTestData():
                  'extra_specs':
                  {'storagetype:provisioning': ('thick', 'thin')}}
 
-    test_host = {'host': 'ubuntu-server12@pool_backend_1',
+    test_host = {'host': 'ubuntu-server12@pool_backend_1#POOL_SAS1',
                  'capabilities':
-                 {'location_info': 'POOL_SAS1|FNM00124500890',
+                 {'pool_name': 'POOL_SAS1',
+                  'location_info': 'POOL_SAS1|FNM00124500890',
                   'volume_backend_name': 'pool_backend_1',
                   'storage_protocol': 'iSCSI'}}
 
-    test_volume4 = {'migration_status': None, 'availability_zone': 'nova',
-                    'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
-                    'name': 'vol4',
-                    'size': 2L,
-                    'volume_admin_metadata': [],
-                    'status': 'available',
-                    'volume_type_id':
-                    '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
-                    'deleted': False, 'provider_location': None,
-                    'host': 'ubuntu-server12@array_backend_1',
-                    'source_volid': None, 'provider_auth': None,
-                    'display_name': 'vol-test02', 'instance_uuid': None,
-                    'attach_status': 'detached',
-                    'volume_type': [],
-                    '_name_id': None, 'volume_metadata': []}
-
     test_volume5 = {'migration_status': None, 'availability_zone': 'nova',
                     'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
                     'name_id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
@@ -270,8 +287,8 @@ class EMCVNXCLIDriverTestData():
                     'volume_type_id':
                     '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
                     'deleted': False, 'provider_location':
-                    'system^FNM11111|type^lun|lun_id^5',
-                    'host': 'ubuntu-server12@array_backend_1',
+                    'system^FNM11111|type^lun|id^5|version^05.02.00',
+                    'host': 'ubuntu-server12@array_backend_1#unit_test_pool',
                     'source_volid': None, 'provider_auth': None,
                     'display_name': 'vol-test05', 'instance_uuid': None,
                     'attach_status': 'detached',
@@ -306,6 +323,7 @@ class EMCVNXCLIDriverTestData():
         'name': 'snapshot1',
         'size': 1,
         'id': 'cgsnapshot_id',
+        'volume': test_volume,
         'volume_name': 'vol1',
         'volume_size': 1,
         'consistencygroup_id': 'consistencygroup_id',
@@ -326,14 +344,63 @@ class EMCVNXCLIDriverTestData():
     POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
                          '-userCap', '-availableCap')
 
+    POOL_PROPERTY_W_FASTCACHE_CMD = ('storagepool', '-list', '-name',
+                                     'unit_test_pool', '-availableCap',
+                                     '-userCap', '-fastcache')
+
+    def POOL_GET_ALL_CMD(self, withfastcache=False):
+        if withfastcache:
+            return ('storagepool', '-list', '-availableCap',
+                    '-userCap', '-fastcache')
+        else:
+            return ('storagepool', '-list', '-availableCap',
+                    '-userCap')
+
+    def POOL_GET_ALL_RESULT(self, withfastcache=False):
+        if withfastcache:
+            return ("Pool Name:  unit_test_pool1\n"
+                    "Pool ID:  0\n"
+                    "User Capacity (Blocks):  6881061888\n"
+                    "User Capacity (GBs):  3281.146\n"
+                    "Available Capacity (Blocks):  6512292864\n"
+                    "Available Capacity (GBs):  3105.303\n"
+                    "FAST Cache:  Enabled\n"
+                    "\n"
+                    "Pool Name:  unit test pool 2\n"
+                    "Pool ID:  1\n"
+                    "User Capacity (Blocks):  8598306816\n"
+                    "User Capacity (GBs):  4099.992\n"
+                    "Available Capacity (Blocks):  8356663296\n"
+                    "Available Capacity (GBs):  3984.768\n"
+                    "FAST Cache:  Disabled\n", 0)
+        else:
+            return ("Pool Name:  unit_test_pool1\n"
+                    "Pool ID:  0\n"
+                    "User Capacity (Blocks):  6881061888\n"
+                    "User Capacity (GBs):  3281.146\n"
+                    "Available Capacity (Blocks):  6512292864\n"
+                    "Available Capacity (GBs):  3105.303\n"
+                    "\n"
+                    "Pool Name:  unit test pool 2\n"
+                    "Pool ID:  1\n"
+                    "User Capacity (Blocks):  8598306816\n"
+                    "User Capacity (GBs):  4099.992\n"
+                    "Available Capacity (Blocks):  8356663296\n"
+                    "Available Capacity (GBs):  3984.768\n", 0)
+
     NDU_LIST_CMD = ('ndu', '-list')
     NDU_LIST_RESULT = ("Name of the software package:   -Compression " +
                        "Name of the software package:   -Deduplication " +
                        "Name of the software package:   -FAST " +
                        "Name of the software package:   -FASTCache " +
-                       "Name of the software package:   -ThinProvisioning ",
+                       "Name of the software package:   -ThinProvisioning "
+                       "Name of the software package:   -VNXSnapshots",
                        0)
 
+    NDU_LIST_RESULT_WO_LICENSE = (
+        "Name of the software package:   -Unisphere ",
+        0)
+
     def SNAP_MP_CREATE_CMD(self, name='vol1', source='vol1'):
         return ('lun', '-create', '-type', 'snap', '-primaryLunName',
                 source, '-name', name)
@@ -349,10 +416,8 @@ class EMCVNXCLIDriverTestData():
                 '-allowReadWrite', 'yes',
                 '-allowAutoDelete', 'no')
 
-    def SNAP_LIST_CMD(self, res_id=1, poll=True):
+    def SNAP_LIST_CMD(self, res_id=1):
         cmd = ('snap', '-list', '-res', res_id)
-        if not poll:
-            cmd = ('-np',) + cmd
         return cmd
 
     def LUN_DELETE_CMD(self, name):
@@ -362,6 +427,9 @@ class EMCVNXCLIDriverTestData():
         return ('lun', '-expand', '-name', name, '-capacity', newsize,
                 '-sq', 'gb', '-o', '-ignoreThresholds')
 
+    def LUN_PROPERTY_POOL_CMD(self, lunname):
+        return ('lun', '-list', '-name', lunname, '-poolName')
+
     def LUN_PROPERTY_ALL_CMD(self, lunname):
         return ('lun', '-list', '-name', lunname,
                 '-state', '-status', '-opDetails', '-userCap', '-owner',
@@ -503,6 +571,15 @@ Available Capacity (GBs):  3257.851
 
 """, 0)
 
+    POOL_PROPERTY_W_FASTCACHE = (
+        "Pool Name:  unit_test_pool\n"
+        "Pool ID:  1\n"
+        "User Capacity (Blocks):  6881061888\n"
+        "User Capacity (GBs):  3281.146\n"
+        "Available Capacity (Blocks):  6832207872\n"
+        "Available Capacity (GBs):  3257.851\n"
+        "FAST Cache:  Enabled\n\n", 0)
+
     ALL_PORTS = ("SP:  A\n" +
                  "Port ID:  4\n" +
                  "Port WWN:  iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" +
@@ -792,13 +869,12 @@ Available Capacity (GBs):  3257.851
                     0)
 
 
-class EMCVNXCLIDriverISCSITestCase(test.TestCase):
-
+class DriverTestCaseBase(test.TestCase):
     def setUp(self):
-        super(EMCVNXCLIDriverISCSITestCase, self).setUp()
+        super(DriverTestCaseBase, self).setUp()
 
         self.stubs.Set(CommandLineHelper, 'command_execute',
-                       self.fake_setup_command_execute)
+                       self.fake_command_execute_for_driver_setup)
         self.stubs.Set(CommandLineHelper, 'get_array_serial',
                        mock.Mock(return_value={'array_serial':
                                                'fakeSerial'}))
@@ -819,20 +895,27 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
         self.configuration.default_timeout = 0.0002
         self.configuration.initiator_auto_registration = True
         self.configuration.check_max_pool_luns_threshold = False
-        self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
+        self.stubs.Set(self.configuration, 'safe_get',
+                       self.fake_safe_get({'storage_vnx_pool_name':
+                                           'unit_test_pool',
+                                           'volume_backend_name':
+                                           'namedbackend'}))
         self.testData = EMCVNXCLIDriverTestData()
         self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
             '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
         self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
 
     def driverSetup(self, commands=tuple(), results=tuple()):
-        self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+        self.driver = self.generateDriver(self.configuration)
         fake_command_execute = self.get_command_execute_simulator(
             commands, results)
         fake_cli = mock.Mock(side_effect=fake_command_execute)
         self.driver.cli._client.command_execute = fake_cli
         return fake_cli
 
+    def generateDriver(self, conf):
+        raise NotImplementedError
+
     def get_command_execute_simulator(self, commands=tuple(),
                                       results=tuple()):
         assert(len(commands) == len(results))
@@ -874,6 +957,22 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
 
         return standard_default
 
+    def fake_command_execute_for_driver_setup(self, *command, **kwargv):
+        if command == ('connection', '-getport', '-address', '-vlanid'):
+            return self.testData.ALL_PORTS
+        else:
+            return SUCCEED
+
+    def fake_safe_get(self, values):
+        def _safe_get(key):
+            return values.get(key)
+        return _safe_get
+
+
+class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
+    def generateDriver(self, conf):
+        return EMCCLIISCSIDriver(configuration=conf)
+
     @mock.patch(
         "eventlet.event.Event.wait",
         mock.Mock(return_value=None))
@@ -1061,107 +1160,58 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
             re.match(r".*Compression Enabler is not installed",
                      ex.msg))
 
-    @mock.patch(
-        "eventlet.event.Event.wait",
-        mock.Mock(return_value=None))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
-                                'storagetype:pool': 'unit_test_pool'}))
-    def test_create_compression_volume_on_array_backend(self):
-        """Unit test for create a compression volume on array
-        backend.
-        """
-        #Set up the array backend
-        config = conf.Configuration(None)
-        config.append_config_values = mock.Mock(return_value=0)
-        config.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
-        config.san_ip = '10.0.0.1'
-        config.san_login = 'sysadmin'
-        config.san_password = 'sysadmin'
-        config.default_timeout = 0.0002
-        config.initiator_auto_registration = True
-        config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
-            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
-        config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
-        self.driver = EMCCLIISCSIDriver(configuration=config)
-        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
-
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
-                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
-                   self.testData.LUN_PROPERTY('vol_with_type', True),
-                   self.testData.NDU_LIST_RESULT]
-        fake_command_execute = self.get_command_execute_simulator(
-            commands, results)
-        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
-        self.driver.cli._client.command_execute = fake_cli
-
-        self.driver.cli.stats['compression_support'] = 'True'
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        #case
-        self.driver.create_volume(self.testData.test_volume_with_type)
-        #verification
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                'vol_with_type', 1,
-                'unit_test_pool',
-                'compressed', None, False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                'vol_with_type'), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                'vol_with_type'), poll=True),
-            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
-                1))]
-        fake_cli.assert_has_calls(expect_cmd)
-
     def test_get_volume_stats(self):
-        self.driverSetup()
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_PROPERTY_W_FASTCACHE_CMD]
+        results = [self.testData.NDU_LIST_RESULT,
+                   self.testData.POOL_PROPERTY_W_FASTCACHE]
+        self.driverSetup(commands, results)
         stats = self.driver.get_volume_stats(True)
-        self.assertTrue(stats['driver_version'] is not None,
-                        "driver_version is not returned")
-        self.assertTrue(
-            stats['free_capacity_gb'] == 3257.851,
-            "free_capacity_gb is not correct")
-        self.assertTrue(
-            stats['reserved_percentage'] == 3,
-            "reserved_percentage is not correct")
+
+        self.assertTrue(stats['driver_version'] == VERSION,
+                        "driver_version is incorrect")
         self.assertTrue(
             stats['storage_protocol'] == 'iSCSI',
-            "storage_protocol is not correct")
-        self.assertTrue(
-            stats['total_capacity_gb'] == 3281.146,
-            "total_capacity_gb is not correct")
+            "storage_protocol is incorrect")
         self.assertTrue(
             stats['vendor_name'] == "EMC",
-            "vender name is not correct")
+            "vendor name is incorrect")
         self.assertTrue(
             stats['volume_backend_name'] == "namedbackend",
-            "volume backend name is not correct")
-        self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
-        self.assertTrue(
-            stats['driver_version'] == "05.01.00",
-            "driver version is incorrect.")
+            "volume backend name is incorrect")
+
+        pool_stats = stats['pools'][0]
+
+        expected_pool_stats = {
+            'free_capacity_gb': 3257.851,
+            'reserved_percentage': 3,
+            'location_info': 'unit_test_pool|fakeSerial',
+            'total_capacity_gb': 3281.146,
+            'compression_support': 'True',
+            'deduplication_support': 'True',
+            'thinprovisioning_support': 'True',
+            'consistencygroup_support': 'True',
+            'pool_name': 'unit_test_pool',
+            'fast_cache_enabled': 'True',
+            'fast_support': 'True'}
+
+        self.assertEqual(expected_pool_stats, pool_stats)
 
     def test_get_volume_stats_too_many_luns(self):
-        commands = [self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
-        results = [self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
+                    self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
+        results = [self.testData.NDU_LIST_RESULT,
+                   self.testData.POOL_PROPERTY_W_FASTCACHE,
+                   self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
         fake_cli = self.driverSetup(commands, results)
 
         self.driver.cli.check_max_pool_luns_threshold = True
         stats = self.driver.get_volume_stats(True)
+        pool_stats = stats['pools'][0]
         self.assertTrue(
-            stats['free_capacity_gb'] == 0,
-            "free_capacity_gb is not correct")
-        expect_cmd = [
-            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
-                      poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
+            pool_stats['free_capacity_gb'] == 0,
+            "free_capacity_gb is incorrect")
         expect_cmd = [
             mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
                       poll=False)]
@@ -1169,11 +1219,12 @@ class EMCVNXCLIDriverISCSITestCase(test.TestCase):
 
         self.driver.cli.check_max_pool_luns_threshold = False
         stats = self.driver.get_volume_stats(True)
+        pool_stats = stats['pools'][0]
         self.assertTrue(stats['driver_version'] is not None,
                         "driver_version is not returned")
         self.assertTrue(
-            stats['free_capacity_gb'] == 3257.851,
-            "free_capacity_gb is not correct")
+            pool_stats['free_capacity_gb'] == 3257.851,
+            "free_capacity_gb is incorrect")
 
     @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
                 "CommandLineHelper.create_lun_by_cmd",
@@ -1388,8 +1439,8 @@ Time Remaining:  0 second(s)
             self.testData.test_volume,
             self.testData.connector)
 
-        self.assertEqual(connection_info,
-                         self.testData.iscsi_connection_info_ro)
+        self.assertEqual(self.testData.iscsi_connection_info_ro,
+                         connection_info)
 
         expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
                               poll=False),
@@ -1422,14 +1473,13 @@ Time Remaining:  0 second(s)
             self.testData.PING_OK
         ]
         fake_cli = self.driverSetup(commands, results)
-        test_volume_rw = self.testData.test_volume_rw.copy()
-        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
+        test_volume_rw = self.testData.test_volume_rw
         connection_info = self.driver.initialize_connection(
             test_volume_rw,
             self.testData.connector)
 
-        self.assertEqual(connection_info,
-                         self.testData.iscsi_connection_info_rw)
+        self.assertEqual(self.testData.iscsi_connection_info_rw,
+                         connection_info)
 
         expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
                               poll=False),
@@ -1812,11 +1862,9 @@ Time Remaining:  0 second(s)
         output_migrate_verify = (r'The specified source LUN '
                                  'is not currently migrating', 23)
         commands = [cmd_dest, cmd_dest_p, cmd_migrate,
-                    cmd_migrate_verify,
-                    self.testData.NDU_LIST_CMD]
+                    cmd_migrate_verify]
         results = [output_dest, output_dest, output_migrate,
-                   output_migrate_verify,
-                   self.testData.NDU_LIST_RESULT]
+                   output_migrate_verify]
         fake_cli = self.driverSetup(commands, results)
 
         self.driver.create_cloned_volume(self.testData.test_volume,
@@ -2007,12 +2055,12 @@ Time Remaining:  0 second(s)
             self.testData.test_volume_with_type,
             self.testData.test_existing_ref)
         self.assertTrue(
-            re.match(r'.*not in a manageable pool backend by cinder',
+            re.match(r'.*not managed by the host',
                      ex.msg))
         expected = [mock.call(*get_lun_cmd, poll=True)]
         fake_cli.assert_has_calls(expected)
 
-    def test_manage_existing_get_size_pool_backend(self):
+    def test_manage_existing_get_size(self):
         get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
                        '-state', '-userCap', '-owner',
                        '-attachedSnapshot', '-poolName')
@@ -2044,67 +2092,6 @@ Time Remaining:  0 second(s)
                           self.testData.test_volume_with_type,
                           invaild_ref)
 
-    def test_manage_existing_get_size_array_backend(self):
-        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
-                       '-state', '-status', '-opDetails', '-userCap', '-owner',
-                       '-attachedSnapshot',)
-        test_size = 2
-        commands = [get_lun_cmd]
-        results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
-
-        self.configuration.safe_get = mock.Mock(return_value=None)
-        self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
-        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
-
-        # Mock the command executor
-        fake_command_execute = self.get_command_execute_simulator(
-            commands, results)
-        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
-        self.driver.cli._client.command_execute = fake_cli
-
-        get_size = self.driver.manage_existing_get_size(
-            self.testData.test_volume_with_type,
-            self.testData.test_existing_ref)
-        expected = [mock.call(*get_lun_cmd, poll=True)]
-        assert get_size == test_size
-        fake_cli.assert_has_calls(expected)
-        self.configuration.safe_get = self.fake_safe_get
-
-    def test_manage_existing_with_array_backend(self):
-        """Unit test for the manage_existing with the
-        array backend which is not support the manage
-        existing functinality.
-        """
-        #Set up the array backend
-        config = conf.Configuration(None)
-        config.append_config_values = mock.Mock(return_value=0)
-        config.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
-        config.san_ip = '10.0.0.1'
-        config.san_login = 'sysadmin'
-        config.san_password = 'sysadmin'
-        config.default_timeout = 0.0002
-        config.initiator_auto_registration = True
-        config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
-            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
-        config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
-        self.driver = EMCCLIISCSIDriver(configuration=config)
-        assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
-        #mock the command executor
-        lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
-                          '-newName', 'vol_with_type', '-o')
-        commands = [lun_rename_cmd]
-        results = [SUCCEED]
-        #mock the command executor
-        fake_command_execute = self.get_command_execute_simulator(
-            commands, results)
-        fake_cli = mock.MagicMock(side_effect=fake_command_execute)
-        self.driver.cli._client.command_execute = fake_cli
-        self.driver.manage_existing(
-            self.testData.test_volume_with_type,
-            self.testData.test_existing_ref)
-        expected = [mock.call(*lun_rename_cmd, poll=False)]
-        fake_cli.assert_has_calls(expected)
-
     @mock.patch(
         "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
         mock.Mock(return_value=1))
@@ -2280,7 +2267,8 @@ Time Remaining:  0 second(s)
                                              'unit_test_pool2'},
                          'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
 
-        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
+        host_test_data = {'host':
+                          'ubuntu-server12@pool_backend_1#unit_test_pool2',
                           'capabilities':
                           {'location_info': 'unit_test_pool2|FNM00124500890',
                            'volume_backend_name': 'pool_backend_1',
@@ -2344,14 +2332,15 @@ Time Remaining:  0 second(s)
                                              'unit_test_pool'},
                          'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
 
-        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_1',
-                           'storage_protocol': 'iSCSI'}}
+        host_test_data = {
+            'host': 'host@backendsec#unit_test_pool',
+            'capabilities': {
+                'location_info': 'unit_test_pool|FNM00124500890',
+                'volume_backend_name': 'pool_backend_1',
+                'storage_protocol': 'iSCSI'}}
 
         commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD(poll=False)]
+                    self.testData.SNAP_LIST_CMD()]
         results = [self.testData.NDU_LIST_RESULT,
                    ('No snap', 1023)]
         fake_cli = self.driverSetup(commands, results)
@@ -2396,14 +2385,15 @@ Time Remaining:  0 second(s)
                                              'unit_test_pool'},
                          'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
 
-        host_test_data = {'host': 'ubuntu-server12@pool_backend_2',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500891',
-                           'volume_backend_name': 'pool_backend_2',
-                           'storage_protocol': 'iSCSI'}}
+        host_test_data = {
+            'host': 'ubuntu-server12@pool_backend_2#unit_test_pool',
+            'capabilities':
+                {'location_info': 'unit_test_pool|FNM00124500891',
+                 'volume_backend_name': 'pool_backend_2',
+                 'storage_protocol': 'iSCSI'}}
 
         commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD(poll=False)]
+                    self.testData.SNAP_LIST_CMD()]
         results = [self.testData.NDU_LIST_RESULT,
                    ('No snap', 1023)]
         self.driverSetup(commands, results)
@@ -2455,11 +2445,12 @@ Time Remaining:  0 second(s)
                                              'unit_test_pool'},
                          'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
 
-        host_test_data = {'host': 'ubuntu-server12@pool_backend_2',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_2',
-                           'storage_protocol': 'FC'}}
+        host_test_data = {
+            'host': 'ubuntu-server12@pool_backend_2#unit_test_pool',
+            'capabilities':
+                {'location_info': 'unit_test_pool|FNM00124500890',
+                 'volume_backend_name': 'pool_backend_2',
+                 'storage_protocol': 'FC'}}
 
         commands = [self.testData.NDU_LIST_CMD,
                     self.testData.SNAP_LIST_CMD(),
@@ -2518,14 +2509,15 @@ Time Remaining:  0 second(s)
                                              'unit_test_pool'},
                          'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
 
-        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_1',
-                           'storage_protocol': 'iSCSI'}}
+        host_test_data = {
+            'host': 'ubuntu-server12@pool_backend_1#unit_test_pool',
+            'capabilities':
+                {'location_info': 'unit_test_pool|FNM00124500890',
+                 'volume_backend_name': 'pool_backend_1',
+                 'storage_protocol': 'iSCSI'}}
 
         commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD(poll=False)]
+                    self.testData.SNAP_LIST_CMD()]
         results = [self.testData.NDU_LIST_RESULT,
                    ('Has snap', 0)]
         self.driverSetup(commands, results)
@@ -2570,11 +2562,12 @@ Time Remaining:  0 second(s)
                                              'thin'},
                          'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
 
-        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_1',
-                           'storage_protocol': 'iSCSI'}}
+        host_test_data = {
+            'host': 'ubuntu-server12@pool_backend_1#unit_test_pool',
+            'capabilities':
+                {'location_info': 'unit_test_pool|FNM00124500890',
+                 'volume_backend_name': 'pool_backend_1',
+                 'storage_protocol': 'iSCSI'}}
 
         commands = [self.testData.NDU_LIST_CMD]
         results = [self.testData.NDU_LIST_RESULT]
@@ -2597,15 +2590,14 @@ Time Remaining:  0 second(s)
         mock.Mock(return_value={'fast_cache_enabled': 'True'}))
     def test_create_volume_with_fastcache(self):
         '''enable fastcache when creating volume.'''
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
                     self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
-                    self.testData.NDU_LIST_CMD,
-                    self.testData.CHECK_FASTCACHE_CMD(
-                        self.testData.test_pool_name)]
-        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                    ]
+        results = [self.testData.NDU_LIST_RESULT,
+                   self.testData.POOL_PROPERTY_W_FASTCACHE,
                    self.testData.LUN_PROPERTY('vol_with_type', True),
-                   SUCCEED,
-                   ('FAST Cache:  Enabled', 0)]
+                   ]
         fake_cli = self.driverSetup(commands, results)
 
         lun_info = {'lun_name': "vol_with_type",
@@ -2628,17 +2620,20 @@ Time Remaining:  0 second(s)
         cli_helper.command_execute = fake_cli
         cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
         cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
-        cli_helper.get_pool = mock.Mock(return_value={'lun_nums': 1000,
-                                                      'total_capacity_gb': 10,
-                                                      'free_capacity_gb': 5})
+        cli_helper.get_pool = mock.Mock(return_value={
+            'lun_nums': 1000,
+            'total_capacity_gb': 10,
+            'free_capacity_gb': 5,
+            'pool_name': "unit_test_pool",
+            'fast_cache_enabled': 'True'})
+
         self.driver.update_volume_stats()
         self.driver.create_volume(self.testData.test_volume_with_type)
-        self.assertEqual(self.driver.cli.stats['fast_cache_enabled'], 'True')
+        pool_stats = self.driver.cli.stats['pools'][0]
+        self.assertEqual('True', pool_stats['fast_cache_enabled'])
         expect_cmd = [
             mock.call('connection', '-getport', '-address', '-vlanid',
                       poll=False),
-            mock.call('storagepool', '-list', '-name',
-                      'Pool_02_SASFLASH', '-fastcache', poll=False),
             mock.call('-np', 'lun', '-create', '-capacity',
                       1, '-sq', 'gb', '-poolName', 'Pool_02_SASFLASH',
                       '-name', 'vol_with_type', '-type', 'NonThin')
@@ -2655,13 +2650,13 @@ Time Remaining:  0 second(s)
             'volume_name': 'vol_01',
             'id': '1',
             'name_id': '1',
-            'provider_location': 'system^FNM11111|type^lun|lun_id^1',
+            'provider_location': 'system^FNM11111|type^lun|id^4',
             'project_id': 'project',
             'display_name': 'vol_01',
             'display_description': 'test volume',
             'volume_type_id': None,
             'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]}
-        self.assertEqual(self.driver.cli.get_lun_id(volume_01), 1)
+        self.assertEqual(4, self.driver.cli.get_lun_id(volume_01))
 
     @mock.patch(
         "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
@@ -2681,7 +2676,7 @@ Time Remaining:  0 second(s)
             'display_description': 'test volume',
             'volume_type_id': None,
             'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]}
-        self.assertEqual(self.driver.cli.get_lun_id(volume_02), 2)
+        self.assertEqual(2, self.driver.cli.get_lun_id(volume_02))
 
     def test_create_consistency_group(self):
         cg_name = self.testData.test_cg['id']
@@ -2889,147 +2884,267 @@ Time Remaining:  0 second(s)
                       '-o')]
         fake_cli.assert_has_calls(expect_cmd)
 
-    def succeed_fake_command_execute(self, *command, **kwargv):
-        return SUCCEED
-
-    def fake_setup_command_execute(self, *command, **kwargv):
-        return self.testData.ALL_PORTS
-
-    def fake_get_pool_properties(self, filter_option, properties=None):
-        pool_info = {'pool_name': "unit_test_pool0",
-                     'total_capacity_gb': 1000.0,
-                     'free_capacity_gb': 1000.0
-                     }
-        return pool_info
-
-    def fake_get_lun_properties(self, filter_option, properties=None):
-        lun_info = {'lun_name': "vol1",
-                    'lun_id': 1,
-                    'pool': "unit_test_pool",
-                    'attached_snapshot': "N/A",
-                    'owner': "A",
-                    'total_capacity_gb': 1.0,
-                    'state': "Ready"}
-        return lun_info
 
-    def fake_safe_get(self, value):
-        if value == "storage_vnx_pool_name":
-            return "unit_test_pool"
-        elif 'volume_backend_name' == value:
-            return "namedbackend"
-        else:
-            return None
+class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
+    def setUp(self):
+        super(EMCVNXCLIDArrayBasedDriverTestCase, self).setUp()
+        self.configuration.safe_get = self.fake_safe_get(
+            {'storage_vnx_pool_name': None,
+             'volume_backend_name': 'namedbackend'})
 
+    def generateDriver(self, conf):
+        driver = EMCCLIISCSIDriver(configuration=conf)
+        self.assertTrue(isinstance(driver.cli,
+                                   emc_vnx_cli.EMCVnxCliArray))
+        return driver
 
-class EMCVNXCLIDriverFCTestCase(test.TestCase):
+    def test_get_volume_stats(self):
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_GET_ALL_CMD(True)]
+        results = [self.testData.NDU_LIST_RESULT,
+                   self.testData.POOL_GET_ALL_RESULT(True)]
+        self.driverSetup(commands, results)
+        stats = self.driver.get_volume_stats(True)
 
-    def setUp(self):
-        super(EMCVNXCLIDriverFCTestCase, self).setUp()
+        self.assertTrue(stats['driver_version'] == VERSION,
+                        "driver_version is incorrect")
+        self.assertTrue(
+            stats['storage_protocol'] == 'iSCSI',
+            "storage_protocol is not correct")
+        self.assertTrue(
+            stats['vendor_name'] == "EMC",
+            "vendor name is not correct")
+        self.assertTrue(
+            stats['volume_backend_name'] == "namedbackend",
+            "volume backend name is not correct")
 
-        self.stubs.Set(CommandLineHelper, 'command_execute',
-                       self.fake_setup_command_execute)
-        self.stubs.Set(CommandLineHelper, 'get_array_serial',
-                       mock.Mock(return_value={'array_serial':
-                                               "fakeSerial"}))
-        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
+        self.assertEqual(2, len(stats['pools']))
+        pool_stats1 = stats['pools'][0]
+        expected_pool_stats1 = {
+            'free_capacity_gb': 3105.303,
+            'reserved_percentage': 2,
+            'location_info': 'unit_test_pool1|fakeSerial',
+            'total_capacity_gb': 3281.146,
+            'compression_support': 'True',
+            'deduplication_support': 'True',
+            'thinprovisioning_support': 'True',
+            'consistencygroup_support': 'True',
+            'pool_name': 'unit_test_pool1',
+            'fast_cache_enabled': 'True',
+            'fast_support': 'True'}
+        self.assertEqual(expected_pool_stats1, pool_stats1)
+
+        pool_stats2 = stats['pools'][1]
+        expected_pool_stats2 = {
+            'free_capacity_gb': 3984.768,
+            'reserved_percentage': 2,
+            'location_info': 'unit test pool 2|fakeSerial',
+            'total_capacity_gb': 4099.992,
+            'compression_support': 'True',
+            'deduplication_support': 'True',
+            'thinprovisioning_support': 'True',
+            'consistencygroup_support': 'True',
+            'pool_name': 'unit test pool 2',
+            'fast_cache_enabled': 'False',
+            'fast_support': 'True'}
+        self.assertEqual(expected_pool_stats2, pool_stats2)
+
+    def test_get_volume_stats_wo_fastcache(self):
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_GET_ALL_CMD(False)]
+        results = [self.testData.NDU_LIST_RESULT_WO_LICENSE,
+                   self.testData.POOL_GET_ALL_RESULT(False)]
+        self.driverSetup(commands, results)
 
-        self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
-        self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)
-        self.stubs.Set(emc_vnx_cli, 'INTERVAL_60_SEC', 0.01)
+        stats = self.driver.get_volume_stats(True)
 
-        self.configuration = conf.Configuration(None)
-        self.configuration.append_config_values = mock.Mock(return_value=0)
-        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
-        self.configuration.san_ip = '10.0.0.1'
-        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
-        self.configuration.san_login = 'sysadmin'
-        self.configuration.san_password = 'sysadmin'
-        #set the timeout to 0.012s = 0.0002 * 60 = 1.2ms
-        self.configuration.default_timeout = 0.0002
-        self.configuration.initiator_auto_registration = True
-        self.configuration.check_max_pool_luns_threshold = False
-        self.configuration.zoning_mode = None
-        self.configuration.max_luns_per_storage_pool = 4000
-        self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
-        self.testData = EMCVNXCLIDriverTestData()
-        self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
-            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
+        self.assertEqual(2, len(stats['pools']))
+        pool_stats1 = stats['pools'][0]
+        expected_pool_stats1 = {
+            'free_capacity_gb': 3105.303,
+            'reserved_percentage': 2,
+            'location_info': 'unit_test_pool1|fakeSerial',
+            'total_capacity_gb': 3281.146,
+            'compression_support': 'False',
+            'deduplication_support': 'False',
+            'thinprovisioning_support': 'False',
+            'consistencygroup_support': 'False',
+            'pool_name': 'unit_test_pool1',
+            'fast_cache_enabled': 'False',
+            'fast_support': 'False'}
+        self.assertEqual(expected_pool_stats1, pool_stats1)
+
+        pool_stats2 = stats['pools'][1]
+        expected_pool_stats2 = {
+            'free_capacity_gb': 3984.768,
+            'reserved_percentage': 2,
+            'location_info': 'unit test pool 2|fakeSerial',
+            'total_capacity_gb': 4099.992,
+            'compression_support': 'False',
+            'deduplication_support': 'False',
+            'thinprovisioning_support': 'False',
+            'consistencygroup_support': 'False',
+            'pool_name': 'unit test pool 2',
+            'fast_cache_enabled': 'False',
+            'fast_support': 'False'}
+        self.assertEqual(expected_pool_stats2, pool_stats2)
 
-    def driverSetup(self, commands=tuple(), results=tuple()):
-        self.driver = EMCCLIFCDriver(configuration=self.configuration)
-        fake_command_execute = self.get_command_execute_simulator(
-            commands, results)
-        fake_cli = mock.Mock(side_effect=fake_command_execute)
-        self.driver.cli._client.command_execute = fake_cli
-        return fake_cli
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
+    def test_create_volume_deduplicated(self):
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type')]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True)]
 
-    def get_command_execute_simulator(self, commands=tuple(),
-                                      results=tuple()):
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        # Case
+        self.driver.create_volume(self.testData.test_volume_with_type)
 
-        assert(len(commands) == len(results))
+        # Verification
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol_with_type', 1,
+                'unit_test_pool',
+                'deduplicated', None, False)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                      poll=False)]
+        fake_cli.assert_has_calls(expect_cmd)
 
-        def fake_command_execute(*args, **kwargv):
-            for i in range(len(commands)):
-                if args == commands[i]:
-                    if isinstance(results[i], list):
-                        if len(results[i]) > 0:
-                            ret = results[i][0]
-                            del results[i][0]
-                            return ret
-                    else:
-                        return results[i]
-            return self.standard_fake_command_execute(*args, **kwargv)
-        return fake_command_execute
+    def test_get_pool(self):
+        testVolume = self.testData.test_volume_with_type
+        commands = [self.testData.LUN_PROPERTY_POOL_CMD(testVolume['name'])]
+        results = [self.testData.LUN_PROPERTY(testVolume['name'], False)]
+        fake_cli = self.driverSetup(commands, results)
+        pool = self.driver.get_pool(testVolume)
+        self.assertEqual('Pool_02_SASFLASH', pool)
+        fake_cli.assert_has_calls(
+            [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
+                testVolume['name']), poll=False)])
+
+    def test_get_target_pool_for_cloned_volume(self):
+        testSrcVolume = self.testData.test_volume
+        testNewVolume = self.testData.test_volume2
+        fake_cli = self.driverSetup()
+        pool = self.driver.cli.get_target_storagepool(testNewVolume,
+                                                      testSrcVolume)
+        self.assertEqual('unit_test_pool', pool)
+        self.assertFalse(fake_cli.called)
+
+    def test_get_target_pool_for_clone_legacy_volume(self):
+        testSrcVolume = self.testData.test_legacy_volume
+        testNewVolume = self.testData.test_volume2
+        commands = [self.testData.LUN_PROPERTY_POOL_CMD(testSrcVolume['name'])]
+        results = [self.testData.LUN_PROPERTY(testSrcVolume['name'], False)]
+        fake_cli = self.driverSetup(commands, results)
+        pool = self.driver.cli.get_target_storagepool(testNewVolume,
+                                                      testSrcVolume)
+        self.assertEqual('Pool_02_SASFLASH', pool)
+        fake_cli.assert_has_calls(
+            [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
+                testSrcVolume['name']), poll=False)])
+
+    def test_manage_existing_get_size(self):
+        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+                       '-state', '-userCap', '-owner',
+                       '-attachedSnapshot', '-poolName')
+        test_size = 2
+        commands = [get_lun_cmd]
+        results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
+        fake_cli = self.driverSetup(commands, results)
+        test_volume = self.testData.test_volume2.copy()
+        test_volume['host'] = "host@backendsec#Pool_02_SASFLASH"
+        get_size = self.driver.manage_existing_get_size(
+            test_volume,
+            self.testData.test_existing_ref)
+        expected = [mock.call(*get_lun_cmd, poll=True)]
+        self.assertEqual(test_size, get_size)
+        fake_cli.assert_has_calls(expected)
 
-    def standard_fake_command_execute(self, *args, **kwargv):
-        standard_commands = [
-            self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
-            self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
-            self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
-            self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
-            self.testData.POOL_PROPERTY_CMD]
+    def test_manage_existing_get_size_incorrect_pool(self):
+        """Test manage_existing function of driver with an invalid pool."""
 
-        standard_results = [
-            self.testData.LUN_PROPERTY('vol1'),
-            self.testData.LUN_PROPERTY('vol2'),
-            self.testData.LUN_PROPERTY('vol-vol1'),
-            self.testData.LUN_PROPERTY('snapshot1'),
-            self.testData.POOL_PROPERTY]
+        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
+                       '-state', '-userCap', '-owner',
+                       '-attachedSnapshot', '-poolName')
+        commands = [get_lun_cmd]
+        results = [self.testData.LUN_PROPERTY('lun_name')]
+        fake_cli = self.driverSetup(commands, results)
+        test_volume = self.testData.test_volume2.copy()
+        test_volume['host'] = "host@backendsec#fake_pool"
+        ex = self.assertRaises(
+            exception.ManageExistingInvalidReference,
+            self.driver.manage_existing_get_size,
+            self.testData.test_volume_with_type,
+            self.testData.test_existing_ref)
+        self.assertTrue(
+            re.match(r'.*not managed by the host',
+                     ex.msg))
+        expected = [mock.call(*get_lun_cmd, poll=True)]
+        fake_cli.assert_has_calls(expected)
 
-        standard_default = SUCCEED
-        for i in range(len(standard_commands)):
-            if args == standard_commands[i]:
-                return standard_results[i]
+    def test_manage_existing(self):
+        lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
+                          '-newName', 'vol_with_type', '-o')
+        commands = [lun_rename_cmd]
+        results = [SUCCEED]
+        fake_cli = self.driverSetup(commands, results)
+        self.driver.manage_existing(
+            self.testData.test_volume_with_type,
+            self.testData.test_existing_ref)
+        expected = [mock.call(*lun_rename_cmd, poll=False)]
+        fake_cli.assert_has_calls(expected)
 
-        return standard_default
+    @mock.patch(
+        "eventlet.event.Event.wait",
+        mock.Mock(return_value=None))
+    @mock.patch(
+        "cinder.volume.volume_types."
+        "get_volume_type_extra_specs",
+        mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
+                                'storagetype:pool': 'unit_test_pool'}))
+    def test_create_compression_volume(self):
+        commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
+                    self.testData.NDU_LIST_CMD]
+        results = [self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.LUN_PROPERTY('vol_with_type', True),
+                   self.testData.NDU_LIST_RESULT]
 
-    def fake_setup_command_execute(self, *command, **kwargv):
-        return self.testData.ALL_PORTS
+        fake_cli = self.driverSetup(commands, results)
 
-    def fake_get_pool_properties(self, filter_option, properties=None):
-        pool_info = {'pool_name': "unit_test_pool0",
-                     'total_capacity_gb': 1000.0,
-                     'free_capacity_gb': 1000.0
-                     }
-        return pool_info
+        self.driver.cli.stats['compression_support'] = 'True'
+        self.driver.cli.enablers = ['-Compression',
+                                    '-Deduplication',
+                                    '-ThinProvisioning',
+                                    '-FAST']
+        # Case
+        self.driver.create_volume(self.testData.test_volume_with_type)
+        # Verification
+        expect_cmd = [
+            mock.call(*self.testData.LUN_CREATION_CMD(
+                'vol_with_type', 1,
+                'unit_test_pool',
+                'compressed', None, False)),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                'vol_with_type'), poll=False),
+            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
+                'vol_with_type'), poll=True),
+            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
+                1))]
+        fake_cli.assert_has_calls(expect_cmd)
 
-    def fake_get_lun_properties(self, filter_option, properties=None):
-        lun_info = {'lun_name': "vol1",
-                    'lun_id': 1,
-                    'pool': "unit_test_pool",
-                    'attached_snapshot': "N/A",
-                    'owner': "A",
-                    'total_capacity_gb': 1.0,
-                    'state': "Ready"}
-        return lun_info
 
-    def fake_safe_get(self, value):
-        if value == "storage_vnx_pool_name":
-            return "unit_test_pool"
-        elif 'volume_backend_name' == value:
-            return "namedbackend"
-        else:
-            return None
+class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
+    def generateDriver(self, conf):
+        return EMCCLIFCDriver(configuration=conf)
 
     @mock.patch(
         "oslo_concurrency.processutils.execute",
@@ -3137,10 +3252,10 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
             self.testData.test_volume,
             self.testData.connector)
 
-        self.assertEqual(conn_info['data']['initiator_target_map'],
-                         EMCVNXCLIDriverTestData.i_t_map)
-        self.assertEqual(conn_info['data']['target_wwn'],
-                         ['1122334455667777'])
+        self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
+                         conn_info['data']['initiator_target_map'])
+        self.assertEqual(['1122334455667777'],
+                         conn_info['data']['target_wwn'])
         expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost',
                               poll=False),
                     mock.call('storagegroup', '-create', '-gname', 'fakehost'),
@@ -3202,51 +3317,61 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
             self.testData.connector)
         self.assertTrue('initiator_target_map' in connection_info['data'],
                         'initiator_target_map should be populated.')
-        self.assertEqual(connection_info['data']['initiator_target_map'],
-                         EMCVNXCLIDriverTestData.i_t_map)
+        self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
+                         connection_info['data']['initiator_target_map'])
 
     def test_get_volume_stats(self):
-        self.driverSetup()
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_PROPERTY_W_FASTCACHE_CMD]
+        results = [self.testData.NDU_LIST_RESULT,
+                   self.testData.POOL_PROPERTY_W_FASTCACHE]
+        self.driverSetup(commands, results)
         stats = self.driver.get_volume_stats(True)
-        self.assertTrue(stats['driver_version'] is not None,
-                        "driver_version is not returned")
-        self.assertTrue(
-            stats['free_capacity_gb'] == 3257.851,
-            "free_capacity_gb is not correct")
-        self.assertTrue(
-            stats['reserved_percentage'] == 3,
-            "reserved_percentage is not correct")
+
+        self.assertTrue(stats['driver_version'] == VERSION,
+                        "driver_version is incorrect")
         self.assertTrue(
             stats['storage_protocol'] == 'FC',
-            "storage_protocol is not correct")
-        self.assertTrue(
-            stats['total_capacity_gb'] == 3281.146,
-            "total_capacity_gb is not correct")
+            "storage_protocol is incorrect")
         self.assertTrue(
             stats['vendor_name'] == "EMC",
-            "vender name is not correct")
+            "vendor name is incorrect")
         self.assertTrue(
             stats['volume_backend_name'] == "namedbackend",
-            "volume backend name is not correct")
-        self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
-        self.assertTrue(
-            stats['driver_version'] == "05.01.00",
-            "driver version is incorrect.")
+            "volume backend name is incorrect")
+
+        pool_stats = stats['pools'][0]
+
+        expected_pool_stats = {
+            'free_capacity_gb': 3257.851,
+            'reserved_percentage': 3,
+            'location_info': 'unit_test_pool|fakeSerial',
+            'total_capacity_gb': 3281.146,
+            'compression_support': 'True',
+            'deduplication_support': 'True',
+            'thinprovisioning_support': 'True',
+            'consistencygroup_support': 'True',
+            'pool_name': 'unit_test_pool',
+            'fast_cache_enabled': 'True',
+            'fast_support': 'True'}
+
+        self.assertEqual(expected_pool_stats, pool_stats)
 
     def test_get_volume_stats_too_many_luns(self):
-        commands = [self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
-        results = [self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
+        commands = [self.testData.NDU_LIST_CMD,
+                    self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
+                    self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
+        results = [self.testData.NDU_LIST_RESULT,
+                   self.testData.POOL_PROPERTY_W_FASTCACHE,
+                   self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
         fake_cli = self.driverSetup(commands, results)
 
         self.driver.cli.check_max_pool_luns_threshold = True
         stats = self.driver.get_volume_stats(True)
+        pool_stats = stats['pools'][0]
         self.assertTrue(
-            stats['free_capacity_gb'] == 0,
-            "free_capacity_gb is not correct")
-        expect_cmd = [
-            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
-                      poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
+            pool_stats['free_capacity_gb'] == 0,
+            "free_capacity_gb is incorrect")
         expect_cmd = [
             mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
                       poll=False)]
@@ -3254,11 +3379,12 @@ class EMCVNXCLIDriverFCTestCase(test.TestCase):
 
         self.driver.cli.check_max_pool_luns_threshold = False
         stats = self.driver.get_volume_stats(True)
+        pool_stats = stats['pools'][0]
         self.assertTrue(stats['driver_version'] is not None,
-                        "driver_version is not returned")
+                        "driver_version is incorrect")
         self.assertTrue(
-            stats['free_capacity_gb'] == 3257.851,
-            "free_capacity_gb is not correct")
+            pool_stats['free_capacity_gb'] == 3257.851,
+            "free_capacity_gb is incorrect")
 
     def test_deregister_initiator(self):
         fake_cli = self.driverSetup()
@@ -3330,7 +3456,7 @@ class EMCVNXCLIToggleSPTestCase(test.TestCase):
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
             self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.10")
+            self.assertEqual("10.10.10.10", self.cli_client.active_storage_ip)
             expected = [
                 mock.call(*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
                           + FAKE_COMMAND), check_exit_code=True)]
@@ -3350,7 +3476,7 @@ Message : HTTP/1.1 503 Service Unavailable"""
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
             self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
             expected = [
                 mock.call(
                     *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
@@ -3376,7 +3502,7 @@ Message : End of data stream"""
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
             self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
             expected = [
                 mock.call(
                     *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
@@ -3404,7 +3530,7 @@ Unable to establish a secure connection to the Management Server.
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
             self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
             expected = [
                 mock.call(
                     *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
@@ -3430,7 +3556,7 @@ Message : Error occurred because of time out"""
         with mock.patch('cinder.utils.execute') as mock_utils:
             mock_utils.side_effect = SIDE_EFFECTS
             self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
+            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
             expected = [
                 mock.call(
                     *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
index e1906583265ffd1766e9eee17611f418c49cc2fd..2929e9d783ba26902d88ada97c50b8d1504e1451 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
+# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -52,6 +52,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
                 Force Deleting LUN in Storage Groups,
                 robust enhancement
         5.1.0 - iSCSI multipath enhancement
+        5.2.0 - Pool-aware scheduler support
     """
 
     def __init__(self, *args, **kwargs):
@@ -234,3 +235,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
     def delete_cgsnapshot(self, context, cgsnapshot):
         """Deletes a cgsnapshot."""
         return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
+
+    def get_pool(self, volume):
+        """Returns the pool name of a volume."""
+        return self.cli.get_pool(volume)
index cc0b9f78ee8535c6df81a7fcd19a9c43f8579814..6049bd26975489edc011fa5234fc9e0f3df239da 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
+# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -49,6 +49,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
                 Force Deleting LUN in Storage Groups,
                 robust enhancement
         5.1.0 - iSCSI multipath enhancement
+        5.2.0 - Pool-aware scheduler support
     """
 
     def __init__(self, *args, **kwargs):
@@ -212,3 +213,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
     def delete_cgsnapshot(self, context, cgsnapshot):
         """Deletes a cgsnapshot."""
         return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
+
+    def get_pool(self, volume):
+        """Returns the pool name of a volume."""
+        return self.cli.get_pool(volume)
index fe2d40e7b416392f9ce94fec688a0482cdf49a41..ff3d1f2b44c8a5b45d38294c831e3bb84657b2da 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
+# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -44,6 +44,7 @@ from cinder import utils
 from cinder.volume.configuration import Configuration
 from cinder.volume.drivers.san import san
 from cinder.volume import manager
+from cinder.volume import utils as vol_utils
 from cinder.volume import volume_types
 
 CONF = cfg.CONF
@@ -225,6 +226,11 @@ class CommandLineHelper(object):
         'Available Capacity *\(GBs\) *:\s*(.*)\s*',
         'free_capacity_gb',
         float)
+    POOL_FAST_CACHE = PropertyDescriptor(
+        '-fastcache',
+        'FAST Cache:\s*(.*)\s*',
+        'fast_cache_enabled',
+        lambda value: 'True' if value == 'Enabled' else 'False')
     POOL_NAME = PropertyDescriptor(
         '-name',
         'Pool Name:\s*(.*)\s*',
@@ -321,7 +327,6 @@ class CommandLineHelper(object):
             LOG.info(_LI("iscsi_initiators: %s"), self.iscsi_initiator_map)
 
         # extra spec constants
-        self.pool_spec = 'storagetype:pool'
         self.tiering_spec = 'storagetype:tiering'
         self.provisioning_spec = 'storagetype:provisioning'
         self.provisioning_values = {
@@ -1037,8 +1042,9 @@ class CommandLineHelper(object):
                                        properties, poll=poll)
         return data
 
-    def get_pool(self, name, poll=True):
+    def get_pool(self, name, properties=POOL_ALL, poll=True):
         data = self.get_pool_properties(('-name', name),
+                                        properties=properties,
                                         poll=poll)
         return data
 
@@ -1125,27 +1131,29 @@ class CommandLineHelper(object):
         else:
             return False
 
-    def get_pool_list(self, poll=True):
+    def get_pool_list(self, properties=POOL_ALL, poll=True):
         temp_cache = []
-        cmd = ('storagepool', '-list', '-availableCap', '-state')
-        out, rc = self.command_execute(*cmd, poll=poll)
+        list_cmd = ('storagepool', '-list')
+        for prop in properties:
+            list_cmd += (prop.option,)
+        output_properties = [self.POOL_NAME] + properties
+        out, rc = self.command_execute(*list_cmd, poll=poll)
         if rc != 0:
-            self._raise_cli_error(cmd, rc, out)
+            self._raise_cli_error(list_cmd, rc, out)
 
         try:
             for pool in out.split('\n\n'):
                 if len(pool.strip()) == 0:
                     continue
                 obj = {}
-                obj['name'] = self._get_property_value(pool, self.POOL_NAME)
-                obj['free_space'] = self._get_property_value(
-                    pool, self.POOL_FREE_CAPACITY)
+                for prop in output_properties:
+                    obj[prop.key] = self._get_property_value(pool, prop)
                 temp_cache.append(obj)
         except Exception as ex:
             LOG.error(_LE("Error happened during storage pool querying, %s."),
                       ex)
             # NOTE: Do not want to continue raise the exception
-            # as the pools may temporarly unavailable
+            # as the pools may be temporarily unavailable
             pass
         return temp_cache
 
@@ -1549,12 +1557,9 @@ class CommandLineHelper(object):
 class EMCVnxCliBase(object):
     """This class defines the functions to use the native CLI functionality."""
 
-    VERSION = '05.01.00'
+    VERSION = '05.02.00'
     stats = {'driver_version': VERSION,
-             'free_capacity_gb': 'unknown',
-             'reserved_percentage': 0,
              'storage_protocol': None,
-             'total_capacity_gb': 'unknown',
              'vendor_name': 'EMC',
              'volume_backend_name': None,
              'compression_support': 'False',
@@ -1601,12 +1606,9 @@ class EMCVnxCliBase(object):
         if self.force_delete_lun_in_sg:
             LOG.warning(_LW("force_delete_lun_in_storagegroup=True"))
 
-    def get_target_storagepool(self, volume, source_volume_name=None):
+    def get_target_storagepool(self, volume, source_volume=None):
         raise NotImplementedError
 
-    def dumps_provider_location(self, pl_dict):
-        return '|'.join([k + '^' + pl_dict[k] for k in pl_dict])
-
     def get_array_serial(self):
         if not self.array_serial:
             self.array_serial = self._client.get_array_serial()
@@ -1622,7 +1624,7 @@ class EMCVnxCliBase(object):
             volume_size = snapshot['volume_size']
             dest_volume_name = volume_name + '_dest'
 
-            pool_name = self.get_target_storagepool(volume, source_volume_name)
+            pool_name = self.get_target_storagepool(volume, snapshot['volume'])
             specs = self.get_volumetype_extraspecs(volume)
             provisioning, tiering = self._get_extra_spec_value(specs)
             store_spec = {
@@ -1665,24 +1667,24 @@ class EMCVnxCliBase(object):
         data = self._client.create_lun_with_advance_feature(
             pool, volume_name, volume_size,
             provisioning, tiering, volume['consistencygroup_id'], False)
-        pl_dict = {'system': self.get_array_serial(),
-                   'type': 'lun',
-                   'id': str(data['lun_id'])}
         model_update = {'provider_location':
-                        self.dumps_provider_location(pl_dict)}
-        volume['provider_location'] = model_update['provider_location']
+                        self._build_provider_location_for_lun(data['lun_id'])}
+
         return model_update
 
     def _volume_creation_check(self, volume):
-        """This function will perform the check on the
-        extra spec before the volume can be created. The
-        check is a common check between the array based
-        and pool based backend.
-        """
-
+        """Checks on extra spec before the volume can be created."""
         specs = self.get_volumetype_extraspecs(volume)
-        provisioning, tiering = self._get_extra_spec_value(specs)
+        self._get_and_validate_extra_specs(specs)
+
+    def _get_and_validate_extra_specs(self, specs):
+        """Checks on extra specs combinations."""
+        if "storagetype:pool" in specs:
+            LOG.warning(_LW("Extra spec key 'storagetype:pool' is obsolete "
+                            "since driver version 5.2.0. This key will be "
+                            "ignored."))
 
+        provisioning, tiering = self._get_extra_spec_value(specs)
         # step 1: check extra spec value
         if provisioning:
             self._check_extra_spec_value(
@@ -1694,7 +1696,8 @@ class EMCVnxCliBase(object):
                 self._client.tiering_values.keys())
 
         # step 2: check extra spec combination
-        self._check_extra_spec_combination(specs)
+        self._check_extra_spec_combination(provisioning, tiering)
+        return provisioning, tiering
 
     def _check_extra_spec_value(self, extra_spec, valid_values):
         """Checks whether an extra spec's value is valid."""
@@ -1719,12 +1722,9 @@ class EMCVnxCliBase(object):
 
         return provisioning, tiering
 
-    def _check_extra_spec_combination(self, extra_specs):
+    def _check_extra_spec_combination(self, provisioning, tiering):
         """Checks whether extra spec combination is valid."""
-
-        provisioning, tiering = self._get_extra_spec_value(extra_specs)
         enablers = self.enablers
-
         # check provisioning and tiering
         # deduplicated and tiering can not be both enabled
         if provisioning == 'deduplicated' and tiering is not None:
@@ -1812,23 +1812,6 @@ class EMCVnxCliBase(object):
                             "target_array_serial."))
             return false_ret
 
-        if len(target_pool_name) == 0:
-            # if retype, try to get the pool of the volume
-            # when it's array-based
-            if new_type:
-                if 'storagetype:pool' in new_type['extra_specs']\
-                        and new_type['extra_specs']['storagetype:pool']\
-                        is not None:
-                    target_pool_name = \
-                        new_type['extra_specs']['storagetype:pool']
-                else:
-                    target_pool_name = self._client.get_pool_name_of_lun(
-                        volume['name'])
-
-        if len(target_pool_name) == 0:
-            LOG.debug("Skip storage-assisted migration because "
-                      "it doesn't support array backend .")
-            return false_ret
         # source and destination should be on same array
         array_serial = self.get_array_serial()
         if target_array_serial != array_serial:
@@ -1836,7 +1819,17 @@ class EMCVnxCliBase(object):
                       'target and source backend are not managing'
                       'the same array.')
             return false_ret
-        # same protocol should be used if volume is in-use
+
+        if len(target_pool_name) == 0:
+            # Destination host is using a legacy driver
+            LOG.warning(_LW("Didn't get the pool information of the "
+                            "host %(s). Storage assisted Migration is not "
+                            "supported. The host may be using a legacy "
+                            "driver."),
+                        host['name'])
+            return false_ret
+
+        # Same protocol should be used if volume is in-use
         if host['capabilities']['storage_protocol'] != self.protocol \
                 and self._get_original_status(volume) == 'in-use':
             LOG.debug('Skip storage-assisted migration because '
@@ -1889,25 +1882,15 @@ class EMCVnxCliBase(object):
 
     def retype(self, ctxt, volume, new_type, diff, host):
         new_specs = new_type['extra_specs']
-        new_provisioning, new_tiering = self._get_extra_spec_value(
-            new_specs)
 
-        # validate new_type
-        if new_provisioning:
-            self._check_extra_spec_value(
-                new_provisioning,
-                self._client.provisioning_values.keys())
-        if new_tiering:
-            self._check_extra_spec_value(
-                new_tiering,
-                self._client.tiering_values.keys())
-        self._check_extra_spec_combination(new_specs)
+        new_provisioning, new_tiering = (
+            self._get_and_validate_extra_specs(new_specs))
 
-        # check what changes are needed
+        # Check what changes are needed
         migration, tiering_change = self.determine_changes_when_retype(
             volume, new_type, host)
 
-        # reject if volume has snapshot when migration is needed
+        # Reject if volume has snapshot when migration is needed
         if migration and self._client.check_lun_has_snap(
                 self.get_lun_id(volume)):
             LOG.debug('Driver is not able to do retype because the volume '
@@ -1915,7 +1898,7 @@ class EMCVnxCliBase(object):
             return False
 
         if migration:
-            # check whether the migration is valid
+            # Check whether the migration is valid
             is_valid, target_pool_name = (
                 self._is_valid_for_storage_assisted_migration(
                     volume, host, new_type))
@@ -1928,13 +1911,13 @@ class EMCVnxCliBase(object):
                                     'retype.'))
                     return False
             else:
-                # migration is invalid
+                # Migration is invalid
                 LOG.debug('Driver is not able to do retype due to '
                           'storage-assisted migration is not valid '
                           'in this situation.')
                 return False
-        elif not migration and tiering_change:
-            # modify lun to change tiering policy
+        elif tiering_change:
+            # Modify lun to change tiering policy
             self._client.modify_lun_tiering(volume['name'], new_tiering)
             return True
         else:
@@ -1947,22 +1930,14 @@ class EMCVnxCliBase(object):
         old_specs = self.get_volumetype_extraspecs(volume)
         old_provisioning, old_tiering = self._get_extra_spec_value(
             old_specs)
-        old_pool = self.get_specific_extra_spec(
-            old_specs,
-            self._client.pool_spec)
 
         new_specs = new_type['extra_specs']
         new_provisioning, new_tiering = self._get_extra_spec_value(
             new_specs)
-        new_pool = self.get_specific_extra_spec(
-            new_specs,
-            self._client.pool_spec)
 
         if volume['host'] != host['host'] or \
                 old_provisioning != new_provisioning:
             migration = True
-        elif new_pool and new_pool != old_pool:
-            migration = True
 
         if new_tiering != old_tiering:
             tiering_change = True
@@ -1982,36 +1957,73 @@ class EMCVnxCliBase(object):
                 return False
         return True
 
+    def _build_pool_stats(self, pool):
+        pool_stats = {}
+        pool_stats['pool_name'] = pool['pool_name']
+        pool_stats['total_capacity_gb'] = pool['total_capacity_gb']
+        pool_stats['reserved_percentage'] = 0
+        pool_stats['free_capacity_gb'] = pool['free_capacity_gb']
+        # Some extra capacity will be used by meta data of pool LUNs.
+        # The overhead is about LUN_Capacity * 0.02 + 3 GB
+        # reserved_percentage will be used to make sure the scheduler
+        # takes the overhead into consideration.
+        # Assume that all the remaining capacity is to be used to create
+        # a thick LUN, reserved_percentage is estimated as follows:
+        reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
+                     (1.02 * pool['total_capacity_gb'])) * 100)
+        pool_stats['reserved_percentage'] = int(math.ceil(min(reserved, 100)))
+        if self.check_max_pool_luns_threshold:
+            pool_feature = self._client.get_pool_feature_properties(poll=False)
+            if (pool_feature['max_pool_luns']
+                    <= pool_feature['total_pool_luns']):
+                LOG.warning(_LW("Maximum number of Pool LUNs, %s, "
+                                "have been created. "
+                                "No more LUN creation can be done."),
+                            pool_feature['max_pool_luns'])
+                pool_stats['free_capacity_gb'] = 0
+
+        array_serial = self.get_array_serial()
+        pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
+                                       {'pool_name': pool['pool_name'],
+                                        'array_serial': array_serial})
+        # Check if this pool's fast_cache is enabled
+        if 'fast_cache_enabled' not in pool:
+            pool_stats['fast_cache_enabled'] = 'False'
+        else:
+            pool_stats['fast_cache_enabled'] = pool['fast_cache_enabled']
+
+        # Copy advanced feature stats from backend stats
+        pool_stats['compression_support'] = self.stats['compression_support']
+        pool_stats['fast_support'] = self.stats['fast_support']
+        pool_stats['deduplication_support'] = (
+            self.stats['deduplication_support'])
+        pool_stats['thinprovisioning_support'] = (
+            self.stats['thinprovisioning_support'])
+        pool_stats['consistencygroup_support'] = (
+            self.stats['consistencygroup_support'])
+
+        return pool_stats
+
+    @log_enter_exit
     def update_volume_stats(self):
-        """Update the common status share with pool and
-        array backend.
-        """
+        """Gets the common stats shared by pool and array backend."""
         if not self.determine_all_enablers_exist(self.enablers):
             self.enablers = self._client.get_enablers_on_array()
-        if '-Compression' in self.enablers:
-            self.stats['compression_support'] = 'True'
-        else:
-            self.stats['compression_support'] = 'False'
-        if '-FAST' in self.enablers:
-            self.stats['fast_support'] = 'True'
-        else:
-            self.stats['fast_support'] = 'False'
-        if '-Deduplication' in self.enablers:
-            self.stats['deduplication_support'] = 'True'
-        else:
-            self.stats['deduplication_support'] = 'False'
-        if '-ThinProvisioning' in self.enablers:
-            self.stats['thinprovisioning_support'] = 'True'
-        else:
-            self.stats['thinprovisioning_support'] = 'False'
-        if '-FASTCache' in self.enablers:
-            self.stats['fast_cache_enabled'] = 'True'
-        else:
-            self.stats['fast_cache_enabled'] = 'False'
-        if '-VNXSnapshots' in self.enablers:
-            self.stats['consistencygroup_support'] = 'True'
-        else:
-            self.stats['consistencygroup_support'] = 'False'
+
+        self.stats['compression_support'] = (
+            'True' if '-Compression' in self.enablers else 'False')
+
+        self.stats['fast_support'] = (
+            'True' if '-FAST' in self.enablers else 'False')
+
+        self.stats['deduplication_support'] = (
+            'True' if '-Deduplication' in self.enablers else 'False')
+
+        self.stats['thinprovisioning_support'] = (
+            'True' if '-ThinProvisioning' in self.enablers else 'False')
+
+        self.stats['consistencygroup_support'] = (
+            'True' if '-VNXSnapshots' in self.enablers else 'False')
 
         if self.protocol == 'iSCSI':
             self.iscsi_targets = self._client.get_iscsi_targets(poll=False)
@@ -2051,7 +2063,6 @@ class EMCVnxCliBase(object):
         4. Start a migration between the SMP and the temp lun.
         """
         self._volume_creation_check(volume)
-        array_serial = self.get_array_serial()
         flow_name = 'create_volume_from_snapshot'
         work_flow = linear_flow.Flow(flow_name)
         store_spec = self._construct_store_spec(volume, snapshot)
@@ -2063,18 +2074,19 @@ class EMCVnxCliBase(object):
                                             store=store_spec)
         flow_engine.run()
         new_lun_id = flow_engine.storage.fetch('new_lun_id')
-        pl_dict = {'system': array_serial,
-                   'type': 'lun',
-                   'id': str(new_lun_id)}
         model_update = {'provider_location':
-                        self.dumps_provider_location(pl_dict)}
-        volume['provider_location'] = model_update['provider_location']
+                        self._build_provider_location_for_lun(new_lun_id)}
+        volume_host = volume['host']
+        host = vol_utils.extract_host(volume_host, 'backend')
+        host_and_pool = vol_utils.append_host(host, store_spec['pool_name'])
+        if volume_host != host_and_pool:
+            model_update['host'] = host_and_pool
+
         return model_update
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
         self._volume_creation_check(volume)
-        array_serial = self.get_array_serial()
         source_volume_name = src_vref['name']
         source_lun_id = self.get_lun_id(src_vref)
         volume_size = src_vref['size']
@@ -2113,13 +2125,41 @@ class EMCVnxCliBase(object):
         else:
             self.delete_snapshot(snapshot)
 
-        pl_dict = {'system': array_serial,
-                   'type': 'lun',
-                   'id': str(new_lun_id)}
         model_update = {'provider_location':
-                        self.dumps_provider_location(pl_dict)}
+                        self._build_provider_location_for_lun(new_lun_id)}
+        volume_host = volume['host']
+        host = vol_utils.extract_host(volume_host, 'backend')
+        host_and_pool = vol_utils.append_host(host, store_spec['pool_name'])
+        if volume_host != host_and_pool:
+            model_update['host'] = host_and_pool
+
         return model_update
 
+    def dumps_provider_location(self, pl_dict):
+        return '|'.join([k + '^' + pl_dict[k] for k in pl_dict])
+
+    def _build_provider_location_for_lun(self, lun_id):
+        pl_dict = {'system': self.get_array_serial(),
+                   'type': 'lun',
+                   'id': six.text_type(lun_id),
+                   'version': self.VERSION}
+        return self.dumps_provider_location(pl_dict)
+
+    def _extract_provider_location_for_lun(self, provider_location, key='id'):
+        """Extracts value of the specified field from provider_location string.
+
+        :param provider_location: provider_location string
+        :param key: field name of the value that to be extracted
+        :return: value of the specified field if it exists, otherwise,
+                 None is returned
+        """
+
+        kvps = provider_location.split('|')
+        for kvp in kvps:
+            fields = kvp.split('^')
+            if len(fields) == 2 and fields[0] == key:
+                return fields[1]
+
     def create_consistencygroup(self, context, group):
         """Creates a consistency group."""
         LOG.info(_LI('Start to create consistency group: %(group_name)s '
@@ -2217,10 +2257,14 @@ class EMCVnxCliBase(object):
     def get_lun_id(self, volume):
         lun_id = None
         try:
-            if volume.get('provider_location') is not None:
-                lun_id = int(
-                    volume['provider_location'].split('|')[2].split('^')[1])
-            if not lun_id:
+            provider_location = volume.get('provider_location')
+            if provider_location:
+                lun_id = self._extract_provider_location_for_lun(
+                    provider_location,
+                    'id')
+            if lun_id:
+                lun_id = int(lun_id)
+            else:
                 LOG.debug('Lun id is not stored in provider location, '
                           'query it.')
                 lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
@@ -2707,7 +2751,7 @@ class EMCVnxCliBase(object):
         return do_terminate_connection()
 
     def manage_existing_get_size(self, volume, ref):
-        """Return size of volume to be managed by manage_existing."""
+        """Returns size of volume to be managed by manage_existing."""
 
         # Check that the reference is valid
         if 'id' not in ref:
@@ -2717,9 +2761,21 @@ class EMCVnxCliBase(object):
                 reason=reason)
 
         # Check for existence of the lun
-        data = self._client.get_lun_by_id(ref['id'])
+        data = self._client.get_lun_by_id(
+            ref['id'],
+            properties=self._client.LUN_WITH_POOL)
         if data is None:
-            reason = _('Find no lun with the specified lun_id.')
+            reason = _('Find no lun with the specified id %s.') % ref['id']
+            raise exception.ManageExistingInvalidReference(existing_ref=ref,
+                                                           reason=reason)
+
+        pool = self.get_target_storagepool(volume, None)
+        if pool and data['pool'] != pool:
+            reason = (_('The input lun %(lun_id)s is in pool %(poolname)s '
+                        'which is not managed by the host %(host)s.')
+                      % {'lun_id': ref['id'],
+                         'poolname': data['pool'],
+                         'host': volume['host']})
             raise exception.ManageExistingInvalidReference(existing_ref=ref,
                                                            reason=reason)
         return data['total_capacity_gb']
@@ -2737,6 +2793,10 @@ class EMCVnxCliBase(object):
         """
 
         self._client.lun_rename(ref['id'], volume['name'])
+        model_update = {'provider_location':
+                        self._build_provider_location_for_lun(ref['id'])}
+
+        return model_update
 
     def find_iscsi_protocol_endpoints(self, device_sp):
         """Returns the iSCSI initiators for a SP."""
@@ -2773,6 +2833,14 @@ class EMCVnxCliBase(object):
 
         return specs
 
+    def get_pool(self, volume):
+        """Returns the pool name of a volume."""
+
+        data = self._client.get_lun_by_name(volume['name'],
+                                            [self._client.LUN_POOL],
+                                            poll=False)
+        return data.get(self._client.LUN_POOL.key)
+
 
 @decorate_all_methods(log_enter_exit)
 class EMCVnxCliPool(EMCVnxCliBase):
@@ -2783,80 +2851,27 @@ class EMCVnxCliPool(EMCVnxCliBase):
         self._client.get_pool(self.storage_pool)
 
     def get_target_storagepool(self,
-                               volume=None,
-                               source_volume_name=None):
-        pool_spec_id = "storagetype:pool"
-        if volume is not None:
-            specs = self.get_volumetype_extraspecs(volume)
-            if specs and pool_spec_id in specs:
-                expect_pool = specs[pool_spec_id].strip()
-                if expect_pool != self.storage_pool:
-                    msg = _("Storage pool %s is not supported"
-                            " by this Cinder Volume") % expect_pool
-                    LOG.error(msg)
-                    raise exception.VolumeBackendAPIException(data=msg)
+                               volume,
+                               source_volume=None):
         return self.storage_pool
 
     def update_volume_stats(self):
         """Retrieves stats info."""
-        self.stats = super(EMCVnxCliPool, self).update_volume_stats()
-        pool = self._client.get_pool(self.get_target_storagepool(),
+        super(EMCVnxCliPool, self).update_volume_stats()
+        if '-FASTCache' in self.enablers:
+            properties = [self._client.POOL_FREE_CAPACITY,
+                          self._client.POOL_TOTAL_CAPACITY,
+                          self._client.POOL_FAST_CACHE]
+        else:
+            properties = [self._client.POOL_FREE_CAPACITY,
+                          self._client.POOL_TOTAL_CAPACITY]
+
+        pool = self._client.get_pool(self.storage_pool,
+                                     properties=properties,
                                      poll=False)
-        self.stats['total_capacity_gb'] = pool['total_capacity_gb']
-        self.stats['free_capacity_gb'] = pool['free_capacity_gb']
-        # Some extra capacity will be used by meta data of pool LUNs.
-        # The overhead is about LUN_Capacity * 0.02 + 3 GB
-        # reserved_percentage will be used to make sure the scheduler
-        # takes the overhead into consideration
-        # Assume that all the remaining capacity is to be used to create
-        # a thick LUN, reserved_percentage is estimated as follows:
-        reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
-                     (1.02 * pool['total_capacity_gb'])) * 100)
-        self.stats['reserved_percentage'] = int(math.ceil(min(reserved, 100)))
-        if self.check_max_pool_luns_threshold:
-            pool_feature = self._client.get_pool_feature_properties(poll=False)
-            if (pool_feature['max_pool_luns']
-                    <= pool_feature['total_pool_luns']):
-                LOG.warning(_LW("Maximum number of Pool LUNs, %s, "
-                                "have been created. "
-                                "No more LUN creation can be done."),
-                            pool_feature['max_pool_luns'])
-                self.stats['free_capacity_gb'] = 0
-        array_serial = self._client.get_array_serial()
-        self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
-                                       {'pool_name': self.storage_pool,
-                                        'array_serial':
-                                           array_serial['array_serial']})
-        # check if this pool's fast_cache is really enabled
-        if self.stats['fast_cache_enabled'] == 'True' and \
-           not self._client.is_pool_fastcache_enabled(self.storage_pool):
-            self.stats['fast_cache_enabled'] = 'False'
+        self.stats['pools'] = [self._build_pool_stats(pool)]
         return self.stats
 
-    def manage_existing_get_size(self, volume, ref):
-        """Returns size of volume to be managed by manage_existing."""
-
-        # Check that the reference is valid
-        if 'id' not in ref:
-            reason = _('Reference must contain lun_id element.')
-            raise exception.ManageExistingInvalidReference(
-                existing_ref=ref,
-                reason=reason)
-        # Check for existence of the lun
-        data = self._client.get_lun_by_id(
-            ref['id'],
-            properties=self._client.LUN_WITH_POOL)
-        if data is None:
-            reason = _('Cannot find the lun with LUN id %s.') % ref['id']
-            raise exception.ManageExistingInvalidReference(existing_ref=ref,
-                                                           reason=reason)
-        if data['pool'] != self.storage_pool:
-            reason = _('The input lun is not in a manageable pool backend '
-                       'by cinder')
-            raise exception.ManageExistingInvalidReference(existing_ref=ref,
-                                                           reason=reason)
-        return data['total_capacity_gb']
-
 
 @decorate_all_methods(log_enter_exit)
 class EMCVnxCliArray(EMCVnxCliBase):
@@ -2864,51 +2879,55 @@ class EMCVnxCliArray(EMCVnxCliBase):
     def __init__(self, prtcl, configuration):
         super(EMCVnxCliArray, self).__init__(prtcl,
                                              configuration=configuration)
-        self._update_pool_cache()
-
-    def _update_pool_cache(self):
-        LOG.debug("Updating Pool Cache")
-        self.pool_cache = self._client.get_pool_list(poll=False)
 
-    def get_target_storagepool(self, volume, source_volume_name=None):
-        """Find the storage pool for given volume."""
-        pool_spec_id = "storagetype:pool"
-        specs = self.get_volumetype_extraspecs(volume)
-        if specs and pool_spec_id in specs:
-            return specs[pool_spec_id]
-        elif source_volume_name:
-            data = self._client.get_lun_by_name(source_volume_name,
-                                                [self._client.LUN_POOL])
+    def get_target_storagepool(self, volume, source_volume=None):
+        pool = vol_utils.extract_host(volume['host'], 'pool')
+
+        # For a newly created volume that is not from a snapshot or a clone,
+        # just use the pool selected by the scheduler
+        if not source_volume:
+            return pool
+
+        # For a volume created from a snapshot or cloned from another volume,
+        # the pool to use depends on the source volume's version. If the
+        # source volume was created by an older version of the driver which
+        # doesn't support the pool scheduler, use the pool where the source
+        # volume resides. Otherwise, use the pool selected by the scheduler
+        provider_location = source_volume.get('provider_location')
+
+        if (provider_location and
+                self._extract_provider_location_for_lun(provider_location,
+                                                        'version')):
+            return pool
+        else:
+            LOG.warning(_LW("The source volume is a legacy volume. "
+                            "Create volume in the pool where the source "
+                            "volume %s is created."),
+                        source_volume['name'])
+            data = self._client.get_lun_by_name(source_volume['name'],
+                                                [self._client.LUN_POOL],
+                                                poll=False)
             if data is None:
-                msg = _("Failed to find storage pool for source volume %s") \
-                    % source_volume_name
+                msg = (_("Failed to find storage pool for source volume %s.")
+                       % source_volume['name'])
                 LOG.error(msg)
                 raise exception.VolumeBackendAPIException(data=msg)
             return data[self._client.LUN_POOL.key]
-        else:
-            if len(self.pool_cache) > 0:
-                pools = sorted(self.pool_cache,
-                               key=lambda po: po['free_space'],
-                               reverse=True)
-                return pools[0]['name']
-
-        msg = (_("Failed to find storage pool to create volume %s.")
-               % volume['name'])
-        LOG.error(msg)
-        raise exception.VolumeBackendAPIException(data=msg)
 
     def update_volume_stats(self):
-        """Retrieve stats info."""
-        self.stats = super(EMCVnxCliArray, self).update_volume_stats()
-        self._update_pool_cache()
-        self.stats['total_capacity_gb'] = 'unknown'
-        self.stats['free_capacity_gb'] = 'unknown'
-        array_serial = self._client.get_array_serial()
-        self.stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
-                                       {'pool_name': '',
-                                        'array_serial':
-                                        array_serial['array_serial']})
-        self.stats['fast_cache_enabled'] = 'unknown'
+        """Retrieves stats info."""
+        super(EMCVnxCliArray, self).update_volume_stats()
+        if '-FASTCache' in self.enablers:
+            properties = [self._client.POOL_FREE_CAPACITY,
+                          self._client.POOL_TOTAL_CAPACITY,
+                          self._client.POOL_FAST_CACHE]
+        else:
+            properties = [self._client.POOL_FREE_CAPACITY,
+                          self._client.POOL_TOTAL_CAPACITY]
+        pool_list = self._client.get_pool_list(properties, False)
+
+        self.stats['pools'] = map(lambda pool: self._build_pool_stats(pool),
+                                  pool_list)
         return self.stats
 
 
@@ -2963,7 +2982,7 @@ class AttachSnapTask(task.Task):
 class CreateDestLunTask(task.Task):
     """Creates a destination lun for migration.
 
-    Reversion strategy: Detach the temp lun.
+    Reversion strategy: Delete the temp destination lun.
     """
     def __init__(self):
         super(CreateDestLunTask, self).__init__(provides='lun_data')