review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
EMC VMAX Driver Juno Update
author: Xing Yang <xing.yang@emc.com>
Thu, 3 Jul 2014 21:50:45 +0000 (17:50 -0400)
committer: Xing Yang <xing.yang@emc.com>
Sat, 23 Aug 2014 04:04:28 +0000 (00:04 -0400)
This driver is an enhancement from the EMC SMI-S driver.
In Juno, VNX support will be removed from this driver.
Moving forward, this driver will support VMAX only.

The following features are added for VMAX:

* Extend volume
* Create volume from snapshot
* Dynamically creating masking views, storage groups,
  and initiator groups
* Striped volumes
* FAST policies

Tempest test results from CI system:
  https://bugs.launchpad.net/cinder/+bug/1337840

Change-Id: I01aaf1041d32351a8dc12c509f387e2d120074a3
Implements: blueprint emc-vmax-driver-juno-update

cinder/tests/test_emc_smis.py [deleted file]
cinder/tests/test_emc_vmax.py [new file with mode: 0644]
cinder/volume/drivers/emc/emc_smis_common.py [deleted file]
cinder/volume/drivers/emc/emc_vmax_common.py [new file with mode: 0644]
cinder/volume/drivers/emc/emc_vmax_fast.py [new file with mode: 0644]
cinder/volume/drivers/emc/emc_vmax_fc.py [moved from cinder/volume/drivers/emc/emc_smis_fc.py with 70% similarity]
cinder/volume/drivers/emc/emc_vmax_iscsi.py [moved from cinder/volume/drivers/emc/emc_smis_iscsi.py with 68% similarity]
cinder/volume/drivers/emc/emc_vmax_masking.py [new file with mode: 0644]
cinder/volume/drivers/emc/emc_vmax_provision.py [new file with mode: 0644]
cinder/volume/drivers/emc/emc_vmax_utils.py [new file with mode: 0644]
etc/cinder/cinder.conf.sample

diff --git a/cinder/tests/test_emc_smis.py b/cinder/tests/test_emc_smis.py
deleted file mode 100644 (file)
index b3ab58a..0000000
+++ /dev/null
@@ -1,1348 +0,0 @@
-# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import os
-import shutil
-import tempfile
-import time
-from xml.dom.minidom import Document
-
-import mock
-
-from cinder import exception
-from cinder.openstack.common import log as logging
-from cinder import test
-from cinder.volume.drivers.emc.emc_smis_common import EMCSMISCommon
-from cinder.volume.drivers.emc.emc_smis_fc import EMCSMISFCDriver
-from cinder.volume.drivers.emc.emc_smis_iscsi import EMCSMISISCSIDriver
-from cinder.volume import volume_types
-
-CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml'
-LOG = logging.getLogger(__name__)
-
-
-class EMC_StorageVolume(dict):
-    pass
-
-
-class SE_ConcreteJob(dict):
-    pass
-
-
-class SE_StorageHardwareID(dict):
-    pass
-
-
-class FakeCIMInstanceName(dict):
-
-    def fake_getinstancename(self, classname, bindings):
-        instancename = FakeCIMInstanceName()
-        for key in bindings:
-            instancename[key] = bindings[key]
-        instancename.classname = classname
-        instancename.namespace = 'root/emc'
-        return instancename
-
-
-class FakeDB():
-    def volume_update(self, context, volume_id, model_update):
-        pass
-
-    def snapshot_update(self, context, snapshot_id, model_update):
-        pass
-
-    def volume_get(self, context, volume_id):
-        conn = FakeEcomConnection()
-        objectpath = {}
-        objectpath['CreationClassName'] = 'Clar_StorageVolume'
-        if volume_id == 'vol1':
-            device_id = '1'
-            objectpath['DeviceID'] = device_id
-        else:
-            objectpath['DeviceID'] = volume_id
-        return conn.GetInstance(objectpath)
-
-
-class EMCSMISCommonData():
-    connector = {'ip': '10.0.0.2',
-                 'initiator': 'iqn.1993-08.org.debian:01:222',
-                 'wwpns': ["123456789012345", "123456789054321"],
-                 'wwnns': ["223456789012345", "223456789054321"],
-                 'host': 'fakehost'}
-
-    config_file_name = 'cinder_emc_config.xml'
-    storage_system = 'CLARiiON+APM00123456789'
-    storage_system_vmax = 'SYMMETRIX+000195900551'
-    lunmaskctrl_id =\
-        'CLARiiON+APM00123456789+00aa11bb22cc33dd44ff55gg66hh77ii88jj'
-    initiator1 = 'iqn.1993-08.org.debian:01:1a2b3c4d5f6g'
-    stconf_service_creationclass = 'Clar_StorageConfigurationService'
-    ctrlconf_service_creationclass = 'Clar_ControllerConfigurationService'
-    rep_service_creationclass = 'Clar_ReplicationService'
-    vol_creationclass = 'Clar_StorageVolume'
-    pool_creationclass = 'Clar_UnifiedStoragePool'
-    lunmask_creationclass = 'Clar_LunMaskingSCSIProtocolController'
-    unit_creationclass = 'CIM_ProtocolControllerForUnit'
-    storage_type = 'gold'
-
-    test_volume = {'name': 'vol1',
-                   'size': 1,
-                   'volume_name': 'vol1',
-                   'id': '1',
-                   'provider_auth': None,
-                   'project_id': 'project',
-                   'display_name': 'vol1',
-                   'display_description': 'test volume',
-                   'volume_type_id': None}
-    test_failed_volume = {'name': 'failed_vol',
-                          'size': 1,
-                          'volume_name': 'failed_vol',
-                          'id': '4',
-                          'provider_auth': None,
-                          'project_id': 'project',
-                          'display_name': 'failed_vol',
-                          'display_description': 'test failed volume',
-                          'volume_type_id': None}
-    test_snapshot = {'name': 'snapshot1',
-                     'size': 1,
-                     'id': '4444',
-                     'volume_name': 'vol-vol1',
-                     'volume_size': 1,
-                     'project_id': 'project'}
-    test_clone = {'name': 'clone1',
-                  'size': 1,
-                  'volume_name': 'vol1',
-                  'id': '2',
-                  'provider_auth': None,
-                  'project_id': 'project',
-                  'display_name': 'clone1',
-                  'display_description': 'volume created from snapshot',
-                  'volume_type_id': None}
-    test_clone3 = {'name': 'clone3',
-                   'size': 1,
-                   'volume_name': 'vol1',
-                   'id': '3',
-                   'provider_auth': None,
-                   'project_id': 'project',
-                   'display_name': 'clone3',
-                   'display_description': 'cloned volume',
-                   'volume_type_id': None}
-    test_snapshot_vmax = {'name': 'snapshot_vmax',
-                          'size': 1,
-                          'id': '4445',
-                          'volume_name': 'vol-vol1',
-                          'volume_size': 1,
-                          'project_id': 'project'}
-    failed_snapshot_replica = {'name': 'failed_snapshot_replica',
-                               'size': 1,
-                               'volume_name': 'vol-vol1',
-                               'id': '5',
-                               'provider_auth': None,
-                               'project_id': 'project',
-                               'display_name': 'vol1',
-                               'display_description':
-                               'failed snapshot replica',
-                               'volume_type_id': None}
-    failed_snapshot_sync = {'name': 'failed_snapshot_sync',
-                            'size': 1,
-                            'volume_name': 'vol-vol1',
-                            'id': '6',
-                            'provider_auth': None,
-                            'project_id': 'project',
-                            'display_name': 'failed_snapshot_sync',
-                            'display_description': 'failed snapshot sync',
-                            'volume_type_id': None}
-    failed_clone_replica = {'name': 'failed_clone_replica',
-                            'size': 1,
-                            'volume_name': 'vol1',
-                            'id': '7',
-                            'provider_auth': None,
-                            'project_id': 'project',
-                            'display_name': 'vol1',
-                            'display_description': 'failed clone replica',
-                            'volume_type_id': None}
-    failed_clone_sync = {'name': 'failed_clone_sync',
-                         'size': 1,
-                         'volume_name': 'vol1',
-                         'id': '8',
-                         'provider_auth': None,
-                         'project_id': 'project',
-                         'display_name': 'vol1',
-                         'display_description': 'failed clone sync',
-                         'volume_type_id': None}
-    failed_delete_vol = {'name': 'failed_delete_vol',
-                         'size': 1,
-                         'volume_name': 'failed_delete_vol',
-                         'id': '99999',
-                         'provider_auth': None,
-                         'project_id': 'project',
-                         'display_name': 'failed delete vol',
-                         'display_description': 'failed delete volume',
-                         'volume_type_id': None}
-    failed_extend_vol = {'name': 'failed_extend_vol',
-                         'size': 1,
-                         'volume_name': 'failed_extend_vol',
-                         'id': '9',
-                         'provider_auth': None,
-                         'project_id': 'project',
-                         'display_name': 'failed_extend_vol',
-                         'display_description': 'test failed extend volume',
-                         'volume_type_id': None}
-
-
-class FakeEcomConnection():
-
-    def __init__(self, *args, **kwargs):
-        self.data = EMCSMISCommonData()
-
-    def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None,
-                     ElementType=None, Size=None,
-                     SyncType=None, SourceElement=None,
-                     Operation=None, Synchronization=None,
-                     TheElements=None, TheElement=None,
-                     LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None,
-                     ProtocolControllers=None,
-                     MaskingGroup=None, Members=None,
-                     HardwareId=None):
-
-        rc = 0L
-        myjob = SE_ConcreteJob()
-        myjob.classname = 'SE_ConcreteJob'
-        myjob['InstanceID'] = '9999'
-        myjob['status'] = 'success'
-        myjob['type'] = ElementName
-        if ElementName == 'failed_vol' and \
-                MethodName == 'CreateOrModifyElementFromStoragePool':
-            rc = 10L
-            myjob['status'] = 'failure'
-        elif TheElement and TheElement['ElementName'] == 'failed_extend_vol' \
-                and MethodName == 'CreateOrModifyElementFromStoragePool':
-            rc = 10L
-            myjob['status'] = 'failure'
-        elif MethodName == 'CreateOrModifyElementFromStoragePool':
-            rc = 0L
-            myjob['status'] = 'success'
-        elif ElementName == 'failed_snapshot_replica' and \
-                MethodName == 'CreateElementReplica':
-            rc = 10L
-            myjob['status'] = 'failure'
-        elif Synchronization and \
-                Synchronization['SyncedElement']['ElementName'] \
-                == 'failed_snapshot_sync' and \
-                MethodName == 'ModifyReplicaSynchronization':
-            rc = 10L
-            myjob['status'] = 'failure'
-        elif ElementName == 'failed_clone_replica' and \
-                MethodName == 'CreateElementReplica':
-            rc = 10L
-            myjob['status'] = 'failure'
-        elif Synchronization and \
-                Synchronization['SyncedElement']['ElementName'] \
-                == 'failed_clone_sync' and \
-                MethodName == 'ModifyReplicaSynchronization':
-            rc = 10L
-            myjob['status'] = 'failure'
-        elif TheElements and \
-                TheElements[0]['DeviceID'] == '99999' and \
-                MethodName == 'EMCReturnToStoragePool':
-            rc = 10L
-            myjob['status'] = 'failure'
-        elif HardwareId:
-            rc = 0L
-            targetendpoints = {}
-            endpoints = []
-            endpoint = {}
-            endpoint['Name'] = '1234567890123'
-            endpoints.append(endpoint)
-            endpoint2 = {}
-            endpoint2['Name'] = '0987654321321'
-            endpoints.append(endpoint2)
-            targetendpoints['TargetEndpoints'] = endpoints
-            return rc, targetendpoints
-
-        job = {'Job': myjob}
-        return rc, job
-
-    def EnumerateInstanceNames(self, name):
-        result = None
-        if name == 'EMC_ReplicationService':
-            result = self._enum_replicationservices()
-        elif name == 'EMC_StorageConfigurationService':
-            result = self._enum_stconfsvcs()
-        elif name == 'EMC_ControllerConfigurationService':
-            result = self._enum_ctrlconfsvcs()
-        elif name == 'EMC_VirtualProvisioningPool':
-            result = self._enum_pools()
-        elif name == 'EMC_UnifiedStoragePool':
-            result = self._enum_pools()
-        elif name == 'EMC_StorageVolume':
-            result = self._enum_storagevolumes()
-        elif name == 'Clar_StorageVolume':
-            result = self._enum_storagevolumes()
-        elif name == 'SE_StorageSynchronized_SV_SV':
-            result = self._enum_syncsvsvs()
-        elif name == 'CIM_ProtocolControllerForUnit':
-            result = self._enum_unitnames()
-        elif name == 'EMC_LunMaskingSCSIProtocolController':
-            result = self._enum_lunmaskctrls()
-        elif name == 'EMC_StorageProcessorSystem':
-            result = self._enum_processors()
-        elif name == 'EMC_StorageHardwareIDManagementService':
-            result = self._enum_hdwidmgmts()
-        else:
-            result = self._default_enum()
-        return result
-
-    def EnumerateInstances(self, name):
-        result = None
-        if name == 'EMC_VirtualProvisioningPool':
-            result = self._enum_pool_details()
-        elif name == 'EMC_UnifiedStoragePool':
-            result = self._enum_pool_details()
-        elif name == 'SE_StorageHardwareID':
-            result = self._enum_storhdwids()
-        else:
-            result = self._default_enum()
-        return result
-
-    def GetInstance(self, objectpath, LocalOnly=False):
-        try:
-            name = objectpath['CreationClassName']
-        except KeyError:
-            name = objectpath.classname
-        result = None
-        if name == 'Clar_StorageVolume' or name == 'Symm_StorageVolume':
-            result = self._getinstance_storagevolume(objectpath)
-        elif name == 'CIM_ProtocolControllerForUnit':
-            result = self._getinstance_unit(objectpath)
-        elif name == 'Clar_LunMaskingSCSIProtocolController':
-            result = self._getinstance_lunmask()
-        elif name == 'SE_ConcreteJob':
-            result = self._getinstance_job(objectpath)
-        elif name == 'SE_StorageSynchronized_SV_SV':
-            result = self._getinstance_syncsvsv(objectpath)
-        else:
-            result = self._default_getinstance(objectpath)
-        return result
-
-    def Associators(self, objectpath, resultClass='EMC_StorageHardwareID'):
-        result = None
-        if resultClass == 'EMC_StorageHardwareID':
-            result = self._assoc_hdwid()
-        elif resultClass == 'EMC_iSCSIProtocolEndpoint':
-            result = self._assoc_endpoint()
-        # Added test for EMC_StorageVolume
-        elif resultClass == 'EMC_StorageVolume':
-            result = self._assoc_storagevolume(objectpath)
-        else:
-            result = self._default_assoc(objectpath)
-        return result
-
-    def AssociatorNames(self, objectpath,
-                        resultClass='EMC_LunMaskingSCSIProtocolController'):
-        result = None
-        if resultClass == 'EMC_LunMaskingSCSIProtocolController':
-            result = self._assocnames_lunmaskctrl()
-        else:
-            result = self._default_assocnames(objectpath)
-        return result
-
-    def ReferenceNames(self, objectpath,
-                       ResultClass='CIM_ProtocolControllerForUnit'):
-        result = None
-        if ResultClass == 'CIM_ProtocolControllerForUnit':
-            result = self._ref_unitnames()
-        else:
-            result = self._default_ref(objectpath)
-        return result
-
-    def _ref_unitnames(self):
-        unitnames = []
-        unitname = {}
-
-        dependent = {}
-        dependent['CreationClassName'] = self.data.vol_creationclass
-        dependent['DeviceID'] = self.data.test_volume['id']
-        dependent['ElementName'] = self.data.test_volume['name']
-        dependent['SystemName'] = self.data.storage_system
-
-        antecedent = {}
-        antecedent['CreationClassName'] = self.data.lunmask_creationclass
-        antecedent['DeviceID'] = self.data.lunmaskctrl_id
-        antecedent['SystemName'] = self.data.storage_system
-
-        unitname['Dependent'] = dependent
-        unitname['Antecedent'] = antecedent
-        unitname['CreationClassName'] = self.data.unit_creationclass
-        unitnames.append(unitname)
-
-        return unitnames
-
-    def _default_ref(self, objectpath):
-        return objectpath
-
-    def _assoc_hdwid(self):
-        assocs = []
-        assoc = {}
-        assoc['StorageID'] = self.data.connector['initiator']
-        assocs.append(assoc)
-        for wwpn in self.data.connector['wwpns']:
-            assoc2 = {}
-            assoc2['StorageID'] = wwpn
-            assocs.append(assoc2)
-        return assocs
-
-    def _assoc_endpoint(self):
-        assocs = []
-        assoc = {}
-        assoc['Name'] = 'iqn.1992-04.com.emc:cx.apm00123907237.a8,t,0x0001'
-        assoc['SystemName'] = self.data.storage_system + '+SP_A+8'
-        assocs.append(assoc)
-        return assocs
-
-    # Added test for EMC_StorageVolume associators
-    def _assoc_storagevolume(self, objectpath):
-        assocs = []
-        if objectpath['type'] == 'failed_delete_vol':
-            vol = self.data.failed_delete_vol
-        elif objectpath['type'] == 'vol1':
-            vol = self.data.test_volume
-        elif objectpath['type'] == 'failed_vol':
-            vol = self.data.test_failed_volume
-        elif objectpath['type'] == 'failed_clone_sync':
-            vol = self.data.failed_clone_sync
-        elif objectpath['type'] == 'failed_clone_replica':
-            vol = self.data.failed_clone_replica
-        elif objectpath['type'] == 'failed_snapshot_replica':
-            vol = self.data.failed_snapshot_replica
-        elif objectpath['type'] == 'failed_snapshot_sync':
-            vol = self.data.failed_snapshot_sync
-        elif objectpath['type'] == 'clone1':
-            vol = self.data.test_clone
-        elif objectpath['type'] == 'clone3':
-            vol = self.data.test_clone3
-        elif objectpath['type'] == 'snapshot1':
-            vol = self.data.test_snapshot
-        elif objectpath['type'] == 'snapshot_vmax':
-            vol = self.data.test_snapshot_vmax
-        elif objectpath['type'] == 'failed_extend_vol':
-            vol = self.data.failed_extend_vol
-        else:
-            return None
-
-        vol['DeviceID'] = vol['id']
-        assoc = self._getinstance_storagevolume(vol)
-        assocs.append(assoc)
-        return assocs
-
-    def _default_assoc(self, objectpath):
-        return objectpath
-
-    def _assocnames_lunmaskctrl(self):
-        return self._enum_lunmaskctrls()
-
-    def _default_assocnames(self, objectpath):
-        return objectpath
-
-    def _getinstance_storagevolume(self, objectpath):
-        foundinstance = None
-        instance = EMC_StorageVolume()
-        vols = self._enum_storagevolumes()
-        for vol in vols:
-            if vol['DeviceID'] == objectpath['DeviceID']:
-                instance = vol
-                break
-        if not instance:
-            foundinstance = None
-        else:
-            foundinstance = instance
-        return foundinstance
-
-    def _getinstance_syncsvsv(self, objectpath):
-        foundsync = None
-        syncs = self._enum_syncsvsvs()
-        for sync in syncs:
-            if (sync['SyncedElement'] == objectpath['SyncedElement'] and
-                    sync['SystemElement'] == objectpath['SystemElement']):
-                foundsync = sync
-                break
-        return foundsync
-
-    def _getinstance_lunmask(self):
-        lunmask = {}
-        lunmask['CreationClassName'] = self.data.lunmask_creationclass
-        lunmask['DeviceID'] = self.data.lunmaskctrl_id
-        lunmask['SystemName'] = self.data.storage_system
-        return lunmask
-
-    def _getinstance_unit(self, objectpath):
-        unit = {}
-
-        dependent = {}
-        dependent['CreationClassName'] = self.data.vol_creationclass
-        dependent['DeviceID'] = self.data.test_volume['id']
-        dependent['ElementName'] = self.data.test_volume['name']
-        dependent['SystemName'] = self.data.storage_system
-
-        antecedent = {}
-        antecedent['CreationClassName'] = self.data.lunmask_creationclass
-        antecedent['DeviceID'] = self.data.lunmaskctrl_id
-        antecedent['SystemName'] = self.data.storage_system
-
-        unit['Dependent'] = dependent
-        unit['Antecedent'] = antecedent
-        unit['CreationClassName'] = self.data.unit_creationclass
-        unit['DeviceNumber'] = '0'
-
-        return unit
-
-    def _getinstance_job(self, jobpath):
-        jobinstance = {}
-        jobinstance['InstanceID'] = '9999'
-        if jobpath['status'] == 'failure':
-            jobinstance['JobState'] = 10
-            jobinstance['ErrorCode'] = 99
-            jobinstance['ErrorDescription'] = 'Failure'
-        else:
-            jobinstance['JobState'] = 7
-            jobinstance['ErrorCode'] = 0
-            jobinstance['ErrorDescription'] = ''
-        return jobinstance
-
-    def _default_getinstance(self, objectpath):
-        return objectpath
-
-    def _enum_replicationservices(self):
-        rep_services = []
-        rep_service = {}
-        rep_service['SystemName'] = self.data.storage_system
-        rep_service['CreationClassName'] = self.data.rep_service_creationclass
-        rep_services.append(rep_service)
-        return rep_services
-
-    def _enum_stconfsvcs(self):
-        conf_services = []
-        conf_service = {}
-        conf_service['SystemName'] = self.data.storage_system
-        conf_service['CreationClassName'] =\
-            self.data.stconf_service_creationclass
-        conf_services.append(conf_service)
-        return conf_services
-
-    def _enum_ctrlconfsvcs(self):
-        conf_services = []
-        conf_service = {}
-        conf_service['SystemName'] = self.data.storage_system
-        conf_service['CreationClassName'] =\
-            self.data.ctrlconf_service_creationclass
-        conf_services.append(conf_service)
-        return conf_services
-
-    def _enum_pools(self):
-        pools = []
-        pool = {}
-        pool['InstanceID'] = self.data.storage_system + '+U+' +\
-            self.data.storage_type
-        pool['CreationClassName'] = 'Clar_UnifiedStoragePool'
-        pools.append(pool)
-        return pools
-
-    def _enum_pool_details(self):
-        pools = []
-        pool = {}
-        pool['InstanceID'] = self.data.storage_system + '+U+' +\
-            self.data.storage_type
-        pool['CreationClassName'] = 'Clar_UnifiedStoragePool'
-        pool['TotalManagedSpace'] = 12345678
-        pool['RemainingManagedSpace'] = 123456
-        pools.append(pool)
-        return pools
-
-    def _enum_storagevolumes(self):
-        vols = []
-
-        vol = EMC_StorageVolume()
-        vol['name'] = self.data.test_volume['name']
-        vol['CreationClassName'] = 'Clar_StorageVolume'
-        vol['ElementName'] = self.data.test_volume['name']
-        vol['DeviceID'] = self.data.test_volume['id']
-        vol['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        vol['SystemCreationClassName'] = 'Clar_StorageSystem'
-        vol.path = vol
-        vol.path.classname = vol['CreationClassName']
-
-        name = {}
-        name['classname'] = 'Clar_StorageVolume'
-        keys = {}
-        keys['CreationClassName'] = 'Clar_StorageVolume'
-        keys['SystemName'] = self.data.storage_system
-        keys['DeviceID'] = vol['DeviceID']
-        keys['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name['keybindings'] = keys
-        vol['provider_location'] = str(name)
-
-        vols.append(vol)
-
-        snap_vol = EMC_StorageVolume()
-        snap_vol['name'] = self.data.test_snapshot['name']
-        snap_vol['CreationClassName'] = 'Clar_StorageVolume'
-        snap_vol['ElementName'] = self.data.test_snapshot['name']
-        snap_vol['DeviceID'] = self.data.test_snapshot['id']
-        snap_vol['SystemName'] = self.data.storage_system
-        # Added vol to path
-        snap_vol['SystemCreationClassName'] = 'Clar_StorageSystem'
-        snap_vol.path = snap_vol
-        snap_vol.path.classname = snap_vol['CreationClassName']
-
-        name2 = {}
-        name2['classname'] = 'Clar_StorageVolume'
-        keys2 = {}
-        keys2['CreationClassName'] = 'Clar_StorageVolume'
-        keys2['SystemName'] = self.data.storage_system
-        keys2['DeviceID'] = snap_vol['DeviceID']
-        keys2['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name2['keybindings'] = keys2
-        snap_vol['provider_location'] = str(name2)
-
-        vols.append(snap_vol)
-
-        clone_vol = EMC_StorageVolume()
-        clone_vol['name'] = self.data.test_clone['name']
-        clone_vol['CreationClassName'] = 'Clar_StorageVolume'
-        clone_vol['ElementName'] = self.data.test_clone['name']
-        clone_vol['DeviceID'] = self.data.test_clone['id']
-        clone_vol['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        clone_vol['SystemCreationClassName'] = 'Clar_StorageSystem'
-        clone_vol.path = clone_vol
-        clone_vol.path.classname = clone_vol['CreationClassName']
-        vols.append(clone_vol)
-
-        clone_vol3 = EMC_StorageVolume()
-        clone_vol3['name'] = self.data.test_clone3['name']
-        clone_vol3['CreationClassName'] = 'Clar_StorageVolume'
-        clone_vol3['ElementName'] = self.data.test_clone3['name']
-        clone_vol3['DeviceID'] = self.data.test_clone3['id']
-        clone_vol3['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        clone_vol3['SystemCreationClassName'] = 'Clar_StorageSystem'
-        clone_vol3.path = clone_vol3
-        clone_vol3.path.classname = clone_vol3['CreationClassName']
-        vols.append(clone_vol3)
-
-        snap_vol_vmax = EMC_StorageVolume()
-        snap_vol_vmax['name'] = self.data.test_snapshot_vmax['name']
-        snap_vol_vmax['CreationClassName'] = 'Symm_StorageVolume'
-        snap_vol_vmax['ElementName'] = self.data.test_snapshot_vmax['name']
-        snap_vol_vmax['DeviceID'] = self.data.test_snapshot_vmax['id']
-        snap_vol_vmax['SystemName'] = self.data.storage_system_vmax
-        # Added vol to vol.path
-        snap_vol_vmax['SystemCreationClassName'] = 'Symm_StorageSystem'
-        snap_vol_vmax.path = snap_vol_vmax
-        snap_vol_vmax.path.classname = snap_vol_vmax['CreationClassName']
-
-        name3 = {}
-        name3['classname'] = 'Clar_StorageVolume'
-        keys3 = {}
-        keys3['CreationClassName'] = 'Clar_StorageVolume'
-        keys3['SystemName'] = self.data.storage_system
-        keys3['DeviceID'] = snap_vol_vmax['DeviceID']
-        keys3['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name3['keybindings'] = keys3
-        snap_vol_vmax['provider_location'] = str(name3)
-
-        vols.append(snap_vol_vmax)
-
-        failed_snap_replica = EMC_StorageVolume()
-        failed_snap_replica['name'] = self.data.failed_snapshot_replica['name']
-        failed_snap_replica['CreationClassName'] = 'Clar_StorageVolume'
-        failed_snap_replica['ElementName'] =\
-            self.data.failed_snapshot_replica['name']
-        failed_snap_replica['DeviceID'] =\
-            self.data.failed_snapshot_replica['id']
-        failed_snap_replica['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        failed_snap_replica['SystemCreationClassName'] = 'Clar_StorageSystem'
-        failed_snap_replica.path = failed_snap_replica
-        failed_snap_replica.path.classname =\
-            failed_snap_replica['CreationClassName']
-
-        name4 = {}
-        name4['classname'] = 'Clar_StorageVolume'
-        keys4 = {}
-        keys4['CreationClassName'] = 'Clar_StorageVolume'
-        keys4['SystemName'] = self.data.storage_system
-        keys4['DeviceID'] = failed_snap_replica['DeviceID']
-        keys4['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name4['keybindings'] = keys4
-        failed_snap_replica['provider_location'] = str(name4)
-
-        vols.append(failed_snap_replica)
-
-        failed_snap_sync = EMC_StorageVolume()
-        failed_snap_sync['name'] = self.data.failed_snapshot_sync['name']
-        failed_snap_sync['CreationClassName'] = 'Clar_StorageVolume'
-        failed_snap_sync['ElementName'] =\
-            self.data.failed_snapshot_sync['name']
-        failed_snap_sync['DeviceID'] = self.data.failed_snapshot_sync['id']
-        failed_snap_sync['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        failed_snap_sync['SystemCreationClassName'] = 'Clar_StorageSystem'
-        failed_snap_sync.path = failed_snap_sync
-        failed_snap_sync.path.classname =\
-            failed_snap_sync['CreationClassName']
-
-        name5 = {}
-        name5['classname'] = 'Clar_StorageVolume'
-        keys5 = {}
-        keys5['CreationClassName'] = 'Clar_StorageVolume'
-        keys5['SystemName'] = self.data.storage_system
-        keys5['DeviceID'] = failed_snap_sync['DeviceID']
-        keys5['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name5['keybindings'] = keys5
-        failed_snap_sync['provider_location'] = str(name5)
-
-        vols.append(failed_snap_sync)
-
-        failed_clone_rep = EMC_StorageVolume()
-        failed_clone_rep['name'] = self.data.failed_clone_replica['name']
-        failed_clone_rep['CreationClassName'] = 'Clar_StorageVolume'
-        failed_clone_rep['ElementName'] =\
-            self.data.failed_clone_replica['name']
-        failed_clone_rep['DeviceID'] = self.data.failed_clone_replica['id']
-        failed_clone_rep['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        failed_clone_rep['SystemCreationClassName'] = 'Clar_StorageSystem'
-        failed_clone_rep.path = failed_clone_rep
-        failed_clone_rep.path.classname =\
-            failed_clone_rep['CreationClassName']
-        vols.append(failed_clone_rep)
-
-        failed_clone_s = EMC_StorageVolume()
-        failed_clone_s['name'] = self.data.failed_clone_sync['name']
-        failed_clone_s['CreationClassName'] = 'Clar_StorageVolume'
-        failed_clone_s['ElementName'] = self.data.failed_clone_sync['name']
-        failed_clone_s['DeviceID'] = self.data.failed_clone_sync['id']
-        failed_clone_s['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        failed_clone_s['SystemCreationClassName'] = 'Clar_StorageSystem'
-        failed_clone_s.path = failed_clone_s
-        failed_clone_s.path.classname =\
-            failed_clone_s['CreationClassName']
-        vols.append(failed_clone_s)
-
-        failed_delete_vol = EMC_StorageVolume()
-        failed_delete_vol['name'] = 'failed_delete_vol'
-        failed_delete_vol['CreationClassName'] = 'Clar_StorageVolume'
-        failed_delete_vol['ElementName'] = 'failed_delete_vol'
-        failed_delete_vol['DeviceID'] = '99999'
-        failed_delete_vol['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        failed_delete_vol['SystemCreationClassName'] = 'Clar_StorageSystem'
-        failed_delete_vol.path = failed_delete_vol
-        failed_delete_vol.path.classname =\
-            failed_delete_vol['CreationClassName']
-        vols.append(failed_delete_vol)
-
-        failed_vol = EMC_StorageVolume()
-        failed_vol['name'] = 'failed__vol'
-        failed_vol['CreationClassName'] = 'Clar_StorageVolume'
-        failed_vol['ElementName'] = 'failed_vol'
-        failed_vol['DeviceID'] = '4'
-        failed_vol['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        failed_vol['SystemCreationClassName'] = 'Clar_StorageSystem'
-        failed_vol.path = failed_vol
-        failed_vol.path.classname =\
-            failed_vol['CreationClassName']
-
-        name_failed = {}
-        name_failed['classname'] = 'Clar_StorageVolume'
-        keys_failed = {}
-        keys_failed['CreationClassName'] = 'Clar_StorageVolume'
-        keys_failed['SystemName'] = self.data.storage_system
-        keys_failed['DeviceID'] = failed_vol['DeviceID']
-        keys_failed['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name_failed['keybindings'] = keys_failed
-        failed_vol['provider_location'] = str(name_failed)
-
-        vols.append(failed_vol)
-
-        failed_extend_vol = EMC_StorageVolume()
-        failed_extend_vol['name'] = 'failed_extend_vol'
-        failed_extend_vol['CreationClassName'] = 'Clar_StorageVolume'
-        failed_extend_vol['ElementName'] = 'failed_extend_vol'
-        failed_extend_vol['DeviceID'] = '9'
-        failed_extend_vol['SystemName'] = self.data.storage_system
-        # Added vol to vol.path
-        failed_extend_vol['SystemCreationClassName'] = 'Clar_StorageSystem'
-        failed_extend_vol.path = failed_extend_vol
-        failed_extend_vol.path.classname =\
-            failed_extend_vol['CreationClassName']
-
-        name_extend_failed = {}
-        name_extend_failed['classname'] = 'Clar_StorageVolume'
-        keys_extend_failed = {}
-        keys_extend_failed['CreationClassName'] = 'Clar_StorageVolume'
-        keys_extend_failed['SystemName'] = self.data.storage_system
-        keys_extend_failed['DeviceID'] = failed_extend_vol['DeviceID']
-        keys_extend_failed['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name_extend_failed['keybindings'] = keys_extend_failed
-        failed_extend_vol['provider_location'] = str(name_extend_failed)
-
-        vols.append(failed_extend_vol)
-
-        return vols
-
-    def _enum_syncsvsvs(self):
-        syncs = []
-
-        vols = self._enum_storagevolumes()
-
-        sync = self._create_sync(vols[0], vols[1], 100)
-        syncs.append(sync)
-
-        sync2 = self._create_sync(vols[1], vols[2], 100)
-        syncs.append(sync2)
-
-        sync3 = self._create_sync(vols[0], vols[3], 100)
-        syncs.append(sync3)
-
-        objpath1 = vols[1]
-        for vol in vols:
-            if vol['ElementName'] == 'failed_snapshot_sync':
-                objpath2 = vol
-                break
-        sync4 = self._create_sync(objpath1, objpath2, 100)
-        syncs.append(sync4)
-
-        objpath1 = vols[0]
-        for vol in vols:
-            if vol['ElementName'] == 'failed_clone_sync':
-                objpath2 = vol
-                break
-        sync5 = self._create_sync(objpath1, objpath2, 100)
-        syncs.append(sync5)
-
-        return syncs
-
-    def _create_sync(self, objpath1, objpath2, percentsynced):
-        sync = {}
-        sync['SyncedElement'] = objpath2
-        sync['SystemElement'] = objpath1
-        sync['CreationClassName'] = 'SE_StorageSynchronized_SV_SV'
-        sync['PercentSynced'] = percentsynced
-        return sync
-
-    def _enum_unitnames(self):
-        return self._ref_unitnames()
-
-    def _enum_lunmaskctrls(self):
-        ctrls = []
-        ctrl = {}
-        ctrl['CreationClassName'] = self.data.lunmask_creationclass
-        ctrl['DeviceID'] = self.data.lunmaskctrl_id
-        ctrl['SystemName'] = self.data.storage_system
-        ctrls.append(ctrl)
-        return ctrls
-
-    def _enum_processors(self):
-        ctrls = []
-        ctrl = {}
-        ctrl['CreationClassName'] = 'Clar_StorageProcessorSystem'
-        ctrl['Name'] = self.data.storage_system + '+SP_A'
-        ctrls.append(ctrl)
-        return ctrls
-
-    def _enum_hdwidmgmts(self):
-        services = []
-        srv = {}
-        srv['SystemName'] = self.data.storage_system
-        services.append(srv)
-        return services
-
-    def _enum_storhdwids(self):
-        storhdwids = []
-        hdwid = SE_StorageHardwareID()
-        hdwid['StorageID'] = self.data.connector['wwpns'][0]
-
-        hdwid.path = hdwid
-        storhdwids.append(hdwid)
-        return storhdwids
-
-    def _default_enum(self):
-        names = []
-        name = {}
-        name['Name'] = 'default'
-        names.append(name)
-        return names
-
-
-class EMCSMISISCSIDriverTestCase(test.TestCase):
-
-    def setUp(self):
-
-        self.data = EMCSMISCommonData()
-
-        self.tempdir = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.tempdir)
-        super(EMCSMISISCSIDriverTestCase, self).setUp()
-        self.config_file_path = None
-        self.create_fake_config_file()
-        self.addCleanup(os.remove, self.config_file_path)
-
-        configuration = mock.Mock()
-        configuration.cinder_emc_config_file = self.config_file_path
-
-        self.stubs.Set(EMCSMISISCSIDriver, '_do_iscsi_discovery',
-                       self.fake_do_iscsi_discovery)
-        self.stubs.Set(EMCSMISCommon, '_get_ecom_connection',
-                       self.fake_ecom_connection)
-        instancename = FakeCIMInstanceName()
-        self.stubs.Set(EMCSMISCommon, '_getinstancename',
-                       instancename.fake_getinstancename)
-        self.stubs.Set(time, 'sleep',
-                       self.fake_sleep)
-        driver = EMCSMISISCSIDriver(configuration=configuration)
-        driver.db = FakeDB()
-        self.driver = driver
-
-    def create_fake_config_file(self):
-
-        doc = Document()
-        emc = doc.createElement("EMC")
-        doc.appendChild(emc)
-
-        storagetype = doc.createElement("StorageType")
-        storagetypetext = doc.createTextNode("gold")
-        emc.appendChild(storagetype)
-        storagetype.appendChild(storagetypetext)
-
-        ecomserverip = doc.createElement("EcomServerIp")
-        ecomserveriptext = doc.createTextNode("1.1.1.1")
-        emc.appendChild(ecomserverip)
-        ecomserverip.appendChild(ecomserveriptext)
-
-        ecomserverport = doc.createElement("EcomServerPort")
-        ecomserverporttext = doc.createTextNode("10")
-        emc.appendChild(ecomserverport)
-        ecomserverport.appendChild(ecomserverporttext)
-
-        ecomusername = doc.createElement("EcomUserName")
-        ecomusernametext = doc.createTextNode("user")
-        emc.appendChild(ecomusername)
-        ecomusername.appendChild(ecomusernametext)
-
-        ecompassword = doc.createElement("EcomPassword")
-        ecompasswordtext = doc.createTextNode("pass")
-        emc.appendChild(ecompassword)
-        ecompassword.appendChild(ecompasswordtext)
-
-        timeout = doc.createElement("Timeout")
-        timeouttext = doc.createTextNode("0")
-        emc.appendChild(timeout)
-        timeout.appendChild(timeouttext)
-
-        self.config_file_path = self.tempdir + '/' + self.data.config_file_name
-        f = open(self.config_file_path, 'w')
-        doc.writexml(f)
-        f.close()
-
-    def fake_ecom_connection(self):
-        conn = FakeEcomConnection()
-        return conn
-
-    def fake_do_iscsi_discovery(self, volume):
-        output = []
-        item = '10.0.0.3:3260,1 iqn.1992-04.com.emc:cx.apm00123907237.a8'
-        item2 = '10.0.0.4:3260,2 iqn.1992-04.com.emc:cx.apm00123907237.b8'
-        output.append(item)
-        output.append(item2)
-        return output
-
-    def fake_sleep(self, seconds):
-        return
-
-    def test_get_volume_stats(self):
-        self.driver.get_volume_stats(True)
-
-    def test_create_destroy(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_snapshot_destroy(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.create_snapshot(self.data.test_snapshot)
-        self.driver.create_volume_from_snapshot(
-            self.data.test_clone, self.data.test_snapshot)
-        self.driver.create_cloned_volume(
-            self.data.test_clone3, self.data.test_volume)
-        self.driver.delete_volume(self.data.test_clone)
-        self.driver.delete_volume(self.data.test_clone3)
-        self.driver.delete_snapshot(self.data.test_snapshot)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_map_unmap(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.data.test_volume['EMCCurrentOwningStorageProcessor'] = 'SP_A'
-        self.driver.initialize_connection(self.data.test_volume,
-                                          self.data.connector)
-        self.driver.terminate_connection(self.data.test_volume,
-                                         self.data.connector)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_failed(self):
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume,
-                          self.data.test_failed_volume)
-
-    def test_create_volume_snapshot_unsupported(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.create_snapshot(self.data.test_snapshot_vmax)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume_from_snapshot,
-                          self.data.test_clone,
-                          self.data.test_snapshot_vmax)
-        self.driver.delete_snapshot(self.data.test_snapshot_vmax)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_snapshot_replica_failed(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.create_snapshot(self.data.test_snapshot)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume_from_snapshot,
-                          self.data.failed_snapshot_replica,
-                          self.data.test_snapshot)
-        self.driver.delete_snapshot(self.data.test_snapshot)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_snapshot_sync_failed(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.create_snapshot(self.data.test_snapshot)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume_from_snapshot,
-                          self.data.failed_snapshot_sync,
-                          self.data.test_snapshot)
-        self.driver.delete_snapshot(self.data.test_snapshot)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_clone_replica_failed(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_cloned_volume,
-                          self.data.failed_clone_replica,
-                          self.data.test_volume)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_clone_sync_failed(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_cloned_volume,
-                          self.data.failed_clone_sync,
-                          self.data.test_volume)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_delete_volume_notfound(self):
-        notfound_delete_vol = {}
-        notfound_delete_vol['name'] = 'notfound_delete_vol'
-        notfound_delete_vol['id'] = '10'
-        notfound_delete_vol['CreationClassName'] = 'Clar_StorageVolume'
-        notfound_delete_vol['SystemName'] = self.data.storage_system
-        notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
-        notfound_delete_vol['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name = {}
-        name['classname'] = 'Clar_StorageVolume'
-        keys = {}
-        keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
-        keys['SystemName'] = notfound_delete_vol['SystemName']
-        keys['DeviceID'] = notfound_delete_vol['DeviceID']
-        keys['SystemCreationClassName'] =\
-            notfound_delete_vol['SystemCreationClassName']
-        name['keybindings'] = keys
-        notfound_delete_vol['provider_location'] = str(name)
-        self.driver.delete_volume(notfound_delete_vol)
-
-    def test_delete_volume_failed(self):
-        self.driver.create_volume(self.data.failed_delete_vol)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.delete_volume,
-                          self.data.failed_delete_vol)
-
-    def test_extend_volume(self):
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.extend_volume(self.data.test_volume, '10')
-        self.driver.create_volume(self.data.failed_extend_vol)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.extend_volume,
-                          self.data.failed_extend_vol,
-                          '10')
-
-
-class EMCSMISFCDriverTestCase(test.TestCase):
-
-    def setUp(self):
-
-        self.data = EMCSMISCommonData()
-
-        self.tempdir = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.tempdir)
-        super(EMCSMISFCDriverTestCase, self).setUp()
-        self.config_file_path = None
-        self.create_fake_config_file()
-        self.addCleanup(os.remove, self.config_file_path)
-
-        configuration = mock.Mock()
-        configuration.cinder_emc_config_file = self.config_file_path
-
-        self.stubs.Set(EMCSMISCommon, '_get_ecom_connection',
-                       self.fake_ecom_connection)
-        instancename = FakeCIMInstanceName()
-        self.stubs.Set(EMCSMISCommon, '_getinstancename',
-                       instancename.fake_getinstancename)
-        self.stubs.Set(time, 'sleep',
-                       self.fake_sleep)
-        driver = EMCSMISFCDriver(configuration=configuration)
-        driver.db = FakeDB()
-        self.driver = driver
-
-    def create_fake_config_file(self):
-
-        doc = Document()
-        emc = doc.createElement("EMC")
-        doc.appendChild(emc)
-
-        storagetype = doc.createElement("StorageType")
-        storagetypetext = doc.createTextNode("gold")
-        emc.appendChild(storagetype)
-        storagetype.appendChild(storagetypetext)
-
-        ecomserverip = doc.createElement("EcomServerIp")
-        ecomserveriptext = doc.createTextNode("1.1.1.1")
-        emc.appendChild(ecomserverip)
-        ecomserverip.appendChild(ecomserveriptext)
-
-        ecomserverport = doc.createElement("EcomServerPort")
-        ecomserverporttext = doc.createTextNode("10")
-        emc.appendChild(ecomserverport)
-        ecomserverport.appendChild(ecomserverporttext)
-
-        ecomusername = doc.createElement("EcomUserName")
-        ecomusernametext = doc.createTextNode("user")
-        emc.appendChild(ecomusername)
-        ecomusername.appendChild(ecomusernametext)
-
-        ecompassword = doc.createElement("EcomPassword")
-        ecompasswordtext = doc.createTextNode("pass")
-        emc.appendChild(ecompassword)
-        ecompassword.appendChild(ecompasswordtext)
-
-        timeout = doc.createElement("Timeout")
-        timeouttext = doc.createTextNode("0")
-        emc.appendChild(timeout)
-        timeout.appendChild(timeouttext)
-
-        self.config_file_path = self.tempdir + '/' + self.data.config_file_name
-        f = open(self.config_file_path, 'w')
-        doc.writexml(f)
-        f.close()
-
-    def fake_ecom_connection(self):
-        conn = FakeEcomConnection()
-        return conn
-
-    def fake_sleep(self, seconds):
-        return
-
-    def test_get_volume_stats(self):
-        self.driver.get_volume_stats(True)
-
-    def test_create_destroy(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_snapshot_destroy(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.create_snapshot(self.data.test_snapshot)
-        self.driver.create_volume_from_snapshot(
-            self.data.test_clone, self.data.test_snapshot)
-        self.driver.create_cloned_volume(
-            self.data.test_clone3, self.data.test_volume)
-        self.driver.delete_volume(self.data.test_clone)
-        self.driver.delete_volume(self.data.test_clone3)
-        self.driver.delete_snapshot(self.data.test_snapshot)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_map_unmap(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-
-        output = {
-            'driver_volume_type': 'fibre_channel',
-            'data': {
-                'target_lun': 0,
-                'target_wwn': ['1234567890123', '0987654321321'],
-                'target_discovered': True,
-                'initiator_target_map': {'123456789012345':
-                                         ['1234567890123', '0987654321321'],
-                                         '123456789054321':
-                                         ['1234567890123', '0987654321321'],
-                                         }}}
-
-        connection_info = self.driver.initialize_connection(
-            self.data.test_volume,
-            self.data.connector)
-        self.assertEqual(connection_info, output)
-
-        connection_info = self.driver.terminate_connection(
-            self.data.test_volume,
-            self.data.connector)
-
-        # Verify calls in terminate_connection are executed
-        conf_service = {}
-        conf_service['SystemName'] = self.data.storage_system
-        conf_service['CreationClassName'] =\
-            self.data.ctrlconf_service_creationclass
-
-        vol_instance = self.driver.common._find_lun(self.data.test_volume)
-
-        mock.call._get_ecom_connection(),
-        mock.call.find_device_number(self.data.test_volume),
-        mock.call._find_lun(self.data.test_volume),
-        mock.call.self._find_controller_configuration_service(
-            self.data.storage_system),
-        mock.call._remove_members(conf_service, vol_instance),
-        mock.call.get_target_wwns(
-            self.data.storage_system,
-            self.data.connector)
-
-        output = {
-            'driver_volume_type': 'fibre_channel',
-            'data': {
-                'target_wwn': ['1234567890123', '0987654321321'],
-                'initiator_target_map': {'123456789012345':
-                                         ['1234567890123', '0987654321321'],
-                                         '123456789054321':
-                                         ['1234567890123', '0987654321321'],
-                                         }}}
-
-        self.assertEqual(connection_info, output)
-
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_failed(self):
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume,
-                          self.data.test_failed_volume)
-
-    def test_create_volume_snapshot_unsupported(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.create_snapshot(self.data.test_snapshot_vmax)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume_from_snapshot,
-                          self.data.test_clone,
-                          self.data.test_snapshot_vmax)
-        self.driver.delete_snapshot(self.data.test_snapshot_vmax)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_snapshot_replica_failed(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.create_snapshot(self.data.test_snapshot)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume_from_snapshot,
-                          self.data.failed_snapshot_replica,
-                          self.data.test_snapshot)
-        self.driver.delete_snapshot(self.data.test_snapshot)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_snapshot_sync_failed(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.create_snapshot(self.data.test_snapshot)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume_from_snapshot,
-                          self.data.failed_snapshot_sync,
-                          self.data.test_snapshot)
-        self.driver.delete_snapshot(self.data.test_snapshot)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_clone_replica_failed(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_cloned_volume,
-                          self.data.failed_clone_replica,
-                          self.data.test_volume)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_create_volume_clone_sync_failed(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_cloned_volume,
-                          self.data.failed_clone_sync,
-                          self.data.test_volume)
-        self.driver.delete_volume(self.data.test_volume)
-
-    def test_delete_volume_notfound(self):
-        notfound_delete_vol = {}
-        notfound_delete_vol['name'] = 'notfound_delete_vol'
-        notfound_delete_vol['id'] = '10'
-        notfound_delete_vol['CreationClassName'] = 'Clar_StorageVolume'
-        notfound_delete_vol['SystemName'] = self.data.storage_system
-        notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
-        notfound_delete_vol['SystemCreationClassName'] = 'Clar_StorageSystem'
-        name = {}
-        name['classname'] = 'Clar_StorageVolume'
-        keys = {}
-        keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
-        keys['SystemName'] = notfound_delete_vol['SystemName']
-        keys['DeviceID'] = notfound_delete_vol['DeviceID']
-        keys['SystemCreationClassName'] =\
-            notfound_delete_vol['SystemCreationClassName']
-        name['keybindings'] = keys
-        notfound_delete_vol['provider_location'] = str(name)
-        self.driver.delete_volume(notfound_delete_vol)
-
-    def test_delete_volume_failed(self):
-        self.driver.create_volume(self.data.failed_delete_vol)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.delete_volume,
-                          self.data.failed_delete_vol)
-
-    def test_extend_volume(self):
-        self.data.test_volume['volume_type_id'] = None
-        self.driver.create_volume(self.data.test_volume)
-        self.driver.extend_volume(self.data.test_volume, '10')
-        self.driver.create_volume(self.data.failed_extend_vol)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.extend_volume,
-                          self.data.failed_extend_vol,
-                          '10')
-
-    @mock.patch.object(
-        volume_types,
-        'get_volume_type_extra_specs',
-        return_value={'storagetype:pool': 'gold',
-                      'storagetype:provisioning': 'thick'})
-    def test_create_volume_with_volume_type(self, _mock_volume_type):
-        volume_with_vt = self.data.test_volume
-        volume_with_vt['volume_type_id'] = 1
-        self.driver.create_volume(volume_with_vt)
diff --git a/cinder/tests/test_emc_vmax.py b/cinder/tests/test_emc_vmax.py
new file mode 100644 (file)
index 0000000..536d5a8
--- /dev/null
@@ -0,0 +1,2783 @@
+# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import shutil
+import tempfile
+import time
+from xml.dom.minidom import Document
+
+import mock
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.volume.drivers.emc.emc_vmax_common import EMCVMAXCommon
+from cinder.volume.drivers.emc.emc_vmax_fast import EMCVMAXFast
+from cinder.volume.drivers.emc.emc_vmax_fc import EMCVMAXFCDriver
+from cinder.volume.drivers.emc.emc_vmax_iscsi import EMCVMAXISCSIDriver
+from cinder.volume.drivers.emc.emc_vmax_masking import EMCVMAXMasking
+from cinder.volume.drivers.emc.emc_vmax_utils import EMCVMAXUtils
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+CINDER_EMC_CONFIG_DIR = '/etc/cinder/'
+
+
+class EMC_StorageVolume(dict):
+    pass
+
+
+class CIM_StorageExtent(dict):
+    pass
+
+
+class SE_InitiatorMaskingGroup(dict):
+    pass
+
+
+class SE_ConcreteJob(dict):
+    pass
+
+
+class SE_StorageHardwareID(dict):
+    pass
+
+
+class Fake_CIMProperty():
+
+    def fake_getCIMProperty(self):
+        cimproperty = Fake_CIMProperty()
+        cimproperty.value = True
+        return cimproperty
+
+    def fake_getBlockSizeCIMProperty(self):
+        cimproperty = Fake_CIMProperty()
+        cimproperty.value = '512'
+        return cimproperty
+
+    def fake_getConsumableBlocksCIMProperty(self):
+        cimproperty = Fake_CIMProperty()
+        cimproperty.value = '12345'
+        return cimproperty
+
+    def fake_getIsConcatenatedCIMProperty(self):
+        cimproperty = Fake_CIMProperty()
+        cimproperty.value = True
+        return cimproperty
+
+    def fake_getIsCompositeCIMProperty(self):
+        cimproperty = Fake_CIMProperty()
+        cimproperty.value = False
+        return cimproperty
+
+
+class Fake_CIM_TierPolicyServiceCapabilities():
+
+    def fake_getpolicyinstance(self):
+        classinstance = Fake_CIM_TierPolicyServiceCapabilities()
+
+        classcimproperty = Fake_CIMProperty()
+        cimproperty = classcimproperty.fake_getCIMProperty()
+
+        cimproperties = {u'SupportsTieringPolicies': cimproperty}
+        classinstance.properties = cimproperties
+
+        return classinstance
+
+
+class FakeCIMInstanceName(dict):
+
+    def fake_getinstancename(self, classname, bindings):
+        instancename = FakeCIMInstanceName()
+        for key in bindings:
+            instancename[key] = bindings[key]
+        instancename.classname = classname
+        instancename.namespace = 'root/emc'
+        return instancename
+
+
+class FakeDB():
+
+    def volume_update(self, context, volume_id, model_update):
+        pass
+
+    def volume_get(self, context, volume_id):
+        conn = FakeEcomConnection()
+        objectpath = {}
+        objectpath['CreationClassName'] = 'Symm_StorageVolume'
+
+        if volume_id == 'vol1':
+            device_id = '1'
+            objectpath['DeviceID'] = device_id
+        else:
+            objectpath['DeviceID'] = volume_id
+        return conn.GetInstance(objectpath)
+
+
+class EMCVMAXCommonData():
+    connector = {'ip': '10.0.0.2',
+                 'initiator': 'iqn.1993-08.org.debian: 01: 222',
+                 'wwpns': ["123456789012345", "123456789054321"],
+                 'wwnns': ["223456789012345", "223456789054321"],
+                 'host': 'fakehost'}
+    # Canned identities and fixtures shared by all of the fake-connection
+    # tests below.  NOTE(review): the array serial in default_storage_group
+    # (000198700440) differs from storage_system (000195900551); kept as-is
+    # from the fixture -- confirm this is intentional.
+    default_storage_group = (
+        u'//10.108.246.202/root/emc: SE_DeviceMaskingGroup.InstanceID='
+        '"SYMMETRIX+000198700440+OS_default_GOLD1_SG"')
+    storage_system = 'SYMMETRIX+000195900551'
+    # Masking-view controller and initiator group for the fake host.
+    lunmaskctrl_id =\
+        'SYMMETRIX+000195900551+OS-fakehost-gold-MV'
+    lunmaskctrl_name =\
+        'OS-fakehost-gold-MV'
+
+    initiatorgroup_id =\
+        'SYMMETRIX+000195900551+OS-fakehost-IG'
+    initiatorgroup_name =\
+        'OS-fakehost-IG'
+    initiatorgroup_creationclass = 'SE_InitiatorMaskingGroup'
+
+    storageextent_creationclass = 'CIM_StorageExtent'
+    # NOTE(review): the spaces after the colons in the IQN-style strings in
+    # this file look like a rendering artifact of the diff page rather than
+    # a real iSCSI qualified name -- verify against the canonical source.
+    initiator1 = 'iqn.1993-08.org.debian: 01: 1a2b3c4d5f6g'
+    # CIM CreationClassName values the FakeEcomConnection dispatches on.
+    stconf_service_creationclass = 'Symm_StorageConfigurationService'
+    ctrlconf_service_creationclass = 'Symm_ControllerConfigurationService'
+    elementcomp_service_creationclass = 'Symm_ElementCompositionService'
+    storreloc_service_creationclass = 'Symm_StorageRelocationService'
+    replication_service_creationclass = 'EMC_ReplicationService'
+    vol_creationclass = 'Symm_StorageVolume'
+    pool_creationclass = 'Symm_VirtualProvisioningPool'
+    lunmask_creationclass = 'Symm_LunMaskingSCSIProtocolController'
+    lunmask_creationclass2 = 'Symm_LunMaskingView'
+    hostedservice_creationclass = 'CIM_HostedService'
+    policycapability_creationclass = 'CIM_TierPolicyServiceCapabilities'
+    policyrule_creationclass = 'Symm_TierPolicyRule'
+    assoctierpolicy_creationclass = 'CIM_StorageTier'
+    storagepool_creationclass = 'Symm_VirtualProvisioningPool'
+    storagegroup_creationclass = 'CIM_DeviceMaskingGroup'
+    hardwareid_creationclass = 'SE_StorageHardwareID'
+    storagepoolid = 'SYMMETRIX+000195900551+U+gold'
+    storagegroupname = 'OS_default_GOLD1_SG'
+    storagevolume_creationclass = 'EMC_StorageVolume'
+    policyrule = 'gold'
+    poolname = 'gold'
+
+    unit_creationclass = 'CIM_ProtocolControllerForUnit'
+    storage_type = 'gold'
+    # Keybindings identify volumes on the array: DeviceID '1' is the
+    # well-behaved volume, '99999' is the one whose delete is made to fail.
+    keybindings = {'CreationClassName': u'Symm_StorageVolume',
+                   'SystemName': u'SYMMETRIX+000195900551',
+                   'DeviceID': u'1',
+                   'SystemCreationClassName': u'Symm_StorageSystem'}
+
+    keybindings2 = {'CreationClassName': u'Symm_StorageVolume',
+                    'SystemName': u'SYMMETRIX+000195900551',
+                    'DeviceID': u'99999',
+                    'SystemCreationClassName': u'Symm_StorageSystem'}
+    provider_location = {'classname': 'Symm_StorageVolume',
+                         'keybindings': keybindings}
+    provider_location2 = {'classname': 'Symm_StorageVolume',
+                          'keybindings': keybindings2}
+
+    properties = {'ConsumableBlocks': '12345',
+                  'BlockSize': '512'}
+
+    # Volume dictionaries handed to the driver entry points under test.
+    test_volume = {'name': 'vol1',
+                   'size': 1,
+                   'volume_name': 'vol1',
+                   'id': '1',
+                   'provider_auth': None,
+                   'project_id': 'project',
+                   'display_name': 'vol1',
+                   'display_description': 'test volume',
+                   'volume_type_id': 'abc',
+                   'provider_location': str(provider_location),
+                   'status': 'available',
+                   'host': 'fake-host'
+                   }
+    test_failed_volume = {'name': 'failed_vol',
+                          'size': 1,
+                          'volume_name': 'failed_vol',
+                          'id': '4',
+                          'provider_auth': None,
+                          'project_id': 'project',
+                          'display_name': 'failed_vol',
+                          'display_description': 'test failed volume',
+                          'volume_type_id': 'abc'}
+
+    # Size '-1' / DeviceID '99999' trigger the failure branches in
+    # FakeEcomConnection.InvokeMethod.
+    failed_delete_vol = {'name': 'failed_delete_vol',
+                         'size': '-1',
+                         'volume_name': 'failed_delete_vol',
+                         'id': '99999',
+                         'provider_auth': None,
+                         'project_id': 'project',
+                         'display_name': 'failed delete vol',
+                         'display_description': 'failed delete volume',
+                         'volume_type_id': 'abc',
+                         'provider_location': str(provider_location2)}
+
+    test_source_volume = {'size': 1,
+                          'volume_type_id': 'sourceid',
+                          'display_name': 'sourceVolume',
+                          'name': 'sourceVolume',
+                          'volume_name': 'vmax-154326',
+                          'provider_auth': None,
+                          'project_id':
+                          'project', 'id': '2',
+                          'provider_location': str(provider_location),
+                          'display_description': 'snapshot source volume'}
+
+    # Destination-host capabilities used by the retype/migration tests.
+    location_info = {'location_info': '000195900551#silver#None',
+                     'storage_protocol': 'ISCSI'}
+    test_host = {'capabilities': location_info,
+                 'host': 'fake_host'}
+    test_ctxt = {}
+    new_type = {}
+    diff = {}
+
+class FakeEcomConnection():
+    """In-memory stand-in for a pywbem ECOM (WBEM) connection.
+
+    Answers the subset of the WBEM operation surface the VMAX driver
+    exercises (InvokeMethod, EnumerateInstance*, GetInstance, Associators,
+    AssociatorNames, ReferenceNames) from the canned EMCVMAXCommonData
+    fixtures, dispatching on CIM class names.  No SMI-S provider needed.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.data = EMCVMAXCommonData()
+
+    def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None,
+                     ElementType=None, Size=None,
+                     SyncType=None, SourceElement=None,
+                     Operation=None, Synchronization=None,
+                     TheElements=None, TheElement=None,
+                     LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None,
+                     ProtocolControllers=None,
+                     MaskingGroup=None, Members=None,
+                     HardwareId=None, ElementSource=None, EMCInPools=None,
+                     CompositeType=None, EMCNumberOfMembers=None,
+                     EMCBindElements=None,
+                     InElements=None, TargetPool=None, RequestedState=None):
+        """Fake an extrinsic method call; return (return-code, out-params).
+
+        Default reply is success (rc 0L -- Python 2 long literal) with a
+        SE_ConcreteJob.  Specific argument combinations below simulate
+        failures or special replies for the negative-path tests.
+        """
+
+        rc = 0L
+        myjob = SE_ConcreteJob()
+        myjob.classname = 'SE_ConcreteJob'
+        myjob['InstanceID'] = '9999'
+        myjob['status'] = 'success'
+        myjob['type'] = ElementName
+
+        # A composite-element call with this negative size tags the job so
+        # the follow-up lookup resolves to failed_delete_vol.
+        if Size == -1073741824 and \
+                MethodName == 'CreateOrModifyCompositeElement':
+            rc = 0L
+            myjob = SE_ConcreteJob()
+            myjob.classname = 'SE_ConcreteJob'
+            myjob['InstanceID'] = '99999'
+            myjob['status'] = 'success'
+            myjob['type'] = 'failed_delete_vol'
+        elif ElementName is None and \
+                MethodName == 'CreateOrModifyCompositeElement':
+            rc = 0L
+            myjob = SE_ConcreteJob()
+            myjob.classname = 'SE_ConcreteJob'
+            myjob['InstanceID'] = '9999'
+            myjob['status'] = 'success'
+            myjob['type'] = 'vol1'
+
+        # Failure simulations: creating 'failed_vol' or returning device
+        # '99999' to the pool both report rc 10L with a failed job.
+        if ElementName == 'failed_vol' and \
+                MethodName == 'CreateOrModifyElementFromStoragePool':
+            rc = 10L
+            myjob['status'] = 'failure'
+
+        elif TheElements and \
+                TheElements[0]['DeviceID'] == '99999' and \
+                MethodName == 'EMCReturnToStoragePool':
+            rc = 10L
+            myjob['status'] = 'failure'
+        elif HardwareId:
+            # Target-endpoint style call: reply with two canned endpoints
+            # instead of a job dict.
+            rc = 0L
+            targetendpoints = {}
+            endpoints = []
+            endpoint = {}
+            endpoint['Name'] = '1234567890123'
+            endpoints.append(endpoint)
+            endpoint2 = {}
+            endpoint2['Name'] = '0987654321321'
+            endpoints.append(endpoint2)
+            targetendpoints['TargetEndpoints'] = endpoints
+            return rc, targetendpoints
+
+        job = {'Job': myjob}
+        return rc, job
+
+    def EnumerateInstanceNames(self, name):
+        """Dispatch on CIM class name to the matching canned name list."""
+        result = None
+        if name == 'EMC_StorageConfigurationService':
+            result = self._enum_stconfsvcs()
+        elif name == 'EMC_ControllerConfigurationService':
+            result = self._enum_ctrlconfsvcs()
+        elif name == 'Symm_ElementCompositionService':
+            result = self._enum_elemcompsvcs()
+        elif name == 'Symm_StorageRelocationService':
+            result = self._enum_storrelocsvcs()
+        elif name == 'EMC_ReplicationService':
+            result = self._enum_replicsvcs()
+        elif name == 'EMC_VirtualProvisioningPool':
+            result = self._enum_pools()
+        elif name == 'EMC_StorageVolume':
+            result = self._enum_storagevolumes()
+        elif name == 'Symm_StorageVolume':
+            result = self._enum_storagevolumes()
+        elif name == 'CIM_ProtocolControllerForUnit':
+            result = self._enum_unitnames()
+        elif name == 'EMC_LunMaskingSCSIProtocolController':
+            result = self._enum_lunmaskctrls()
+        elif name == 'EMC_StorageProcessorSystem':
+            result = self._enum_processors()
+        elif name == 'EMC_StorageHardwareIDManagementService':
+            result = self._enum_hdwidmgmts()
+        elif name == 'SE_StorageHardwareID':
+            result = self._enum_storhdwids()
+        else:
+            result = self._default_enum()
+        return result
+
+    def EnumerateInstances(self, name):
+        """Return full instances (with properties) for the given class."""
+        result = None
+        if name == 'EMC_VirtualProvisioningPool':
+            result = self._enum_pool_details()
+        elif name == 'SE_StorageHardwareID':
+            result = self._enum_storhdwids()
+        else:
+            result = self._default_enum()
+        return result
+
+    def GetInstance(self, objectpath, LocalOnly=False):
+        """Resolve an object path to a canned instance by CreationClassName.
+
+        Falls back to the path's .classname attribute when the path has no
+        'CreationClassName' key.
+        """
+
+        try:
+            name = objectpath['CreationClassName']
+        except KeyError:
+            name = objectpath.classname
+        result = None
+        if name == 'Symm_StorageVolume':
+            result = self._getinstance_storagevolume(objectpath)
+        elif name == 'CIM_ProtocolControllerForUnit':
+            result = self._getinstance_unit(objectpath)
+        elif name == 'SE_ConcreteJob':
+            result = self._getinstance_job(objectpath)
+        elif name == 'SE_StorageSynchronized_SV_SV':
+            result = self._getinstance_syncsvsv(objectpath)
+        elif name == 'Symm_TierPolicyServiceCapabilities':
+            result = self._getinstance_policycapabilities(objectpath)
+        elif name == 'CIM_TierPolicyServiceCapabilities':
+            result = self._getinstance_policycapabilities(objectpath)
+        elif name == 'SE_InitiatorMaskingGroup':
+            result = self._getinstance_initiatormaskinggroup(objectpath)
+        elif name == 'SE_StorageHardwareID':
+            result = self._getinstance_storagehardwareid(objectpath)
+        else:
+            result = self._default_getinstance(objectpath)
+
+        return result
+
+    def DeleteInstance(self, objectpath):
+        # No-op: the fake keeps no server-side state to delete.
+        pass
+
+    def Associators(self, objectpath, ResultClass='EMC_StorageHardwareID'):
+        """Return associated instances, keyed on the requested ResultClass."""
+        result = None
+        if ResultClass == 'EMC_StorageHardwareID':
+            result = self._assoc_hdwid()
+        elif ResultClass == 'EMC_iSCSIProtocolEndpoint':
+            result = self._assoc_endpoint()
+        elif ResultClass == 'EMC_StorageVolume':
+            result = self._assoc_storagevolume(objectpath)
+        else:
+            result = self._default_assoc(objectpath)
+        return result
+
+    def AssociatorNames(self, objectpath,
+                        ResultClass='default', AssocClass='default'):
+        """Return associated object names for ResultClass or AssocClass.
+
+        ResultClass and AssocClass checks are interleaved; branch order
+        matters when both are supplied.
+        """
+        result = None
+
+        if ResultClass == 'EMC_LunMaskingSCSIProtocolController':
+            result = self._assocnames_lunmaskctrl()
+        elif AssocClass == 'CIM_HostedService':
+            result = self._assocnames_hostedservice()
+        elif ResultClass == 'CIM_TierPolicyServiceCapabilities':
+            result = self._assocnames_policyCapabilities()
+        elif ResultClass == 'Symm_TierPolicyRule':
+            result = self._assocnames_policyrule()
+        elif AssocClass == 'CIM_AssociatedTierPolicy':
+            result = self._assocnames_assoctierpolicy()
+        elif ResultClass == 'CIM_StoragePool':
+            result = self._assocnames_storagepool()
+        elif ResultClass == 'EMC_VirtualProvisioningPool':
+            result = self._assocnames_storagepool()
+        elif ResultClass == 'CIM_DeviceMaskingGroup':
+            result = self._assocnames_storagegroup()
+        elif ResultClass == 'EMC_StorageVolume':
+            result = self._enum_storagevolumes()
+        elif ResultClass == 'Symm_StorageVolume':
+            result = self._enum_storagevolumes()
+        elif ResultClass == 'SE_InitiatorMaskingGroup':
+            result = self._enum_initiatorMaskingGroup()
+        elif ResultClass == 'CIM_StorageExtent':
+            result = self._enum_storage_extent()
+        elif ResultClass == 'SE_StorageHardwareID':
+            result = self._enum_storhdwids()
+
+        else:
+            result = self._default_assocnames(objectpath)
+        return result
+
+    def ReferenceNames(self, objectpath,
+                       ResultClass='CIM_ProtocolControllerForUnit'):
+        """Return association (reference) names for the object path."""
+        result = None
+        if ResultClass == 'CIM_ProtocolControllerForUnit':
+            result = self._ref_unitnames2()
+        else:
+            result = self._default_ref(objectpath)
+        return result
+
+    # NOTE(review): only _ref_unitnames2 is wired into ReferenceNames;
+    # this variant (which also sets a DeviceID on the antecedent) appears
+    # unused in this chunk -- confirm before removing.
+    def _ref_unitnames(self):
+        unitnames = []
+        unitname = {}
+
+        dependent = {}
+        dependent['CreationClassName'] = self.data.vol_creationclass
+        dependent['DeviceID'] = self.data.test_volume['id']
+        dependent['ElementName'] = self.data.test_volume['name']
+        dependent['SystemName'] = self.data.storage_system
+
+        antecedent = {}
+        antecedent['CreationClassName'] = self.data.lunmask_creationclass
+        antecedent['DeviceID'] = self.data.lunmaskctrl_id
+        antecedent['SystemName'] = self.data.storage_system
+
+        unitname['Dependent'] = dependent
+        unitname['Antecedent'] = antecedent
+        unitname['CreationClassName'] = self.data.unit_creationclass
+        unitnames.append(unitname)
+
+        return unitnames
+
+    def _ref_unitnames2(self):
+        # One ProtocolControllerForUnit reference tying the test volume
+        # (Dependent) to the masking view (Antecedent).
+        unitnames = []
+        unitname = {}
+
+        dependent = {}
+        dependent['CreationClassName'] = self.data.vol_creationclass
+        dependent['DeviceID'] = self.data.test_volume['id']
+        dependent['ElementName'] = self.data.test_volume['name']
+        dependent['SystemName'] = self.data.storage_system
+
+        antecedent = {}
+        antecedent['CreationClassName'] = self.data.lunmask_creationclass2
+        antecedent['SystemName'] = self.data.storage_system
+
+        unitname['Dependent'] = dependent
+        unitname['Antecedent'] = antecedent
+        unitname['CreationClassName'] = self.data.unit_creationclass
+        unitnames.append(unitname)
+
+        return unitnames
+
+    def _default_ref(self, objectpath):
+        return objectpath
+
+    def _assoc_hdwid(self):
+        # One association for the iSCSI initiator plus one per FC wwpn.
+        assocs = []
+        assoc = {}
+        assoc['StorageID'] = self.data.connector['initiator']
+        assocs.append(assoc)
+        for wwpn in self.data.connector['wwpns']:
+            assoc2 = {}
+            assoc2['StorageID'] = wwpn
+            assocs.append(assoc2)
+        return assocs
+
+    def _assoc_endpoint(self):
+        assocs = []
+        assoc = {}
+        assoc['Name'] = 'iqn.1992-04.com.emc: 50000973f006dd80'
+        assoc['SystemName'] = self.data.storage_system
+        assocs.append(assoc)
+        return assocs
+
+    # Added test for EMC_StorageVolume associators
+    def _assoc_storagevolume(self, objectpath):
+        # Map the job 'type' tag set in InvokeMethod back to the volume
+        # fixture it refers to; unknown tags yield None.
+        assocs = []
+        if 'type' not in objectpath:
+            vol = self.data.test_volume
+        elif objectpath['type'] == 'failed_delete_vol':
+            vol = self.data.failed_delete_vol
+        elif objectpath['type'] == 'vol1':
+            vol = self.data.test_volume
+        elif objectpath['type'] == 'appendVolume':
+            vol = self.data.test_volume
+        elif objectpath['type'] == 'failed_vol':
+            vol = self.data.test_failed_volume
+        else:
+            return None
+
+        vol['DeviceID'] = vol['id']
+        assoc = self._getinstance_storagevolume(vol)
+        assocs.append(assoc)
+        return assocs
+
+    def _default_assoc(self, objectpath):
+        return objectpath
+
+    def _assocnames_lunmaskctrl(self):
+        return self._enum_lunmaskctrls()
+
+    def _assocnames_hostedservice(self):
+        return self._enum_hostedservice()
+
+    def _assocnames_policyCapabilities(self):
+        return self._enum_policycapabilities()
+
+    def _assocnames_policyrule(self):
+        return self._enum_policyrules()
+
+    def _assocnames_assoctierpolicy(self):
+        return self._enum_assoctierpolicy()
+
+    def _assocnames_storagepool(self):
+        return self._enum_storagepool()
+
+    def _assocnames_storagegroup(self):
+        return self._enum_storagegroup()
+
+    def _assocnames_storagevolume(self):
+        return self._enum_storagevolume()
+
+    def _default_assocnames(self, objectpath):
+        return objectpath
+
+    def _getinstance_storagevolume(self, objectpath):
+        # NOTE(review): if no DeviceID matches, `instance` stays the empty
+        # EMC_StorageVolume created above, so what is returned depends on
+        # that type's truthiness -- verify the intended miss behavior.
+        foundinstance = None
+        instance = EMC_StorageVolume()
+        vols = self._enum_storagevolumes()
+
+        for vol in vols:
+            if vol['DeviceID'] == objectpath['DeviceID']:
+                instance = vol
+                break
+        if not instance:
+            foundinstance = None
+        else:
+            foundinstance = instance
+
+        return foundinstance
+
+    def _getinstance_lunmask(self):
+        lunmask = {}
+        lunmask['CreationClassName'] = self.data.lunmask_creationclass
+        lunmask['DeviceID'] = self.data.lunmaskctrl_id
+        lunmask['SystemName'] = self.data.storage_system
+        return lunmask
+
+    def _getinstance_initiatormaskinggroup(self, objectpath):
+
+        initiatorgroup = SE_InitiatorMaskingGroup()
+        initiatorgroup['CreationClassName'] = (
+            self.data.initiatorgroup_creationclass)
+        initiatorgroup['DeviceID'] = self.data.initiatorgroup_id
+        initiatorgroup['SystemName'] = self.data.storage_system
+        # The instance doubles as its own object path, as elsewhere in
+        # this fake.
+        initiatorgroup.path = initiatorgroup
+        return initiatorgroup
+
+    def _getinstance_storagehardwareid(self, objectpath):
+        hardwareid = SE_StorageHardwareID()
+        hardwareid['CreationClassName'] = self.data.hardwareid_creationclass
+        hardwareid['SystemName'] = self.data.storage_system
+        hardwareid['StorageID'] = self.data.connector['wwpns'][0]
+        hardwareid.path = hardwareid
+        return hardwareid
+
+    def _getinstance_unit(self, objectpath):
+        # ProtocolControllerForUnit instance mapping the test volume to
+        # host LUN number '1'.
+        unit = {}
+
+        dependent = {}
+        dependent['CreationClassName'] = self.data.vol_creationclass
+        dependent['DeviceID'] = self.data.test_volume['id']
+        dependent['ElementName'] = self.data.test_volume['name']
+        dependent['SystemName'] = self.data.storage_system
+
+        antecedent = {}
+        antecedent['CreationClassName'] = self.data.lunmask_creationclass
+        antecedent['DeviceID'] = self.data.lunmaskctrl_id
+        antecedent['SystemName'] = self.data.storage_system
+
+        unit['Dependent'] = dependent
+        unit['Antecedent'] = antecedent
+        unit['CreationClassName'] = self.data.unit_creationclass
+        unit['DeviceNumber'] = '1'
+
+        return unit
+
+    def _getinstance_job(self, jobpath):
+        # JobState 7 = completed, 10 = failed branch used by the
+        # negative-path tests (see _getinstance_* callers).
+        jobinstance = {}
+        jobinstance['InstanceID'] = '9999'
+        if jobpath['status'] == 'failure':
+            jobinstance['JobState'] = 10
+            jobinstance['ErrorCode'] = 99
+            jobinstance['ErrorDescription'] = 'Failure'
+        else:
+            jobinstance['JobState'] = 7
+            jobinstance['ErrorCode'] = 0
+            jobinstance['ErrorDescription'] = ''
+        return jobinstance
+
+    def _getinstance_policycapabilities(self, policycapabilitypath):
+        instance = Fake_CIM_TierPolicyServiceCapabilities()
+        fakeinstance = instance.fake_getpolicyinstance()
+        return fakeinstance
+
+    def _default_getinstance(self, objectpath):
+        return objectpath
+
+    # The _enum_* helpers below each return a one-element list of canned
+    # service / pool / volume / group descriptors.
+    def _enum_stconfsvcs(self):
+        conf_services = []
+        conf_service = {}
+        conf_service['SystemName'] = self.data.storage_system
+        conf_service['CreationClassName'] =\
+            self.data.stconf_service_creationclass
+        conf_services.append(conf_service)
+        return conf_services
+
+    def _enum_ctrlconfsvcs(self):
+        conf_services = []
+        conf_service = {}
+        conf_service['SystemName'] = self.data.storage_system
+        conf_service['CreationClassName'] =\
+            self.data.ctrlconf_service_creationclass
+        conf_services.append(conf_service)
+        return conf_services
+
+    def _enum_elemcompsvcs(self):
+        comp_services = []
+        comp_service = {}
+        comp_service['SystemName'] = self.data.storage_system
+        comp_service['CreationClassName'] =\
+            self.data.elementcomp_service_creationclass
+        comp_services.append(comp_service)
+        return comp_services
+
+    def _enum_storrelocsvcs(self):
+        reloc_services = []
+        reloc_service = {}
+        reloc_service['SystemName'] = self.data.storage_system
+        reloc_service['CreationClassName'] =\
+            self.data.storreloc_service_creationclass
+        reloc_services.append(reloc_service)
+        return reloc_services
+
+    def _enum_replicsvcs(self):
+        replic_services = []
+        replic_service = {}
+        replic_service['SystemName'] = self.data.storage_system
+        replic_service['CreationClassName'] =\
+            self.data.replication_service_creationclass
+        replic_services.append(replic_service)
+        return replic_services
+
+    def _enum_pools(self):
+        pools = []
+        pool = {}
+        pool['InstanceID'] = self.data.storage_system + '+U+' +\
+            self.data.storage_type
+        pool['CreationClassName'] = 'Symm_VirtualProvisioningPool'
+        pool['ElementName'] = 'gold'
+        pools.append(pool)
+        return pools
+
+    def _enum_pool_details(self):
+        # Same pool as _enum_pools but with capacity figures, for the
+        # volume-stats path.
+        pools = []
+        pool = {}
+        pool['InstanceID'] = self.data.storage_system + '+U+' +\
+            self.data.storage_type
+        pool['CreationClassName'] = 'Symm_VirtualProvisioningPool'
+        pool['TotalManagedSpace'] = 12345678
+        pool['RemainingManagedSpace'] = 123456
+        pools.append(pool)
+        return pools
+
+    def _enum_storagevolumes(self):
+        """Return the three volume fixtures: good, failed-delete, failed."""
+        vols = []
+
+        vol = EMC_StorageVolume()
+        vol['name'] = self.data.test_volume['name']
+        vol['CreationClassName'] = 'Symm_StorageVolume'
+        vol['ElementName'] = self.data.test_volume['name']
+        vol['DeviceID'] = self.data.test_volume['id']
+        vol['SystemName'] = self.data.storage_system
+
+        # Added vol to vol.path
+        vol['SystemCreationClassName'] = 'Symm_StorageSystem'
+        vol.path = vol
+        vol.path.classname = vol['CreationClassName']
+
+        classcimproperty = Fake_CIMProperty()
+        blocksizecimproperty = classcimproperty.fake_getBlockSizeCIMProperty()
+        consumableBlockscimproperty = (
+            classcimproperty.fake_getConsumableBlocksCIMProperty())
+        isCompositecimproperty = (
+            classcimproperty.fake_getIsCompositeCIMProperty())
+        # NOTE(review): the values look swapped -- 'ConsumableBlocks' is
+        # given the BlockSize property and 'BlockSize' the ConsumableBlocks
+        # one; confirm whether the tests depend on this.
+        properties = {u'ConsumableBlocks': blocksizecimproperty,
+                      u'BlockSize': consumableBlockscimproperty,
+                      u'IsComposite': isCompositecimproperty}
+        vol.properties = properties
+
+        name = {}
+        name['classname'] = 'Symm_StorageVolume'
+        keys = {}
+        keys['CreationClassName'] = 'Symm_StorageVolume'
+        keys['SystemName'] = self.data.storage_system
+        keys['DeviceID'] = vol['DeviceID']
+        keys['SystemCreationClassName'] = 'Symm_StorageSystem'
+        name['keybindings'] = keys
+
+        vol['provider_location'] = str(name)
+
+        vols.append(vol)
+
+        failed_delete_vol = EMC_StorageVolume()
+        failed_delete_vol['name'] = 'failed_delete_vol'
+        failed_delete_vol['CreationClassName'] = 'Symm_StorageVolume'
+        failed_delete_vol['ElementName'] = 'failed_delete_vol'
+        failed_delete_vol['DeviceID'] = '99999'
+        failed_delete_vol['SystemName'] = self.data.storage_system
+        # Added vol to vol.path
+        failed_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
+        failed_delete_vol.path = failed_delete_vol
+        failed_delete_vol.path.classname =\
+            failed_delete_vol['CreationClassName']
+        vols.append(failed_delete_vol)
+
+        failed_vol = EMC_StorageVolume()
+        failed_vol['name'] = 'failed__vol'
+        failed_vol['CreationClassName'] = 'Symm_StorageVolume'
+        failed_vol['ElementName'] = 'failed_vol'
+        failed_vol['DeviceID'] = '4'
+        failed_vol['SystemName'] = self.data.storage_system
+        # Added vol to vol.path
+        failed_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
+        failed_vol.path = failed_vol
+        failed_vol.path.classname =\
+            failed_vol['CreationClassName']
+
+        name_failed = {}
+        name_failed['classname'] = 'Symm_StorageVolume'
+        keys_failed = {}
+        keys_failed['CreationClassName'] = 'Symm_StorageVolume'
+        keys_failed['SystemName'] = self.data.storage_system
+        keys_failed['DeviceID'] = failed_vol['DeviceID']
+        keys_failed['SystemCreationClassName'] = 'Symm_StorageSystem'
+        name_failed['keybindings'] = keys_failed
+        failed_vol['provider_location'] = str(name_failed)
+
+        vols.append(failed_vol)
+
+        return vols
+
+    def _enum_initiatorMaskingGroup(self):
+        initatorgroups = []
+        initatorgroup = {}
+        initatorgroup['CreationClassName'] = (
+            self.data.initiatorgroup_creationclass)
+        initatorgroup['DeviceID'] = self.data.initiatorgroup_id
+        initatorgroup['SystemName'] = self.data.storage_system
+        initatorgroup['ElementName'] = self.data.initiatorgroup_name
+#         initatorgroup.path = initatorgroup
+#         initatorgroup.path.classname = initatorgroup['CreationClassName']
+        initatorgroups.append(initatorgroup)
+        return initatorgroups
+
+    def _enum_storage_extent(self):
+        storageExtents = []
+        storageExtent = CIM_StorageExtent()
+        storageExtent['CreationClassName'] = (
+            self.data.storageextent_creationclass)
+
+        classcimproperty = Fake_CIMProperty()
+        isConcatenatedcimproperty = (
+            classcimproperty.fake_getIsConcatenatedCIMProperty())
+        properties = {u'IsConcatenated': isConcatenatedcimproperty}
+        storageExtent.properties = properties
+
+        storageExtents.append(storageExtent)
+        return storageExtents
+
+    def _enum_lunmaskctrls(self):
+        ctrls = []
+        ctrl = {}
+        ctrl['CreationClassName'] = self.data.lunmask_creationclass
+        ctrl['DeviceID'] = self.data.lunmaskctrl_id
+        ctrl['SystemName'] = self.data.storage_system
+        ctrl['ElementName'] = self.data.lunmaskctrl_name
+        ctrls.append(ctrl)
+        return ctrls
+
+    def _enum_hostedservice(self):
+        hostedservices = []
+        hostedservice = {}
+        hostedservice['CreationClassName'] = (
+            self.data.hostedservice_creationclass)
+        hostedservice['SystemName'] = self.data.storage_system
+        hostedservices.append(hostedservice)
+        return hostedservices
+
+    def _enum_policycapabilities(self):
+        policycapabilities = []
+        policycapability = {}
+        policycapability['CreationClassName'] = (
+            self.data.policycapability_creationclass)
+        policycapability['SystemName'] = self.data.storage_system
+
+        propertiesList = []
+        CIMProperty = {'is_array': True}
+        properties = {u'SupportedTierFeatures': CIMProperty}
+        propertiesList.append(properties)
+        policycapability['Properties'] = propertiesList
+
+        policycapabilities.append(policycapability)
+
+        return policycapabilities
+
+    def _enum_policyrules(self):
+        policyrules = []
+        policyrule = {}
+        policyrule['CreationClassName'] = self.data.policyrule_creationclass
+        policyrule['SystemName'] = self.data.storage_system
+        policyrule['PolicyRuleName'] = self.data.policyrule
+        policyrules.append(policyrule)
+        return policyrules
+
+    def _enum_assoctierpolicy(self):
+        assoctierpolicies = []
+        assoctierpolicy = {}
+        assoctierpolicy['CreationClassName'] = (
+            self.data.assoctierpolicy_creationclass)
+        assoctierpolicies.append(assoctierpolicy)
+        return assoctierpolicies
+
+    def _enum_storagepool(self):
+        storagepools = []
+        storagepool = {}
+        storagepool['CreationClassName'] = self.data.storagepool_creationclass
+        storagepool['InstanceID'] = self.data.storagepoolid
+        storagepool['ElementName'] = 'gold'
+        storagepools.append(storagepool)
+        return storagepools
+
+    def _enum_storagegroup(self):
+        storagegroups = []
+        storagegroup = {}
+        storagegroup['CreationClassName'] = (
+            self.data.storagegroup_creationclass)
+        storagegroup['ElementName'] = self.data.storagegroupname
+        storagegroups.append(storagegroup)
+        return storagegroups
+
+    def _enum_storagevolume(self):
+        storagevolumes = []
+        storagevolume = {}
+        storagevolume['CreationClassName'] = (
+            self.data.storagevolume_creationclass)
+        storagevolumes.append(storagevolume)
+        return storagevolumes
+
+    def _enum_hdwidmgmts(self):
+        services = []
+        srv = {}
+        srv['SystemName'] = self.data.storage_system
+        services.append(srv)
+        return services
+
+    def _enum_storhdwids(self):
+        storhdwids = []
+        hdwid = SE_StorageHardwareID()
+        hdwid['CreationClassName'] = self.data.hardwareid_creationclass
+        hdwid['StorageID'] = self.data.connector['wwpns'][0]
+
+        hdwid.path = hdwid
+        storhdwids.append(hdwid)
+        return storhdwids
+
+    def _default_enum(self):
+        # Fallback for class names the fake does not model.
+        names = []
+        name = {}
+        name['Name'] = 'default'
+        names.append(name)
+        return names
+
+class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase):
+    def setUp(self):
+
+        self.data = EMCVMAXCommonData()
+
+        self.tempdir = tempfile.mkdtemp()
+        super(EMCVMAXISCSIDriverNoFastTestCase, self).setUp()
+        self.config_file_path = None
+        self.create_fake_config_file_no_fast()
+
+        configuration = mock.Mock()
+        configuration.safe_get.return_value = 'ISCSINoFAST'
+        configuration.cinder_emc_config_file = self.config_file_path
+        configuration.config_group = 'ISCSINoFAST'
+
+        self.stubs.Set(EMCVMAXISCSIDriver, 'smis_do_iscsi_discovery',
+                       self.fake_do_iscsi_discovery)
+        self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
+                       self.fake_ecom_connection)
+        instancename = FakeCIMInstanceName()
+        self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
+                       instancename.fake_getinstancename)
+        self.stubs.Set(time, 'sleep',
+                       self.fake_sleep)
+
+        driver = EMCVMAXISCSIDriver(configuration=configuration)
+        driver.db = FakeDB()
+        self.driver = driver
+
+    def create_fake_config_file_no_fast(self):
+
+        doc = Document()
+        emc = doc.createElement("EMC")
+        doc.appendChild(emc)
+
+        array = doc.createElement("Array")
+        arraytext = doc.createTextNode("1234567891011")
+        emc.appendChild(array)
+        array.appendChild(arraytext)
+
+        ecomserverip = doc.createElement("EcomServerIp")
+        ecomserveriptext = doc.createTextNode("1.1.1.1")
+        emc.appendChild(ecomserverip)
+        ecomserverip.appendChild(ecomserveriptext)
+
+        ecomserverport = doc.createElement("EcomServerPort")
+        ecomserverporttext = doc.createTextNode("10")
+        emc.appendChild(ecomserverport)
+        ecomserverport.appendChild(ecomserverporttext)
+
+        ecomusername = doc.createElement("EcomUserName")
+        ecomusernametext = doc.createTextNode("user")
+        emc.appendChild(ecomusername)
+        ecomusername.appendChild(ecomusernametext)
+
+        ecompassword = doc.createElement("EcomPassword")
+        ecompasswordtext = doc.createTextNode("pass")
+        emc.appendChild(ecompassword)
+        ecompassword.appendChild(ecompasswordtext)
+
+        portgroup = doc.createElement("PortGroup")
+        portgrouptext = doc.createTextNode("myPortGroup")
+        portgroup.appendChild(portgrouptext)
+
+        portgroups = doc.createElement("PortGroups")
+        portgroups.appendChild(portgroup)
+        emc.appendChild(portgroups)
+
+        pool = doc.createElement("Pool")
+        pooltext = doc.createTextNode("gold")
+        emc.appendChild(pool)
+        pool.appendChild(pooltext)
+
+        array = doc.createElement("Array")
+        arraytext = doc.createTextNode("0123456789")
+        emc.appendChild(array)
+        array.appendChild(arraytext)
+
+        timeout = doc.createElement("Timeout")
+        timeouttext = doc.createTextNode("0")
+        emc.appendChild(timeout)
+        timeout.appendChild(timeouttext)
+
+        filename = 'cinder_emc_config_ISCSINoFAST.xml'
+
+        self.config_file_path = self.tempdir + '/' + filename
+
+        f = open(self.config_file_path, 'w')
+        doc.writexml(f)
+        f.close()
+
+    def fake_ecom_connection(self):
+        conn = FakeEcomConnection()
+        return conn
+
+    def fake_do_iscsi_discovery(self, volume, ipAddress):
+        output = []
+        item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80'
+        output.append(item)
+        return output
+
+    def fake_sleep(self, seconds):
+        return
+
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storageSystem',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXFast,
+        'is_tiering_policy_enabled',
+        return_value=False)
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'get_pool_capacities',
+        return_value=(1234, 1200))
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'parse_array_name_from_file',
+        return_value="123456789")
+    def test_get_volume_stats_no_fast(self, mock_storage_system,
+                                      mock_is_fast_enabled,
+                                      mock_capacity, mock_array):
+        self.driver.get_volume_stats(True)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    def test_create_volume_no_fast_success(
            self, _mock_volume_type, mock_storage_system):
        """create_volume succeeds with FAST disabled.

        NOTE(review): @mock.patch.object injects mocks bottom-up, so
        _mock_volume_type actually holds the _get_pool_and_storage_system
        mock and mock_storage_system holds the extra-specs mock; the
        names are swapped (the mocks are unused, so behavior is unaffected).
        """
        self.driver.create_volume(self.data.test_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'storagetype: stripedmetacount': '4',
                      'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    def test_create_volume_no_fast_striped_success(
            self, _mock_volume_type, mock_storage_system):
        """create_volume succeeds for a striped (metacount=4) volume."""
        self.driver.create_volume(self.data.test_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    def test_delete_volume_no_fast_success(
            self, _mock_volume_type, mock_storage_system):
        """delete_volume succeeds with FAST disabled."""
        self.driver.delete_volume(self.data.test_volume)

    def test_create_volume_no_fast_failed(self):
        """create_volume raises VolumeBackendAPIException for a bad volume."""
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          self.data.test_failed_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    def test_delete_volume_no_fast_notfound(self, _mock_volume_type):
        """delete_volume of a volume the backend cannot find is a no-op."""
        notfound_delete_vol = {}
        notfound_delete_vol['name'] = 'notfound_delete_vol'
        notfound_delete_vol['id'] = '10'
        notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
        notfound_delete_vol['SystemName'] = self.data.storage_system
        notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
        notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
        notfound_delete_vol['volume_type_id'] = 'abc'
        notfound_delete_vol['provider_location'] = None
        # NOTE(review): the name/keys dicts below are built but never
        # passed anywhere in this test -- they look like leftovers.
        name = {}
        name['classname'] = 'Symm_StorageVolume'
        keys = {}
        keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
        keys['SystemName'] = notfound_delete_vol['SystemName']
        keys['DeviceID'] = notfound_delete_vol['DeviceID']
        keys['SystemCreationClassName'] =\
            notfound_delete_vol['SystemCreationClassName']
        name['keybindings'] = keys

        self.driver.delete_volume(notfound_delete_vol)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    def test_delete_volume_failed(
            self, _mock_volume_type, mock_storage_system):
        """delete_volume raises when the backend delete fails."""
        self.driver.create_volume(self.data.failed_delete_vol)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_volume,
                          self.data.failed_delete_vol)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    @mock.patch.object(
        EMCVMAXCommon,
        '_wrap_find_device_number',
        return_value={'hostlunid': 1,
                      'storagesystem': EMCVMAXCommonData.storage_system})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_ip_protocol_endpoint',
        return_value='10.10.10.10')
    def test_map_no_fast_success(self, _mock_volume_type, mock_wrap_group,
                                 mock_wrap_device, mock_find_ip):
        """initialize_connection succeeds when a host LUN id is returned."""
        self.driver.initialize_connection(self.data.test_volume,
                                          self.data.connector)

    @mock.patch.object(
        EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    @mock.patch.object(
        EMCVMAXCommon,
        '_wrap_find_device_number',
        return_value={'storagesystem': EMCVMAXCommonData.storage_system})
    def test_map_no_fast_failed(self, mock_wrap_group, mock_wrap_device):
        """initialize_connection raises when no 'hostlunid' is returned."""
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          self.data.test_volume,
                          self.data.connector)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_masking_group',
        return_value=EMCVMAXCommonData.storagegroupname)
    def test_detach_no_fast_success(self, mock_volume_type,
                                    mock_storage_group):
        """terminate_connection succeeds with FAST disabled."""

        self.driver.terminate_connection(
            self.data.test_volume, self.data.connector)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXUtils, 'find_storage_system',
        return_value={'Name': EMCVMAXCommonData.storage_system})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_masking_group',
        return_value=EMCVMAXCommonData.storagegroupname)
    def test_detach_no_fast_last_volume_success(
            self, mock_volume_type,
            mock_storage_system, mock_storage_group):
        """terminate_connection succeeds for the last volume in the group."""
        self.driver.terminate_connection(
            self.data.test_volume, self.data.connector)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXUtils,
        'get_volume_size',
        return_value='2147483648')
    def test_extend_volume_no_fast_success(
            self, _mock_volume_type, mock_volume_size):
        """extend_volume to 2GB succeeds with FAST disabled."""
        newSize = '2'
        self.driver.extend_volume(self.data.test_volume, newSize)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'storagetype: stripedmetacount': '4',
                      'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXUtils,
        'check_if_volume_is_concatenated',
        return_value='False')
    def test_extend_volume_striped_no_fast_failed(
            self, _mock_volume_type, _mock_is_concatenated):
        """extend_volume raises for a striped (non-concatenated) volume."""
        newSize = '2'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          self.data.test_volume,
                          newSize)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    @mock.patch.object(
        EMCVMAXCommon,
        '_find_storage_sync_sv_sv',
        return_value=(None, None))
    def test_create_snapshot_no_fast_success(
            self, mock_volume_type,
            mock_volume, mock_sync_sv):
        """create_snapshot succeeds with FAST disabled.

        NOTE(review): this mutates self.data.test_volume in place --
        confirm EMCVMAXCommonData.test_volume is not shared class state
        that leaks into other tests.
        """
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.driver.create_snapshot(self.data.test_volume)

    def test_create_snapshot_no_fast_failed(self):
        """create_snapshot raises when no mocks supply replication data."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot,
                          self.data.test_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    @mock.patch.object(
        EMCVMAXCommon,
        '_find_storage_sync_sv_sv',
        return_value=(None, None))
    def test_create_volume_from_snapshot_no_fast_success(
            self, mock_volume_type,
            mock_volume, mock_sync_sv):
        """create_volume_from_snapshot succeeds with FAST disabled."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.driver.create_volume_from_snapshot(
            self.data.test_volume, EMCVMAXCommonData.test_source_volume)

    def test_create_volume_from_snapshot_no_fast_failed(self):
        """create_volume_from_snapshot raises without mocks in place."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          self.data.test_volume,
                          EMCVMAXCommonData.test_source_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    @mock.patch.object(
        EMCVMAXCommon,
        '_find_storage_sync_sv_sv',
        return_value=(None, None))
    def test_create_clone_no_fast_success(self, mock_volume_type,
                                          mock_volume, mock_sync_sv):
        """create_cloned_volume succeeds with FAST disabled."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.driver.create_cloned_volume(self.data.test_volume,
                                         EMCVMAXCommonData.test_source_volume)

    def test_create_clone_no_fast_failed(self):
        """create_cloned_volume raises without mocks in place."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          self.data.test_volume,
                          EMCVMAXCommonData.test_source_volume)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    def test_migrate_volume_no_fast_success(self, _mock_volume_type):
        """migrate_volume succeeds with FAST disabled."""
        self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
                                   self.data.test_host)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSINoFAST'})
    @mock.patch.object(
        EMCVMAXUtils,
        'parse_pool_instance_id',
        return_value=('silver', 'SYMMETRIX+000195900551'))
    def test_retype_volume_no_fast_success(
            self, _mock_volume_type, mock_values):
        """retype succeeds with FAST disabled."""
        self.driver.retype(
            self.data.test_ctxt, self.data.test_volume, self.data.new_type,
            self.data.diff, self.data.test_host)
+
+    def _cleanup(self):
+        bExists = os.path.exists(self.config_file_path)
+        if bExists:
+            os.remove(self.config_file_path)
+        shutil.rmtree(self.tempdir)
+
    def tearDown(self):
        """Delete the temp config file/dir, then run the base teardown."""
        self._cleanup()
        super(EMCVMAXISCSIDriverNoFastTestCase, self).tearDown()
+
+
class EMCVMAXISCSIDriverFastTestCase(test.TestCase):
    """Tests for the VMAX iSCSI driver with a FAST policy configured."""

    def setUp(self):
        """Write a FAST config file and build a driver wired to fakes."""

        self.data = EMCVMAXCommonData()

        self.tempdir = tempfile.mkdtemp()
        super(EMCVMAXISCSIDriverFastTestCase, self).setUp()
        self.config_file_path = None
        self.create_fake_config_file_fast()

        configuration = mock.Mock()
        configuration.cinder_emc_config_file = self.config_file_path
        configuration.safe_get.return_value = 'ISCSIFAST'
        configuration.config_group = 'ISCSIFAST'

        # Stub out iSCSI discovery, the ECOM connection, CIM instance
        # naming and time.sleep so no real SMI-S provider is required.
        self.stubs.Set(EMCVMAXISCSIDriver, 'smis_do_iscsi_discovery',
                       self.fake_do_iscsi_discovery)
        self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
                       self.fake_ecom_connection)
        instancename = FakeCIMInstanceName()
        self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
                       instancename.fake_getinstancename)
        self.stubs.Set(time, 'sleep',
                       self.fake_sleep)
        driver = EMCVMAXISCSIDriver(configuration=configuration)
        driver.db = FakeDB()
        self.driver = driver
+
+    def create_fake_config_file_fast(self):
+
+        doc = Document()
+        emc = doc.createElement("EMC")
+        doc.appendChild(emc)
+
+        array = doc.createElement("Array")
+        arraytext = doc.createTextNode("1234567891011")
+        emc.appendChild(array)
+        array.appendChild(arraytext)
+
+        fastPolicy = doc.createElement("FastPolicy")
+        fastPolicyText = doc.createTextNode("GOLD1")
+        emc.appendChild(fastPolicy)
+        fastPolicy.appendChild(fastPolicyText)
+
+        ecomserverip = doc.createElement("EcomServerIp")
+        ecomserveriptext = doc.createTextNode("1.1.1.1")
+        emc.appendChild(ecomserverip)
+        ecomserverip.appendChild(ecomserveriptext)
+
+        ecomserverport = doc.createElement("EcomServerPort")
+        ecomserverporttext = doc.createTextNode("10")
+        emc.appendChild(ecomserverport)
+        ecomserverport.appendChild(ecomserverporttext)
+
+        ecomusername = doc.createElement("EcomUserName")
+        ecomusernametext = doc.createTextNode("user")
+        emc.appendChild(ecomusername)
+        ecomusername.appendChild(ecomusernametext)
+
+        ecompassword = doc.createElement("EcomPassword")
+        ecompasswordtext = doc.createTextNode("pass")
+        emc.appendChild(ecompassword)
+        ecompassword.appendChild(ecompasswordtext)
+
+        timeout = doc.createElement("Timeout")
+        timeouttext = doc.createTextNode("0")
+        emc.appendChild(timeout)
+        timeout.appendChild(timeouttext)
+
+        portgroup = doc.createElement("PortGroup")
+        portgrouptext = doc.createTextNode("myPortGroup")
+        portgroup.appendChild(portgrouptext)
+
+        pool = doc.createElement("Pool")
+        pooltext = doc.createTextNode("gold")
+        emc.appendChild(pool)
+        pool.appendChild(pooltext)
+
+        array = doc.createElement("Array")
+        arraytext = doc.createTextNode("0123456789")
+        emc.appendChild(array)
+        array.appendChild(arraytext)
+
+        portgroups = doc.createElement("PortGroups")
+        portgroups.appendChild(portgroup)
+        emc.appendChild(portgroups)
+
+        filename = 'cinder_emc_config_ISCSIFAST.xml'
+
+        self.config_file_path = self.tempdir + '/' + filename
+
+        f = open(self.config_file_path, 'w')
+        doc.writexml(f)
+        f.close()
+
+    def fake_ecom_connection(self):
+        conn = FakeEcomConnection()
+        return conn
+
+    def fake_do_iscsi_discovery(self, volume, ipAddress):
+        output = []
+        item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80'
+        output.append(item)
+        return output
+
+    def fake_sleep(self, seconds):
+        return
+
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storageSystem',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXFast,
+        'is_tiering_policy_enabled',
+        return_value=True)
+    @mock.patch.object(
+        EMCVMAXFast,
+        'get_tier_policy_by_name',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXFast,
+        'get_capacities_associated_to_policy',
+        return_value=(1234, 1200))
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'parse_array_name_from_file',
+        return_value="123456789")
+    def test_get_volume_stats_fast(self, mock_storage_system,
+                                   mock_is_fast_enabled,
+                                   mock_get_policy, mock_capacity, mock_array):
+        self.driver.get_volume_stats(True)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        EMCVMAXFast,
        'get_pool_associated_to_policy',
        return_value=1)
    def test_create_volume_fast_success(
            self, _mock_volume_type, mock_storage_system, mock_pool_policy):
        """create_volume succeeds under a FAST policy.

        NOTE(review): @mock.patch.object injects mocks bottom-up, so the
        parameter names here do not match the mocks they receive (the
        mocks are unused, so behavior is unaffected).
        """
        self.driver.create_volume(self.data.test_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'storagetype: stripedmetacount': '4',
                      'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        EMCVMAXFast,
        'get_pool_associated_to_policy',
        return_value=1)
    def test_create_volume_fast_striped_success(
            self, _mock_volume_type, mock_storage_system, mock_pool_policy):
        """create_volume succeeds for a striped volume under FAST."""
        self.driver.create_volume(self.data.test_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    def test_delete_volume_fast_success(
            self, _mock_volume_type, mock_storage_group):
        """delete_volume succeeds under FAST."""
        self.driver.delete_volume(self.data.test_volume)

    def test_create_volume_fast_failed(self):
        """create_volume raises VolumeBackendAPIException for a bad volume."""
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          self.data.test_failed_volume)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    def test_delete_volume_fast_notfound(
            self, _mock_volume_type, mock_wrapper):
        """delete_volume of an unknown volume is a no-op under FAST."""
        notfound_delete_vol = {}
        notfound_delete_vol['name'] = 'notfound_delete_vol'
        notfound_delete_vol['id'] = '10'
        notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
        notfound_delete_vol['SystemName'] = self.data.storage_system
        notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
        notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
        # NOTE(review): the name/keys dicts below are never used by this
        # test -- they look like leftovers.
        name = {}
        name['classname'] = 'Symm_StorageVolume'
        keys = {}
        keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
        keys['SystemName'] = notfound_delete_vol['SystemName']
        keys['DeviceID'] = notfound_delete_vol['DeviceID']
        keys['SystemCreationClassName'] =\
            notfound_delete_vol['SystemCreationClassName']
        name['keybindings'] = keys
        notfound_delete_vol['volume_type_id'] = 'abc'
        notfound_delete_vol['provider_location'] = None
        self.driver.delete_volume(notfound_delete_vol)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_pool_and_storage_system',
        return_value=(None, EMCVMAXCommonData.storage_system))
    @mock.patch.object(
        EMCVMAXFast,
        'get_pool_associated_to_policy',
        return_value=1)
    def test_delete_volume_fast_failed(
            self, _mock_volume_type, _mock_storage_group,
            mock_storage_system, mock_policy_pool):
        """delete_volume raises when the backend delete fails under FAST."""
        self.driver.create_volume(self.data.failed_delete_vol)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_volume,
                          self.data.failed_delete_vol)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    @mock.patch.object(
        EMCVMAXCommon,
        '_wrap_find_device_number',
        return_value={'hostlunid': 1,
                      'storagesystem': EMCVMAXCommonData.storage_system})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_ip_protocol_endpoint',
        return_value='10.10.10.10')
    def test_map_fast_success(self, _mock_volume_type, mock_wrap_group,
                              mock_wrap_device, mock_find_ip):
        """initialize_connection succeeds under FAST with a host LUN id."""
        self.driver.initialize_connection(self.data.test_volume,
                                          self.data.connector)

    @mock.patch.object(
        EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    @mock.patch.object(
        EMCVMAXCommon,
        '_wrap_find_device_number',
        return_value={'storagesystem': EMCVMAXCommonData.storage_system})
    def test_map_fast_failed(self, mock_wrap_group, mock_wrap_device):
        """initialize_connection raises when no 'hostlunid' is returned."""
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          self.data.test_volume,
                          self.data.connector)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_masking_group',
        return_value=EMCVMAXCommonData.storagegroupname)
    def test_detach_fast_success(self, mock_volume_type,
                                 mock_storage_group):
        """terminate_connection succeeds under FAST."""

        self.driver.terminate_connection(
            self.data.test_volume, self.data.connector)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXUtils, 'find_storage_system',
        return_value={'Name': EMCVMAXCommonData.storage_system})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_masking_group',
        return_value=EMCVMAXCommonData.storagegroupname)
    def test_detach_fast_last_volume_success(
            self, mock_volume_type,
            mock_storage_system, mock_storage_group):
        """terminate_connection succeeds for the group's last volume."""
        self.driver.terminate_connection(
            self.data.test_volume, self.data.connector)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXUtils,
        'get_volume_size',
        return_value='2147483648')
    def test_extend_volume_fast_success(
            self, _mock_volume_type, mock_volume_size):
        """extend_volume to 2GB succeeds under FAST."""
        newSize = '2'
        self.driver.extend_volume(self.data.test_volume, newSize)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXUtils,
        'check_if_volume_is_concatenated',
        return_value='False')
    def test_extend_volume_striped_fast_failed(
            self, _mock_volume_type, _mock_is_concatenated):
        """extend_volume raises for a non-concatenated volume under FAST."""
        newSize = '2'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          self.data.test_volume,
                          newSize)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST',
                      'FASTPOLICY': 'FC_GOLD1'})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_masking_group',
        return_value=EMCVMAXCommonData.storagegroupname)
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    @mock.patch.object(
        EMCVMAXCommon,
        '_find_storage_sync_sv_sv',
        return_value=(None, None))
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_configuration_service',
        return_value=1)
    @mock.patch.object(
        EMCVMAXUtils,
        'find_controller_configuration_service',
        return_value=1)
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_or_create_default_storage_group',
        return_value=1)
    def test_create_snapshot_fast_success(
            self, mock_volume_type, mock_storage_group, mock_volume,
            mock_sync_sv, mock_storage_config_service, mock_controller_service,
            mock_default_sg):
        """create_snapshot succeeds under a FAST policy."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.driver.create_snapshot(self.data.test_volume)

    def test_create_snapshot_fast_failed(self):
        """create_snapshot raises without the FAST mocks in place."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot,
                          self.data.test_volume)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST',
                      'FASTPOLICY': 'FC_GOLD1'})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_masking_group',
        return_value=EMCVMAXCommonData.storagegroupname)
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    @mock.patch.object(
        EMCVMAXCommon,
        '_find_storage_sync_sv_sv',
        return_value=(None, None))
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_configuration_service',
        return_value=1)
    @mock.patch.object(
        EMCVMAXUtils,
        'find_controller_configuration_service',
        return_value=1)
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_or_create_default_storage_group',
        return_value=1)
    def test_create_volume_from_snapshot_fast_success(
            self, mock_volume_type, mock_storage_group, mock_volume,
            mock_sync_sv, mock_storage_config_service, mock_controller_service,
            mock_default_sg):
        """create_volume_from_snapshot succeeds under a FAST policy."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.driver.create_volume_from_snapshot(
            self.data.test_volume, EMCVMAXCommonData.test_source_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST',
                      'FASTPOLICY': 'FC_GOLD1'})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_replication_service',
        return_value=None)
    @mock.patch.object(
        EMCVMAXCommon,
        '_find_storage_sync_sv_sv',
        return_value=(None, None))
    def test_create_volume_from_snapshot_fast_failed(
            self, mock_volume_type,
            mock_rep_service, mock_sync_sv):
        """create_volume_from_snapshot raises with the replication
        service lookup mocked to return None."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          self.data.test_volume,
                          EMCVMAXCommonData.test_source_volume)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST',
                      'FASTPOLICY': 'FC_GOLD1'})
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_masking_group',
        return_value=EMCVMAXCommonData.storagegroupname)
    @mock.patch.object(
        FakeDB,
        'volume_get',
        return_value=EMCVMAXCommonData.test_source_volume)
    @mock.patch.object(
        EMCVMAXCommon,
        '_find_storage_sync_sv_sv',
        return_value=(None, None))
    @mock.patch.object(
        EMCVMAXUtils,
        'find_storage_configuration_service',
        return_value=1)
    @mock.patch.object(
        EMCVMAXUtils,
        'find_controller_configuration_service',
        return_value=1)
    @mock.patch.object(
        EMCVMAXCommon,
        '_get_or_create_default_storage_group',
        return_value=1)
    def test_create_clone_fast_success(self, mock_volume_type,
                                       mock_storage_group, mock_volume,
                                       mock_sync_sv,
                                       mock_storage_config_service,
                                       mock_controller_service,
                                       mock_default_sg):
        """create_cloned_volume succeeds under a FAST policy."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.driver.create_cloned_volume(self.data.test_volume,
                                         EMCVMAXCommonData.test_source_volume)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST',
                      'FASTPOLICY': 'FC_GOLD1'})
    @mock.patch.object(
        EMCVMAXCommon,
        '_find_storage_sync_sv_sv',
        return_value=(None, None))
    def test_create_clone_fast_failed(self, mock_volume_type,
                                      mock_sync_sv):
        """create_cloned_volume raises without the remaining FAST mocks."""
        self.data.test_volume['volume_name'] = "vmax-1234567"
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          self.data.test_volume,
                          EMCVMAXCommonData.test_source_volume)
+
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    def test_migrate_volume_fast_success(self, _mock_volume_type):
        """migrate_volume succeeds under FAST."""
        self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
                                   self.data.test_host)

    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'volume_backend_name': 'ISCSIFAST'})
    @mock.patch.object(
        EMCVMAXUtils,
        'parse_pool_instance_id',
        return_value=('silver', 'SYMMETRIX+000195900551'))
    @mock.patch.object(
        EMCVMAXMasking,
        '_wrap_get_storage_group_from_volume',
        return_value=None)
    def test_retype_volume_fast_success(
            self, _mock_volume_type, mock_values, mock_wrap):
        """retype succeeds under FAST."""
        self.driver.retype(
            self.data.test_ctxt, self.data.test_volume, self.data.new_type,
            self.data.diff, self.data.test_host)
+
+    def _cleanup(self):
+        bExists = os.path.exists(self.config_file_path)
+        if bExists:
+            os.remove(self.config_file_path)
+        shutil.rmtree(self.tempdir)
+
+    def tearDown(self):
+        """Delete per-test artifacts, then run the base-class teardown."""
+        self._cleanup()
+        super(EMCVMAXISCSIDriverFastTestCase, self).tearDown()
+
+
+class EMCVMAXFCDriverNoFastTestCase(test.TestCase):
+    def setUp(self):
+
+        self.data = EMCVMAXCommonData()
+
+        self.tempdir = tempfile.mkdtemp()
+        super(EMCVMAXFCDriverNoFastTestCase, self).setUp()
+        self.config_file_path = None
+        self.create_fake_config_file_no_fast()
+
+        configuration = mock.Mock()
+        configuration.cinder_emc_config_file = self.config_file_path
+        configuration.safe_get.return_value = 'FCNoFAST'
+        configuration.config_group = 'FCNoFAST'
+
+        self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
+                       self.fake_ecom_connection)
+        instancename = FakeCIMInstanceName()
+        self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
+                       instancename.fake_getinstancename)
+        self.stubs.Set(time, 'sleep',
+                       self.fake_sleep)
+
+        driver = EMCVMAXFCDriver(configuration=configuration)
+        driver.db = FakeDB()
+        self.driver = driver
+
+    def create_fake_config_file_no_fast(self):
+
+        doc = Document()
+        emc = doc.createElement("EMC")
+        doc.appendChild(emc)
+
+        array = doc.createElement("Array")
+        arraytext = doc.createTextNode("1234567891011")
+        emc.appendChild(array)
+        array.appendChild(arraytext)
+
+        ecomserverip = doc.createElement("EcomServerIp")
+        ecomserveriptext = doc.createTextNode("1.1.1.1")
+        emc.appendChild(ecomserverip)
+        ecomserverip.appendChild(ecomserveriptext)
+
+        ecomserverport = doc.createElement("EcomServerPort")
+        ecomserverporttext = doc.createTextNode("10")
+        emc.appendChild(ecomserverport)
+        ecomserverport.appendChild(ecomserverporttext)
+
+        ecomusername = doc.createElement("EcomUserName")
+        ecomusernametext = doc.createTextNode("user")
+        emc.appendChild(ecomusername)
+        ecomusername.appendChild(ecomusernametext)
+
+        ecompassword = doc.createElement("EcomPassword")
+        ecompasswordtext = doc.createTextNode("pass")
+        emc.appendChild(ecompassword)
+        ecompassword.appendChild(ecompasswordtext)
+
+        portgroup = doc.createElement("PortGroup")
+        portgrouptext = doc.createTextNode("myPortGroup")
+        portgroup.appendChild(portgrouptext)
+
+        portgroups = doc.createElement("PortGroups")
+        portgroups.appendChild(portgroup)
+        emc.appendChild(portgroups)
+
+        pool = doc.createElement("Pool")
+        pooltext = doc.createTextNode("gold")
+        emc.appendChild(pool)
+        pool.appendChild(pooltext)
+
+        array = doc.createElement("Array")
+        arraytext = doc.createTextNode("0123456789")
+        emc.appendChild(array)
+        array.appendChild(arraytext)
+
+        timeout = doc.createElement("Timeout")
+        timeouttext = doc.createTextNode("0")
+        emc.appendChild(timeout)
+        timeout.appendChild(timeouttext)
+
+        filename = 'cinder_emc_config_FCNoFAST.xml'
+
+        self.config_file_path = self.tempdir + '/' + filename
+
+        f = open(self.config_file_path, 'w')
+        doc.writexml(f)
+        f.close()
+
+    def fake_ecom_connection(self):
+        conn = FakeEcomConnection()
+        return conn
+
+    def fake_sleep(self, seconds):
+        return
+
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storageSystem',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXFast,
+        'is_tiering_policy_enabled',
+        return_value=False)
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'get_pool_capacities',
+        return_value=(1234, 1200))
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'parse_array_name_from_file',
+        return_value="123456789")
+    def test_get_volume_stats_no_fast(self,
+                                      mock_storage_system,
+                                      mock_is_fast_enabled,
+                                      mock_capacity,
+                                      mock_array):
+        self.driver.get_volume_stats(True)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_pool_and_storage_system',
+        return_value=(None, EMCVMAXCommonData.storage_system))
+    def test_create_volume_no_fast_success(
+            self, _mock_volume_type, mock_storage_system):
+        self.driver.create_volume(self.data.test_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'storagetype: stripedmetacount': '4',
+                      'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_pool_and_storage_system',
+        return_value=(None, EMCVMAXCommonData.storage_system))
+    def test_create_volume_no_fast_striped_success(
+            self, _mock_volume_type, mock_storage_system):
+        self.driver.create_volume(self.data.test_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_pool_and_storage_system',
+        return_value=(None, EMCVMAXCommonData.storage_system))
+    def test_delete_volume_no_fast_success(
+            self, _mock_volume_type, mock_storage_system):
+        self.driver.delete_volume(self.data.test_volume)
+
+    def test_create_volume_no_fast_failed(self):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume,
+                          self.data.test_failed_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    def test_delete_volume_no_fast_notfound(self, _mock_volume_type):
+        notfound_delete_vol = {}
+        notfound_delete_vol['name'] = 'notfound_delete_vol'
+        notfound_delete_vol['id'] = '10'
+        notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
+        notfound_delete_vol['SystemName'] = self.data.storage_system
+        notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
+        notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
+        name = {}
+        name['classname'] = 'Symm_StorageVolume'
+        keys = {}
+        keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
+        keys['SystemName'] = notfound_delete_vol['SystemName']
+        keys['DeviceID'] = notfound_delete_vol['DeviceID']
+        keys['SystemCreationClassName'] =\
+            notfound_delete_vol['SystemCreationClassName']
+        name['keybindings'] = keys
+        notfound_delete_vol['volume_type_id'] = 'abc'
+        notfound_delete_vol['provider_location'] = None
+        self.driver.delete_volume(notfound_delete_vol)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_pool_and_storage_system',
+        return_value=(None, EMCVMAXCommonData.storage_system))
+    def test_delete_volume_failed(
+            self, _mock_volume_type, mock_storage_system):
+        self.driver.create_volume(self.data.failed_delete_vol)
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.delete_volume,
+                          self.data.failed_delete_vol)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXMasking,
+        '_wrap_get_storage_group_from_volume',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_wrap_find_device_number',
+        return_value={'hostlunid': 1,
+                      'storagesystem': EMCVMAXCommonData.storage_system})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_ip_protocol_endpoint',
+        return_value='10.10.10.10')
+    def test_map_no_fast_success(self, _mock_volume_type, mock_wrap_group,
+                                 mock_wrap_device, mock_find_ip):
+        self.driver.initialize_connection(self.data.test_volume,
+                                          self.data.connector)
+
+    @mock.patch.object(
+        EMCVMAXMasking,
+        '_wrap_get_storage_group_from_volume',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_wrap_find_device_number',
+        return_value={'storagesystem': EMCVMAXCommonData.storage_system})
+    def test_map_no_fast_failed(self, mock_wrap_group, mock_wrap_device):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.initialize_connection,
+                          self.data.test_volume,
+                          self.data.connector)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST',
+                      'FASTPOLICY': 'FC_GOLD1'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_masking_group',
+        return_value=EMCVMAXCommonData.storagegroupname)
+    def test_detach_no_fast_success(self, mock_volume_type,
+                                    mock_storage_group):
+
+        self.driver.terminate_connection(self.data.test_volume,
+                                         self.data.connector)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils, 'find_storage_system',
+        return_value={'Name': EMCVMAXCommonData.storage_system})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_masking_group',
+        return_value=EMCVMAXCommonData.storagegroupname)
+    def test_detach_no_fast_last_volume_success(self, mock_volume_type,
+                                                mock_storage_system,
+                                                mock_storage_group):
+        self.driver.terminate_connection(self.data.test_volume,
+                                         self.data.connector)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'get_volume_size',
+        return_value='2147483648')
+    def test_extend_volume_no_fast_success(self, _mock_volume_type,
+                                           _mock_volume_size):
+        newSize = '2'
+        self.driver.extend_volume(self.data.test_volume, newSize)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'check_if_volume_is_concatenated',
+        return_value='False')
+    def test_extend_volume_striped_no_fast_failed(
+            self, _mock_volume_type, _mock_is_concatenated):
+        newSize = '2'
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.extend_volume,
+                          self.data.test_volume,
+                          newSize)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        FakeDB,
+        'volume_get',
+        return_value=EMCVMAXCommonData.test_source_volume)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storage_sync_sv_sv',
+        return_value=(None, None))
+    def test_create_snapshot_no_fast_success(
+            self, mock_volume_type,
+            mock_volume, mock_sync_sv):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.driver.create_snapshot(self.data.test_volume)
+
+    def test_create_snapshot_no_fast_failed(self):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_snapshot,
+                          self.data.test_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        FakeDB,
+        'volume_get',
+        return_value=EMCVMAXCommonData.test_source_volume)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storage_sync_sv_sv',
+        return_value=(None, None))
+    def test_create_volume_from_snapshot_no_fast_success(
+            self, mock_volume_type,
+            mock_volume, mock_sync_sv):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.driver.create_volume_from_snapshot(
+            self.data.test_volume, EMCVMAXCommonData.test_source_volume)
+
+    def test_create_volume_from_snapshot_no_fast_failed(self):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume_from_snapshot,
+                          self.data.test_volume,
+                          EMCVMAXCommonData.test_source_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        FakeDB,
+        'volume_get',
+        return_value=EMCVMAXCommonData.test_source_volume)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storage_sync_sv_sv',
+        return_value=(None, None))
+    def test_create_clone_no_fast_success(self, mock_volume_type,
+                                          mock_volume, mock_sync_sv):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.driver.create_cloned_volume(self.data.test_volume,
+                                         EMCVMAXCommonData.test_source_volume)
+
+    def test_create_clone_no_fast_failed(self):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_cloned_volume,
+                          self.data.test_volume,
+                          EMCVMAXCommonData.test_source_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    def test_migrate_volume_no_fast_success(self, _mock_volume_type):
+        self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
+                                   self.data.test_host)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCNoFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'parse_pool_instance_id',
+        return_value=('silver', 'SYMMETRIX+000195900551'))
+    def test_retype_volume_no_fast_success(
+            self, _mock_volume_type, mock_values):
+        self.driver.retype(
+            self.data.test_ctxt, self.data.test_volume, self.data.new_type,
+            self.data.diff, self.data.test_host)
+
+    def _cleanup(self):
+        bExists = os.path.exists(self.config_file_path)
+        if bExists:
+            os.remove(self.config_file_path)
+        shutil.rmtree(self.tempdir)
+
+    def tearDown(self):
+        self._cleanup()
+        super(EMCVMAXFCDriverNoFastTestCase, self).tearDown()
+
+
+class EMCVMAXFCDriverFastTestCase(test.TestCase):
+
+    def setUp(self):
+        """Write a FAST-enabled fake config and build an FC driver
+        wired to fake ECOM, CIM instance-name and sleep fixtures.
+        """
+        self.data = EMCVMAXCommonData()
+
+        self.tempdir = tempfile.mkdtemp()
+        super(EMCVMAXFCDriverFastTestCase, self).setUp()
+        self.config_file_path = None
+        self.create_fake_config_file_fast()
+
+        configuration = mock.Mock()
+        configuration.cinder_emc_config_file = self.config_file_path
+        configuration.safe_get.return_value = 'FCFAST'
+        configuration.config_group = 'FCFAST'
+
+        # Fake out the ECOM connection and make time.sleep free so the
+        # tests never block or hit the network.
+        self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
+                       self.fake_ecom_connection)
+        instancename = FakeCIMInstanceName()
+        self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
+                       instancename.fake_getinstancename)
+        self.stubs.Set(time, 'sleep',
+                       self.fake_sleep)
+
+        driver = EMCVMAXFCDriver(configuration=configuration)
+        driver.db = FakeDB()
+        self.driver = driver
+
+    def create_fake_config_file_fast(self):
+
+        doc = Document()
+        emc = doc.createElement("EMC")
+        doc.appendChild(emc)
+
+        fastPolicy = doc.createElement("FastPolicy")
+        fastPolicyText = doc.createTextNode("GOLD1")
+        emc.appendChild(fastPolicy)
+        fastPolicy.appendChild(fastPolicyText)
+
+        ecomserverip = doc.createElement("EcomServerIp")
+        ecomserveriptext = doc.createTextNode("1.1.1.1")
+        emc.appendChild(ecomserverip)
+        ecomserverip.appendChild(ecomserveriptext)
+
+        ecomserverport = doc.createElement("EcomServerPort")
+        ecomserverporttext = doc.createTextNode("10")
+        emc.appendChild(ecomserverport)
+        ecomserverport.appendChild(ecomserverporttext)
+
+        ecomusername = doc.createElement("EcomUserName")
+        ecomusernametext = doc.createTextNode("user")
+        emc.appendChild(ecomusername)
+        ecomusername.appendChild(ecomusernametext)
+
+        ecompassword = doc.createElement("EcomPassword")
+        ecompasswordtext = doc.createTextNode("pass")
+        emc.appendChild(ecompassword)
+        ecompassword.appendChild(ecompasswordtext)
+
+        portgroup = doc.createElement("PortGroup")
+        portgrouptext = doc.createTextNode("myPortGroup")
+        portgroup.appendChild(portgrouptext)
+
+        pool = doc.createElement("Pool")
+        pooltext = doc.createTextNode("gold")
+        emc.appendChild(pool)
+        pool.appendChild(pooltext)
+
+        array = doc.createElement("Array")
+        arraytext = doc.createTextNode("0123456789")
+        emc.appendChild(array)
+        array.appendChild(arraytext)
+
+        portgroups = doc.createElement("PortGroups")
+        portgroups.appendChild(portgroup)
+        emc.appendChild(portgroups)
+
+        timeout = doc.createElement("Timeout")
+        timeouttext = doc.createTextNode("0")
+        emc.appendChild(timeout)
+        timeout.appendChild(timeouttext)
+
+        filename = 'cinder_emc_config_FCFAST.xml'
+
+        self.config_file_path = self.tempdir + '/' + filename
+
+        f = open(self.config_file_path, 'w')
+        doc.writexml(f)
+        f.close()
+
+    def fake_ecom_connection(self):
+        conn = FakeEcomConnection()
+        return conn
+
+    def fake_sleep(self, seconds):
+        return
+
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storageSystem',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXFast,
+        'is_tiering_policy_enabled',
+        return_value=True)
+    @mock.patch.object(
+        EMCVMAXFast,
+        'get_tier_policy_by_name',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXFast,
+        'get_capacities_associated_to_policy',
+        return_value=(1234, 1200))
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'parse_array_name_from_file',
+        return_value="123456789")
+    def test_get_volume_stats_fast(self,
+                                   mock_storage_system,
+                                   mock_is_fast_enabled,
+                                   mock_get_policy,
+                                   mock_capacity,
+                                   mock_array):
+        """get_volume_stats(True) succeeds with tiering reported enabled."""
+        self.driver.get_volume_stats(True)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_pool_and_storage_system',
+        return_value=(None, EMCVMAXCommonData.storage_system))
+    @mock.patch.object(
+        EMCVMAXFast,
+        'get_pool_associated_to_policy',
+        return_value=1)
+    def test_create_volume_fast_success(
+            self, _mock_volume_type, mock_storage_system, mock_pool_policy):
+        """create_volume succeeds when a pool is associated to the policy."""
+        self.driver.create_volume(self.data.test_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'storagetype: stripedmetacount': '4',
+                      'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_pool_and_storage_system',
+        return_value=(None, EMCVMAXCommonData.storage_system))
+    @mock.patch.object(
+        EMCVMAXFast,
+        'get_pool_associated_to_policy',
+        return_value=1)
+    def test_create_volume_fast_striped_success(
+            self, _mock_volume_type, mock_storage_system, mock_pool_policy):
+        """create_volume succeeds with FAST and a striped meta count of 4."""
+        self.driver.create_volume(self.data.test_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXMasking,
+        '_wrap_get_storage_group_from_volume',
+        return_value=None)
+    def test_delete_volume_fast_success(self, _mock_volume_type,
+                                        mock_storage_group):
+        """delete_volume succeeds for the standard test volume."""
+        self.driver.delete_volume(self.data.test_volume)
+
+    def test_create_volume_fast_failed(self):
+        """create_volume raises for the predefined failing volume."""
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume,
+                          self.data.test_failed_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    def test_delete_volume_fast_notfound(self, _mock_volume_type):
+        """We do not set the provider location.
+        """
+        notfound_delete_vol = {}
+        notfound_delete_vol['name'] = 'notfound_delete_vol'
+        notfound_delete_vol['id'] = '10'
+        notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
+        notfound_delete_vol['SystemName'] = self.data.storage_system
+        notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
+        notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
+        name = {}
+        name['classname'] = 'Symm_StorageVolume'
+        keys = {}
+        keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
+        keys['SystemName'] = notfound_delete_vol['SystemName']
+        keys['DeviceID'] = notfound_delete_vol['DeviceID']
+        keys['SystemCreationClassName'] =\
+            notfound_delete_vol['SystemCreationClassName']
+        name['keybindings'] = keys
+        notfound_delete_vol['volume_type_id'] = 'abc'
+        notfound_delete_vol['provider_location'] = None
+
+        self.driver.delete_volume(notfound_delete_vol)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXMasking,
+        '_wrap_get_storage_group_from_volume',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_pool_and_storage_system',
+        return_value=(None, EMCVMAXCommonData.storage_system))
+    @mock.patch.object(
+        EMCVMAXFast,
+        'get_pool_associated_to_policy',
+        return_value=1)
+    def test_delete_volume_fast_failed(
+            self, _mock_volume_type, mock_wrapper,
+            mock_storage_system, mock_pool_policy):
+        """delete_volume raises for failed_delete_vol after creating it."""
+        self.driver.create_volume(self.data.failed_delete_vol)
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.delete_volume,
+                          self.data.failed_delete_vol)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXMasking,
+        '_wrap_get_storage_group_from_volume',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_wrap_find_device_number',
+        return_value={'hostlunid': 1,
+                      'storagesystem': EMCVMAXCommonData.storage_system})
+    def test_map_fast_success(self, _mock_volume_type, mock_wrap_group,
+                              mock_wrap_device):
+        """initialize_connection succeeds when a host lun id is found."""
+        self.driver.initialize_connection(self.data.test_volume,
+                                          self.data.connector)
+
+    @mock.patch.object(
+        EMCVMAXMasking,
+        '_wrap_get_storage_group_from_volume',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_wrap_find_device_number',
+        return_value={'storagesystem': EMCVMAXCommonData.storage_system})
+    def test_map_fast_failed(self, mock_wrap_group, mock_wrap_device):
+        """initialize_connection raises when no host lun id is present."""
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.initialize_connection,
+                          self.data.test_volume,
+                          self.data.connector)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST',
+                      'FASTPOLICY': 'FC_GOLD1'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_masking_group',
+        return_value=EMCVMAXCommonData.storagegroupname)
+    def test_detach_fast_success(self, mock_volume_type,
+                                 mock_storage_group):
+        """terminate_connection succeeds with a masking group found."""
+        self.driver.terminate_connection(self.data.test_volume,
+                                         self.data.connector)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils, 'find_storage_system',
+        return_value={'Name': EMCVMAXCommonData.storage_system})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_masking_group',
+        return_value=EMCVMAXCommonData.storagegroupname)
+    def test_detach_fast_last_volume_success(
+            self, mock_volume_type,
+            mock_storage_system, mock_storage_group):
+        """terminate_connection succeeds on the last-volume path."""
+        self.driver.terminate_connection(self.data.test_volume,
+                                         self.data.connector)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'get_volume_size',
+        return_value='2147483648')
+    def test_extend_volume_fast_success(self, _mock_volume_type,
+                                        _mock_volume_size):
+        """extend_volume succeeds; current size reported as 2 GiB."""
+        newSize = '2'
+        self.driver.extend_volume(self.data.test_volume, newSize)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'check_if_volume_is_concatenated',
+        return_value='False')
+    def test_extend_volume_striped_fast_failed(self, _mock_volume_type,
+                                               _mock_is_concatenated):
+        """extend_volume raises when the volume is not concatenated."""
+        newSize = '2'
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.extend_volume,
+                          self.data.test_volume,
+                          newSize)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST',
+                      'FASTPOLICY': 'FC_GOLD1'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_masking_group',
+        return_value=EMCVMAXCommonData.storagegroupname)
+    @mock.patch.object(
+        FakeDB,
+        'volume_get',
+        return_value=EMCVMAXCommonData.test_source_volume)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storage_sync_sv_sv',
+        return_value=(None, None))
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_configuration_service',
+        return_value=1)
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_controller_configuration_service',
+        return_value=1)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_or_create_default_storage_group',
+        return_value=1)
+    def test_create_snapshot_fast_success(self, mock_volume_type,
+                                          mock_storage_group, mock_volume,
+                                          mock_sync_sv,
+                                          mock_storage_config_service,
+                                          mock_controller_config_service,
+                                          mock_default_sg):
+        """create_snapshot succeeds with the full FAST helper set patched."""
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.driver.create_snapshot(self.data.test_volume)
+
+    def test_create_snapshot_fast_failed(self):
+        """create_snapshot raises with nothing patched in."""
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_snapshot,
+                          self.data.test_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST',
+                      'FASTPOLICY': 'FC_GOLD1'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_masking_group',
+        return_value=EMCVMAXCommonData.storagegroupname)
+    @mock.patch.object(
+        FakeDB,
+        'volume_get',
+        return_value=EMCVMAXCommonData.test_source_volume)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storage_sync_sv_sv',
+        return_value=(None, None))
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_configuration_service',
+        return_value=1)
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_controller_configuration_service',
+        return_value=1)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_or_create_default_storage_group',
+        return_value=1)
+    def test_create_volume_from_snapshot_fast_success(
+            self, mock_volume_type, mock_storage_group, mock_volume,
+            mock_sync_sv, mock_storage_config_service,
+            mock_controller_config_service, mock_default_sg):
+        """create_volume_from_snapshot succeeds with FAST helpers patched."""
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.driver.create_volume_from_snapshot(
+            self.data.test_volume, EMCVMAXCommonData.test_source_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'storagetype: pool': 'gold',
+                      'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_replication_service',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storage_sync_sv_sv',
+        return_value=(None, None))
+    def test_create_volume_from_snapshot_fast_failed(self, mock_volume_type,
+                                                     mock_rep_service,
+                                                     mock_sync_sv):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume_from_snapshot,
+                          self.data.test_volume,
+                          EMCVMAXCommonData.test_source_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST',
+                      'FASTPOLICY': 'FC_GOLD1'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_masking_group',
+        return_value=EMCVMAXCommonData.storagegroupname)
+    @mock.patch.object(
+        FakeDB,
+        'volume_get',
+        return_value=EMCVMAXCommonData.test_source_volume)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storage_sync_sv_sv',
+        return_value=(None, None))
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_storage_configuration_service',
+        return_value=1)
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_controller_configuration_service',
+        return_value=1)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_get_or_create_default_storage_group',
+        return_value=1)
+    def test_create_clone_fast_success(self, mock_volume_type,
+                                       mock_storage_group, mock_volume,
+                                       mock_sync_sv,
+                                       mock_storage_config_service,
+                                       mock_controller_config_service,
+                                       mock_default_sg):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.driver.create_cloned_volume(self.data.test_volume,
+                                         EMCVMAXCommonData.test_source_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'find_replication_service',
+        return_value=None)
+    @mock.patch.object(
+        EMCVMAXCommon,
+        '_find_storage_sync_sv_sv',
+        return_value=(None, None))
+    def test_create_clone_fast_failed(self, mock_volume_type,
+                                      mock_rep_service, mock_sync_sv):
+        self.data.test_volume['volume_name'] = "vmax-1234567"
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_cloned_volume,
+                          self.data.test_volume,
+                          EMCVMAXCommonData.test_source_volume)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    def test_migrate_volume_fast_success(self, _mock_volume_type):
+        self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
+                                   self.data.test_host)
+
+    @mock.patch.object(
+        volume_types,
+        'get_volume_type_extra_specs',
+        return_value={'volume_backend_name': 'FCFAST'})
+    @mock.patch.object(
+        EMCVMAXUtils,
+        'parse_pool_instance_id',
+        return_value=('silver', 'SYMMETRIX+000195900551'))
+    @mock.patch.object(
+        EMCVMAXMasking,
+        '_wrap_get_storage_group_from_volume',
+        return_value=None)
+    def test_retype_volume_fast_success(
+            self, _mock_volume_type, mock_values, mock_wrap):
+        self.driver.retype(
+            self.data.test_ctxt, self.data.test_volume, self.data.new_type,
+            self.data.diff, self.data.test_host)
+
+    def _cleanup(self):
+        bExists = os.path.exists(self.config_file_path)
+        if bExists:
+            os.remove(self.config_file_path)
+        shutil.rmtree(self.tempdir)
+
+    def tearDown(self):
+        self._cleanup()
+        super(EMCVMAXFCDriverFastTestCase, self).tearDown()
diff --git a/cinder/volume/drivers/emc/emc_smis_common.py b/cinder/volume/drivers/emc/emc_smis_common.py
deleted file mode 100644 (file)
index dc16cf6..0000000
+++ /dev/null
@@ -1,1802 +0,0 @@
-# Copyright (c) 2012 - 2014 EMC Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""
-Common class for SMI-S based EMC volume drivers.
-
-This common class is for EMC volume drivers based on SMI-S.
-It supports VNX and VMAX arrays.
-
-"""
-
-import time
-from xml.dom.minidom import parseString
-
-from oslo.config import cfg
-
-from cinder import exception
-from cinder.i18n import _
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import units
-from cinder.volume import volume_types
-
-LOG = logging.getLogger(__name__)
-
-CONF = cfg.CONF
-
-try:
-    import pywbem
-except ImportError:
-    LOG.info(_('Module PyWBEM not installed.  '
-               'Install PyWBEM using the python-pywbem package.'))
-
-CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml'
-EMC_ROOT = 'root/emc'
-PROVISIONING = 'storagetype:provisioning'
-POOL = 'storagetype:pool'
-
-emc_opts = [
-    cfg.StrOpt('cinder_emc_config_file',
-               default=CINDER_EMC_CONFIG_FILE,
-               help='The configuration file for the Cinder '
-                    'EMC driver'), ]
-
-
-CONF.register_opts(emc_opts)
-
-
-class EMCSMISCommon():
-    """Common code that can be used by ISCSI and FC drivers."""
-
-    stats = {'driver_version': '1.0',
-             'free_capacity_gb': 0,
-             'reserved_percentage': 0,
-             'storage_protocol': None,
-             'total_capacity_gb': 0,
-             'vendor_name': 'EMC',
-             'volume_backend_name': None}
-
-    def __init__(self, prtcl, configuration=None):
-
-        self.protocol = prtcl
-        self.configuration = configuration
-        self.configuration.append_config_values(emc_opts)
-
-        ip, port = self._get_ecom_server()
-        self.user, self.passwd = self._get_ecom_cred()
-        self.url = 'http://' + ip + ':' + port
-        self.conn = self._get_ecom_connection()
-
-    def create_volume(self, volume):
-        """Creates a EMC(VMAX/VNX) volume."""
-        LOG.debug('Entering create_volume.')
-        volumesize = int(volume['size']) * units.Gi
-        volumename = volume['name']
-
-        LOG.info(_('Create Volume: %(volume)s  Size: %(size)lu')
-                 % {'volume': volumename,
-                    'size': volumesize})
-
-        self.conn = self._get_ecom_connection()
-
-        storage_type = self._get_storage_type(volume)
-
-        LOG.debug('Create Volume: %(volume)s  '
-                  'Storage type: %(storage_type)s'
-                  % {'volume': volumename,
-                     'storage_type': storage_type})
-
-        pool, storage_system = self._find_pool(storage_type[POOL])
-
-        LOG.debug('Create Volume: %(volume)s  Pool: %(pool)s  '
-                  'Storage System: %(storage_system)s'
-                  % {'volume': volumename,
-                     'pool': pool,
-                     'storage_system': storage_system})
-
-        configservice = self._find_storage_configuration_service(
-            storage_system)
-        if configservice is None:
-            exception_message = (_("Error Create Volume: %(volumename)s. "
-                                   "Storage Configuration Service not found "
-                                   "for pool %(storage_type)s.")
-                                 % {'volumename': volumename,
-                                    'storage_type': storage_type})
-            LOG.error(exception_message)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        provisioning = self._get_provisioning(storage_type)
-
-        LOG.debug('Create Volume: %(name)s  Method: '
-                  'CreateOrModifyElementFromStoragePool  ConfigServicie: '
-                  '%(service)s  ElementName: %(name)s  InPool: %(pool)s  '
-                  'ElementType: %(provisioning)s  Size: %(size)lu'
-                  % {'service': configservice,
-                     'name': volumename,
-                     'pool': pool,
-                     'provisioning': provisioning,
-                     'size': volumesize})
-
-        rc, job = self.conn.InvokeMethod(
-            'CreateOrModifyElementFromStoragePool',
-            configservice, ElementName=volumename, InPool=pool,
-            ElementType=self._getnum(provisioning, '16'),
-            Size=self._getnum(volumesize, '64'))
-
-        LOG.debug('Create Volume: %(volumename)s  Return code: %(rc)lu'
-                  % {'volumename': volumename,
-                     'rc': rc})
-
-        if rc != 0L:
-            rc, errordesc = self._wait_for_job_complete(job)
-            if rc != 0L:
-                LOG.error(_('Error Create Volume: %(volumename)s.  '
-                          'Return code: %(rc)lu.  Error: %(error)s')
-                          % {'volumename': volumename,
-                             'rc': rc,
-                             'error': errordesc})
-                raise exception.VolumeBackendAPIException(data=errordesc)
-
-        # Find the newly created volume
-        associators = self.conn.Associators(
-            job['Job'],
-            resultClass='EMC_StorageVolume')
-        volpath = associators[0].path
-        name = {}
-        name['classname'] = volpath.classname
-        keys = {}
-        keys['CreationClassName'] = volpath['CreationClassName']
-        keys['SystemName'] = volpath['SystemName']
-        keys['DeviceID'] = volpath['DeviceID']
-        keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
-        name['keybindings'] = keys
-
-        LOG.debug('Leaving create_volume: %(volumename)s  '
-                  'Return code: %(rc)lu '
-                  'volume instance: %(name)s'
-                  % {'volumename': volumename,
-                     'rc': rc,
-                     'name': name})
-
-        return name
-
-    def create_volume_from_snapshot(self, volume, snapshot):
-        """Creates a volume from a snapshot."""
-
-        LOG.debug('Entering create_volume_from_snapshot.')
-
-        snapshotname = snapshot['name']
-        volumename = volume['name']
-
-        LOG.info(_('Create Volume from Snapshot: Volume: %(volumename)s  '
-                 'Snapshot: %(snapshotname)s')
-                 % {'volumename': volumename,
-                    'snapshotname': snapshotname})
-
-        self.conn = self._get_ecom_connection()
-
-        snapshot_instance = self._find_lun(snapshot)
-        storage_system = snapshot_instance['SystemName']
-
-        LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s  '
-                  'Snapshot: %(snapshotname)s  Snapshot Instance: '
-                  '%(snapshotinstance)s  Storage System: %(storage_system)s.'
-                  % {'volumename': volumename,
-                     'snapshotname': snapshotname,
-                     'snapshotinstance': snapshot_instance.path,
-                     'storage_system': storage_system})
-
-        isVMAX = storage_system.find('SYMMETRIX')
-        if isVMAX > -1:
-            exception_message = (_('Error Create Volume from Snapshot: '
-                                   'Volume: %(volumename)s  Snapshot: '
-                                   '%(snapshotname)s. Create Volume '
-                                   'from Snapshot is NOT supported on VMAX.')
-                                 % {'volumename': volumename,
-                                    'snapshotname': snapshotname})
-            LOG.error(exception_message)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        repservice = self._find_replication_service(storage_system)
-        if repservice is None:
-            exception_message = (_('Error Create Volume from Snapshot: '
-                                   'Volume: %(volumename)s  Snapshot: '
-                                   '%(snapshotname)s. Cannot find Replication '
-                                   'Service to create volume from snapshot.')
-                                 % {'volumename': volumename,
-                                    'snapshotname': snapshotname})
-            LOG.error(exception_message)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s  '
-                  'Snapshot: %(snapshotname)s  Method: CreateElementReplica  '
-                  'ReplicationService: %(service)s  ElementName: '
-                  '%(elementname)s  SyncType: 8  SourceElement: '
-                  '%(sourceelement)s'
-                  % {'volumename': volumename,
-                     'snapshotname': snapshotname,
-                     'service': repservice,
-                     'elementname': volumename,
-                     'sourceelement': snapshot_instance.path})
-
-        # Create a Clone from snapshot
-        rc, job = self.conn.InvokeMethod(
-            'CreateElementReplica', repservice,
-            ElementName=volumename,
-            SyncType=self._getnum(8, '16'),
-            SourceElement=snapshot_instance.path)
-
-        if rc != 0L:
-            rc, errordesc = self._wait_for_job_complete(job)
-            if rc != 0L:
-                exception_message = (_('Error Create Volume from Snapshot: '
-                                       'Volume: %(volumename)s  Snapshot:'
-                                       '%(snapshotname)s.  Return code: '
-                                       '%(rc)lu. Error: %(error)s')
-                                     % {'volumename': volumename,
-                                        'snapshotname': snapshotname,
-                                        'rc': rc,
-                                        'error': errordesc})
-                LOG.error(exception_message)
-                raise exception.VolumeBackendAPIException(
-                    data=exception_message)
-
-        # Find the newly created volume
-        associators = self.conn.Associators(
-            job['Job'],
-            resultClass='EMC_StorageVolume')
-        volpath = associators[0].path
-        name = {}
-        name['classname'] = volpath.classname
-        keys = {}
-        keys['CreationClassName'] = volpath['CreationClassName']
-        keys['SystemName'] = volpath['SystemName']
-        keys['DeviceID'] = volpath['DeviceID']
-        keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
-        name['keybindings'] = keys
-
-        LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s  '
-                  'Snapshot: %(snapshotname)s.  Successfully clone volume '
-                  'from snapshot.  Finding the clone relationship.'
-                  % {'volumename': volumename,
-                     'snapshotname': snapshotname})
-
-        volume['provider_location'] = str(name)
-        sync_name, storage_system = self._find_storage_sync_sv_sv(
-            volume, snapshot)
-
-        # Remove the Clone relationshop so it can be used as a regular lun
-        # 8 - Detach operation
-        LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s  '
-                  'Snapshot: %(snapshotname)s.  Remove the clone '
-                  'relationship. Method: ModifyReplicaSynchronization '
-                  'ReplicationService: %(service)s  Operation: 8  '
-                  'Synchronization: %(sync_name)s'
-                  % {'volumename': volumename,
-                     'snapshotname': snapshotname,
-                     'service': repservice,
-                     'sync_name': sync_name})
-
-        rc, job = self.conn.InvokeMethod(
-            'ModifyReplicaSynchronization',
-            repservice,
-            Operation=self._getnum(8, '16'),
-            Synchronization=sync_name)
-
-        LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s  '
-                  'Snapshot: %(snapshotname)s  Return code: %(rc)lu'
-                  % {'volumename': volumename,
-                     'snapshotname': snapshotname,
-                     'rc': rc})
-
-        if rc != 0L:
-            rc, errordesc = self._wait_for_job_complete(job)
-            if rc != 0L:
-                exception_message = (_('Error Create Volume from Snapshot: '
-                                       'Volume: %(volumename)s  '
-                                       'Snapshot: %(snapshotname)s.  '
-                                       'Return code: %(rc)lu.  Error: '
-                                       '%(error)s')
-                                     % {'volumename': volumename,
-                                        'snapshotname': snapshotname,
-                                        'rc': rc,
-                                        'error': errordesc})
-                LOG.error(exception_message)
-                raise exception.VolumeBackendAPIException(
-                    data=exception_message)
-
-        LOG.debug('Leaving create_volume_from_snapshot: Volume: '
-                  '%(volumename)s Snapshot: %(snapshotname)s  '
-                  'Return code: %(rc)lu.'
-                  % {'volumename': volumename,
-                     'snapshotname': snapshotname,
-                     'rc': rc})
-
-        return name
-
-    def create_cloned_volume(self, volume, src_vref):
-        """Creates a clone of the specified volume."""
-        LOG.debug('Entering create_cloned_volume.')
-
-        srcname = src_vref['name']
-        volumename = volume['name']
-
-        LOG.info(_('Create a Clone from Volume: Volume: %(volumename)s  '
-                 'Source Volume: %(srcname)s')
-                 % {'volumename': volumename,
-                    'srcname': srcname})
-
-        self.conn = self._get_ecom_connection()
-
-        src_instance = self._find_lun(src_vref)
-        storage_system = src_instance['SystemName']
-
-        LOG.debug('Create Cloned Volume: Volume: %(volumename)s  '
-                  'Source Volume: %(srcname)s  Source Instance: '
-                  '%(src_instance)s  Storage System: %(storage_system)s.'
-                  % {'volumename': volumename,
-                     'srcname': srcname,
-                     'src_instance': src_instance.path,
-                     'storage_system': storage_system})
-
-        repservice = self._find_replication_service(storage_system)
-        if repservice is None:
-            exception_message = (_('Error Create Cloned Volume: '
-                                   'Volume: %(volumename)s  Source Volume: '
-                                   '%(srcname)s. Cannot find Replication '
-                                   'Service to create cloned volume.')
-                                 % {'volumename': volumename,
-                                    'srcname': srcname})
-            LOG.error(exception_message)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        LOG.debug('Create Cloned Volume: Volume: %(volumename)s  '
-                  'Source Volume: %(srcname)s  Method: CreateElementReplica  '
-                  'ReplicationService: %(service)s  ElementName: '
-                  '%(elementname)s  SyncType: 8  SourceElement: '
-                  '%(sourceelement)s'
-                  % {'volumename': volumename,
-                     'srcname': srcname,
-                     'service': repservice,
-                     'elementname': volumename,
-                     'sourceelement': src_instance.path})
-
-        # Create a Clone from source volume
-        rc, job = self.conn.InvokeMethod(
-            'CreateElementReplica', repservice,
-            ElementName=volumename,
-            SyncType=self._getnum(8, '16'),
-            SourceElement=src_instance.path)
-
-        if rc != 0L:
-            rc, errordesc = self._wait_for_job_complete(job)
-            if rc != 0L:
-                exception_message = (_('Error Create Cloned Volume: '
-                                       'Volume: %(volumename)s  Source Volume:'
-                                       '%(srcname)s.  Return code: %(rc)lu.'
-                                       'Error: %(error)s')
-                                     % {'volumename': volumename,
-                                        'srcname': srcname,
-                                        'rc': rc,
-                                        'error': errordesc})
-                LOG.error(exception_message)
-                raise exception.VolumeBackendAPIException(
-                    data=exception_message)
-
-        # Find the newly created volume
-        associators = self.conn.Associators(
-            job['Job'],
-            resultClass='EMC_StorageVolume')
-        volpath = associators[0].path
-        name = {}
-        name['classname'] = volpath.classname
-        keys = {}
-        keys['CreationClassName'] = volpath['CreationClassName']
-        keys['SystemName'] = volpath['SystemName']
-        keys['DeviceID'] = volpath['DeviceID']
-        keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
-        name['keybindings'] = keys
-
-        LOG.debug('Create Cloned Volume: Volume: %(volumename)s  '
-                  'Source Volume: %(srcname)s.  Successfully cloned volume '
-                  'from source volume.  Finding the clone relationship.'
-                  % {'volumename': volumename,
-                     'srcname': srcname})
-
-        volume['provider_location'] = str(name)
-        sync_name, storage_system = self._find_storage_sync_sv_sv(
-            volume, src_vref)
-
-        # Remove the Clone relationshop so it can be used as a regular lun
-        # 8 - Detach operation
-        LOG.debug('Create Cloned Volume: Volume: %(volumename)s  '
-                  'Source Volume: %(srcname)s.  Remove the clone '
-                  'relationship. Method: ModifyReplicaSynchronization '
-                  'ReplicationService: %(service)s  Operation: 8  '
-                  'Synchronization: %(sync_name)s'
-                  % {'volumename': volumename,
-                     'srcname': srcname,
-                     'service': repservice,
-                     'sync_name': sync_name})
-
-        rc, job = self.conn.InvokeMethod(
-            'ModifyReplicaSynchronization',
-            repservice,
-            Operation=self._getnum(8, '16'),
-            Synchronization=sync_name)
-
-        LOG.debug('Create Cloned Volume: Volume: %(volumename)s  '
-                  'Source Volume: %(srcname)s  Return code: %(rc)lu'
-                  % {'volumename': volumename,
-                     'srcname': srcname,
-                     'rc': rc})
-
-        if rc != 0L:
-            rc, errordesc = self._wait_for_job_complete(job)
-            if rc != 0L:
-                exception_message = (_('Error Create Cloned Volume: '
-                                       'Volume: %(volumename)s  '
-                                       'Source Volume: %(srcname)s.  '
-                                       'Return code: %(rc)lu.  Error: '
-                                       '%(error)s')
-                                     % {'volumename': volumename,
-                                        'srcname': srcname,
-                                        'rc': rc,
-                                        'error': errordesc})
-                LOG.error(exception_message)
-                raise exception.VolumeBackendAPIException(
-                    data=exception_message)
-
-        LOG.debug('Leaving create_cloned_volume: Volume: '
-                  '%(volumename)s Source Volume: %(srcname)s  '
-                  'Return code: %(rc)lu.'
-                  % {'volumename': volumename,
-                     'srcname': srcname,
-                     'rc': rc})
-
-        return name
-
-    def delete_volume(self, volume):
-        """Deletes an EMC volume."""
-        LOG.debug('Entering delete_volume.')
-        volumename = volume['name']
-        LOG.info(_('Delete Volume: %(volume)s')
-                 % {'volume': volumename})
-
-        self.conn = self._get_ecom_connection()
-
-        vol_instance = self._find_lun(volume)
-        if vol_instance is None:
-            LOG.error(_('Volume %(name)s not found on the array. '
-                      'No volume to delete.')
-                      % {'name': volumename})
-            return
-
-        storage_system = vol_instance['SystemName']
-
-        configservice =\
-            self._find_storage_configuration_service(storage_system)
-        if configservice is None:
-            exception_message = (_("Error Delete Volume: %(volumename)s. "
-                                   "Storage Configuration Service not found.")
-                                 % {'volumename': volumename})
-            LOG.error(exception_message)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        device_id = vol_instance['DeviceID']
-
-        LOG.debug('Delete Volume: %(name)s  DeviceID: %(deviceid)s'
-                  % {'name': volumename,
-                     'deviceid': device_id})
-
-        LOG.debug('Delete Volume: %(name)s  Method: EMCReturnToStoragePool '
-                  'ConfigServic: %(service)s  TheElement: %(vol_instance)s'
-                  % {'service': configservice,
-                     'name': volumename,
-                     'vol_instance': vol_instance.path})
-
-        rc, job =\
-            self.conn.InvokeMethod('EMCReturnToStoragePool',
-                                   configservice,
-                                   TheElements=[vol_instance.path])
-
-        if rc != 0L:
-            rc, errordesc = self._wait_for_job_complete(job)
-            if rc != 0L:
-                exception_message = (_('Error Delete Volume: %(volumename)s.  '
-                                       'Return code: %(rc)lu.  Error: '
-                                       '%(error)s')
-                                     % {'volumename': volumename,
-                                        'rc': rc,
-                                        'error': errordesc})
-                LOG.error(exception_message)
-                raise exception.VolumeBackendAPIException(
-                    data=exception_message)
-
-        LOG.debug('Leaving delete_volume: %(volumename)s  Return code: '
-                  '%(rc)lu'
-                  % {'volumename': volumename,
-                     'rc': rc})
-
-    def create_snapshot(self, snapshot, volume):
-        """Creates a snapshot."""
-        LOG.debug('Entering create_snapshot.')
-
-        snapshotname = snapshot['name']
-        volumename = snapshot['volume_name']
-        LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s')
-                 % {'snapshot': snapshotname,
-                    'volume': volumename})
-
-        self.conn = self._get_ecom_connection()
-
-        vol_instance = self._find_lun(volume)
-
-        device_id = vol_instance['DeviceID']
-        storage_system = vol_instance['SystemName']
-        LOG.debug('Device ID: %(deviceid)s: Storage System: '
-                  '%(storagesystem)s'
-                  % {'deviceid': device_id,
-                     'storagesystem': storage_system})
-
-        repservice = self._find_replication_service(storage_system)
-        if repservice is None:
-            LOG.error(_("Cannot find Replication Service to create snapshot "
-                      "for volume %s.") % volumename)
-            exception_message = (_("Cannot find Replication Service to "
-                                 "create snapshot for volume %s.")
-                                 % volumename)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        LOG.debug("Create Snapshot:  Method: CreateElementReplica: "
-                  "Target: %(snapshot)s  Source: %(volume)s  Replication "
-                  "Service: %(service)s  ElementName: %(elementname)s  Sync "
-                  "Type: 7  SourceElement: %(sourceelement)s."
-                  % {'snapshot': snapshotname,
-                     'volume': volumename,
-                     'service': repservice,
-                     'elementname': snapshotname,
-                     'sourceelement': vol_instance.path})
-
-        rc, job =\
-            self.conn.InvokeMethod('CreateElementReplica', repservice,
-                                   ElementName=snapshotname,
-                                   SyncType=self._getnum(7, '16'),
-                                   SourceElement=vol_instance.path)
-
-        LOG.debug('Create Snapshot: Volume: %(volumename)s  '
-                  'Snapshot: %(snapshotname)s  Return code: %(rc)lu'
-                  % {'volumename': volumename,
-                     'snapshotname': snapshotname,
-                     'rc': rc})
-
-        if rc != 0L:
-            rc, errordesc = self._wait_for_job_complete(job)
-            if rc != 0L:
-                exception_message = (_('Error Create Snapshot: %(snapshot)s '
-                                       'Volume: %(volume)s Error: '
-                                       '%(errordesc)s')
-                                     % {'snapshot': snapshotname, 'volume':
-                                        volumename, 'errordesc': errordesc})
-                LOG.error(exception_message)
-                raise exception.VolumeBackendAPIException(
-                    data=exception_message)
-
-        # Find the newly created volume
-        associators = self.conn.Associators(
-            job['Job'],
-            resultClass='EMC_StorageVolume')
-        volpath = associators[0].path
-        name = {}
-        name['classname'] = volpath.classname
-        keys = {}
-        keys['CreationClassName'] = volpath['CreationClassName']
-        keys['SystemName'] = volpath['SystemName']
-        keys['DeviceID'] = volpath['DeviceID']
-        keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
-        name['keybindings'] = keys
-
-        LOG.debug('Leaving create_snapshot: Snapshot: %(snapshot)s '
-                  'Volume: %(volume)s  Return code: %(rc)lu.' %
-                  {'snapshot': snapshotname, 'volume': volumename, 'rc': rc})
-
-        return name
-
    def delete_snapshot(self, snapshot, volume):
        """Deletes a snapshot.

        Finds the StorageSynchronized relationship between the snapshot
        and its source volume, invokes ModifyReplicaSynchronization
        (Operation 19) to delete both the target element and the snap
        session, then polls until the array has cleaned up the
        relationship so the source volume can be deleted right away.

        :param snapshot: snapshot object; provides 'name' and
                         'volume_name'
        :param volume: source volume object
        :raises: VolumeBackendAPIException if the replication service is
                 missing or the delete job reports an error
        """
        LOG.debug('Entering delete_snapshot.')

        snapshotname = snapshot['name']
        volumename = snapshot['volume_name']
        LOG.info(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s')
                 % {'snapshot': snapshotname,
                    'volume': volumename})

        self.conn = self._get_ecom_connection()

        LOG.debug('Delete Snapshot: %(snapshot)s: volume: %(volume)s. '
                  'Finding StorageSychronization_SV_SV.'
                  % {'snapshot': snapshotname,
                     'volume': volumename})

        # waitforsync=False: no need for the pair to be fully synced
        # before tearing it down.
        sync_name, storage_system =\
            self._find_storage_sync_sv_sv(snapshot, volume, False)
        if sync_name is None:
            LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s '
                      'not found on the array. No snapshot to delete.')
                      % {'snapshot': snapshotname,
                         'volume': volumename})
            return

        repservice = self._find_replication_service(storage_system)
        if repservice is None:
            exception_message = (_("Cannot find Replication Service to "
                                 "create snapshot for volume %s.")
                                 % volumename)
            raise exception.VolumeBackendAPIException(data=exception_message)

        # Delete snapshot - deletes both the target element
        # and the snap session
        LOG.debug("Delete Snapshot: Target: %(snapshot)s  "
                  "Source: %(volume)s.  Method: "
                  "ModifyReplicaSynchronization:  "
                  "Replication Service: %(service)s  Operation: 19  "
                  "Synchronization: %(sync_name)s."
                  % {'snapshot': snapshotname,
                     'volume': volumename,
                     'service': repservice,
                     'sync_name': sync_name})

        rc, job =\
            self.conn.InvokeMethod('ModifyReplicaSynchronization',
                                   repservice,
                                   Operation=self._getnum(19, '16'),
                                   Synchronization=sync_name)

        LOG.debug('Delete Snapshot: Volume: %(volumename)s  Snapshot: '
                  '%(snapshotname)s  Return code: %(rc)lu'
                  % {'volumename': volumename,
                     'snapshotname': snapshotname,
                     'rc': rc})

        # A nonzero rc means the request continued as an async job;
        # only the job's final return code decides success or failure.
        if rc != 0L:
            rc, errordesc = self._wait_for_job_complete(job)
            if rc != 0L:
                exception_message = (_('Error Delete Snapshot: Volume: '
                                       '%(volumename)s  Snapshot: '
                                       '%(snapshotname)s. Return code: '
                                       '%(rc)lu.  Error: %(error)s')
                                     % {'volumename': volumename,
                                        'snapshotname': snapshotname,
                                        'rc': rc,
                                        'error': errordesc})
                LOG.error(exception_message)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

        # It takes a while for the relationship between the snapshot
        # and the source volume gets cleaned up.  Needs to wait until
        # it is cleaned up.  Otherwise, the source volume can't be
        # deleted immediately after the snapshot deletion because it
        # still has snapshot.
        wait_timeout = int(self._get_timeout())
        wait_interval = 10
        start = int(time.time())
        while True:
            try:
                sync_name, storage_system =\
                    self._find_storage_sync_sv_sv(snapshot, volume, False)
                if sync_name is None:
                    LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
                               'Snapshot is deleted.')
                             % {'snapshot': snapshotname,
                                'volume': volumename})
                    break
                time.sleep(wait_interval)
                if int(time.time()) - start >= wait_timeout:
                    LOG.warn(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
                               'Snapshot deleted but cleanup timed out.')
                             % {'snapshot': snapshotname,
                                'volume': volumename})
                    break
            except Exception as ex:
                if ex.args[0] == 6:
                    # 6 means object not found, so snapshot is deleted cleanly
                    LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
                               'Snapshot is deleted.')
                             % {'snapshot': snapshotname,
                                'volume': volumename})
                else:
                    # NOTE(review): ex.args may be empty here, which would
                    # raise IndexError inside the handler -- confirm.
                    LOG.warn(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
                               'Snapshot deleted but error during cleanup. '
                               'Error: %(error)s')
                             % {'snapshot': snapshotname,
                                'volume': volumename,
                                'error': str(ex.args)})
                break

        LOG.debug('Leaving delete_snapshot: Volume: %(volumename)s  '
                  'Snapshot: %(snapshotname)s  Return code: %(rc)lu.'
                  % {'volumename': volumename,
                     'snapshotname': snapshotname,
                     'rc': rc})
-
    # Mapping method for VNX
    def _expose_paths(self, configservice, vol_instance,
                      connector):
        """This method maps a volume to a host.

        It adds a volume and initiator to a Storage Group
        and therefore maps the volume to the host.

        :param configservice: controller configuration service path
        :param vol_instance: CIM instance of the volume to map
        :param connector: connector dict identifying the host initiators
        :raises: VolumeBackendAPIException if ExposePaths fails
        """
        volumename = vol_instance['ElementName']
        lun_name = vol_instance['DeviceID']
        initiators = self._find_initiator_names(connector)
        storage_system = vol_instance['SystemName']
        lunmask_ctrl = self._find_lunmasking_scsi_protocol_controller(
            storage_system, connector)

        LOG.debug('ExposePaths: %(vol)s  ConfigServicie: %(service)s  '
                  'LUNames: %(lun_name)s  InitiatorPortIDs: %(initiator)s  '
                  'DeviceAccesses: 2'
                  % {'vol': vol_instance.path,
                     'service': configservice,
                     'lun_name': lun_name,
                     'initiator': initiators})

        # No existing controller for this host: pass the initiator list
        # and let the array build the mapping.  Otherwise reuse the
        # controller that was found.
        if lunmask_ctrl is None:
            rc, controller =\
                self.conn.InvokeMethod('ExposePaths',
                                       configservice, LUNames=[lun_name],
                                       InitiatorPortIDs=initiators,
                                       DeviceAccesses=[self._getnum(2, '16')])
        else:
            LOG.debug('ExposePaths parameter '
                      'LunMaskingSCSIProtocolController: '
                      '%(lunmasking)s'
                      % {'lunmasking': lunmask_ctrl})
            rc, controller =\
                self.conn.InvokeMethod('ExposePaths',
                                       configservice, LUNames=[lun_name],
                                       DeviceAccesses=[self._getnum(2, '16')],
                                       ProtocolControllers=[lunmask_ctrl])

        if rc != 0L:
            msg = (_('Error mapping volume %s.') % volumename)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        LOG.debug('ExposePaths for volume %s completed successfully.'
                  % volumename)
-
    # Unmapping method for VNX
    def _hide_paths(self, configservice, vol_instance,
                    connector):
        """This method unmaps a volume from the host.

        Removes a volume from the Storage Group
        and therefore unmaps the volume from the host.

        :param configservice: controller configuration service path
        :param vol_instance: CIM instance of the volume to unmap
        :param connector: connector dict identifying the host
        :raises: VolumeBackendAPIException if HidePaths fails
        """
        volumename = vol_instance['ElementName']
        device_id = vol_instance['DeviceID']
        lunmask_ctrl = self._find_lunmasking_scsi_protocol_controller_for_vol(
            vol_instance, connector)

        LOG.debug('HidePaths: %(vol)s  ConfigServicie: %(service)s  '
                  'LUNames: %(device_id)s  LunMaskingSCSIProtocolController: '
                  '%(lunmasking)s'
                  % {'vol': vol_instance.path,
                     'service': configservice,
                     'device_id': device_id,
                     'lunmasking': lunmask_ctrl})

        rc, controller = self.conn.InvokeMethod(
            'HidePaths', configservice,
            LUNames=[device_id], ProtocolControllers=[lunmask_ctrl])

        if rc != 0L:
            msg = (_('Error unmapping volume %s.') % volumename)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        LOG.debug('HidePaths for volume %s completed successfully.'
                  % volumename)
-
    # Mapping method for VMAX
    def _add_members(self, configservice, vol_instance):
        """This method maps a volume to a host.

        Add volume to the Device Masking Group that belongs to
        a Masking View.

        :param configservice: controller configuration service path
        :param vol_instance: CIM instance of the volume to map
        :raises: VolumeBackendAPIException if AddMembers fails
        """
        volumename = vol_instance['ElementName']
        masking_group = self._find_device_masking_group()

        LOG.debug('AddMembers: ConfigServicie: %(service)s  MaskingGroup: '
                  '%(masking_group)s  Members: %(vol)s'
                  % {'service': configservice,
                     'masking_group': masking_group,
                     'vol': vol_instance.path})

        rc, job =\
            self.conn.InvokeMethod('AddMembers',
                                   configservice,
                                   MaskingGroup=masking_group,
                                   Members=[vol_instance.path])

        # Nonzero rc: the call may have continued as an async job; only
        # the job's final return code decides success.
        if rc != 0L:
            rc, errordesc = self._wait_for_job_complete(job)
            if rc != 0L:
                msg = (_('Error mapping volume %(vol)s. %(error)s') %
                       {'vol': volumename, 'error': errordesc})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        LOG.debug('AddMembers for volume %s completed successfully.'
                  % volumename)
-
    # Unmapping method for VMAX
    def _remove_members(self, configservice, vol_instance):
        """This method unmaps a volume from a host.

        Removes volume from the Device Masking Group that belongs to
        a Masking View.

        :param configservice: controller configuration service path
        :param vol_instance: CIM instance of the volume to unmap
        :raises: VolumeBackendAPIException if RemoveMembers fails
        """
        volumename = vol_instance['ElementName']
        masking_group = self._find_device_masking_group()

        LOG.debug('RemoveMembers: ConfigServicie: %(service)s  '
                  'MaskingGroup: %(masking_group)s  Members: %(vol)s'
                  % {'service': configservice,
                     'masking_group': masking_group,
                     'vol': vol_instance.path})

        rc, job = self.conn.InvokeMethod('RemoveMembers', configservice,
                                         MaskingGroup=masking_group,
                                         Members=[vol_instance.path])

        # Nonzero rc: the call may have continued as an async job; only
        # the job's final return code decides success.
        if rc != 0L:
            rc, errordesc = self._wait_for_job_complete(job)
            if rc != 0L:
                msg = (_('Error unmapping volume %(vol)s. %(error)s')
                       % {'vol': volumename, 'error': errordesc})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        LOG.debug('RemoveMembers for volume %s completed successfully.'
                  % volumename)
-
-    def _map_lun(self, volume, connector):
-        """Maps a volume to the host."""
-        volumename = volume['name']
-        LOG.info(_('Map volume: %(volume)s')
-                 % {'volume': volumename})
-
-        vol_instance = self._find_lun(volume)
-        storage_system = vol_instance['SystemName']
-
-        configservice = self._find_controller_configuration_service(
-            storage_system)
-        if configservice is None:
-            exception_message = (_("Cannot find Controller Configuration "
-                                 "Service for storage system %s")
-                                 % storage_system)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        isVMAX = storage_system.find('SYMMETRIX')
-        if isVMAX > -1:
-            self._add_members(configservice, vol_instance)
-        else:
-            self._expose_paths(configservice, vol_instance, connector)
-
-    def _unmap_lun(self, volume, connector):
-        """Unmaps a volume from the host."""
-        volumename = volume['name']
-        LOG.info(_('Unmap volume: %(volume)s')
-                 % {'volume': volumename})
-
-        device_info = self.find_device_number(volume, connector)
-        device_number = device_info['hostlunid']
-        if device_number is None:
-            LOG.info(_("Volume %s is not mapped. No volume to unmap.")
-                     % (volumename))
-            return
-
-        vol_instance = self._find_lun(volume)
-        storage_system = vol_instance['SystemName']
-
-        configservice = self._find_controller_configuration_service(
-            storage_system)
-        if configservice is None:
-            exception_message = (_("Cannot find Controller Configuration "
-                                 "Service for storage system %s")
-                                 % storage_system)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        isVMAX = storage_system.find('SYMMETRIX')
-        if isVMAX > -1:
-            self._remove_members(configservice, vol_instance)
-        else:
-            self._hide_paths(configservice, vol_instance, connector)
-
-    def initialize_connection(self, volume, connector):
-        """Initializes the connection and returns connection info."""
-        volumename = volume['name']
-        LOG.info(_('Initialize connection: %(volume)s')
-                 % {'volume': volumename})
-        self.conn = self._get_ecom_connection()
-        device_info = self.find_device_number(volume, connector)
-        device_number = device_info['hostlunid']
-        if device_number is not None:
-            LOG.info(_("Volume %s is already mapped.")
-                     % (volumename))
-        else:
-            self._map_lun(volume, connector)
-            # Find host lun id again after the volume is exported to the host
-            device_info = self.find_device_number(volume, connector)
-
-        return device_info
-
-    def terminate_connection(self, volume, connector):
-        """Disallow connection from connector."""
-        volumename = volume['name']
-        LOG.info(_('Terminate connection: %(volume)s')
-                 % {'volume': volumename})
-        self.conn = self._get_ecom_connection()
-        self._unmap_lun(volume, connector)
-
    def extend_volume(self, volume, new_size):
        """Extends an existing volume.

        Resizes the volume in place via
        CreateOrModifyElementFromStoragePool, waiting on the async job
        when one is spawned.

        :param volume: volume object to extend
        :param new_size: new size in GB
        :raises: VolumeBackendAPIException if the storage configuration
                 service is missing or the resize job fails
        """
        LOG.debug('Entering extend_volume.')
        # Convert GB to bytes for the SMI-S size parameter.
        volumesize = int(new_size) * units.Gi
        volumename = volume['name']

        LOG.info(_('Extend Volume: %(volume)s  New size: %(size)lu')
                 % {'volume': volumename,
                    'size': volumesize})

        self.conn = self._get_ecom_connection()

        storage_type = self._get_storage_type(volume)

        vol_instance = self._find_lun(volume)

        device_id = vol_instance['DeviceID']
        storage_system = vol_instance['SystemName']
        LOG.debug('Device ID: %(deviceid)s: Storage System: '
                  '%(storagesystem)s'
                  % {'deviceid': device_id,
                     'storagesystem': storage_system})

        configservice = self._find_storage_configuration_service(
            storage_system)
        if configservice is None:
            exception_message = (_("Error Extend Volume: %(volumename)s. "
                                 "Storage Configuration Service not found.")
                                 % {'volumename': volumename})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        # ElementType must match the volume's provisioning (thin/thick).
        provisioning = self._get_provisioning(storage_type)

        LOG.debug('Extend Volume: %(name)s  Method: '
                  'CreateOrModifyElementFromStoragePool  ConfigServicie: '
                  '%(service)s ElementType: %(provisioning)s  Size: %(size)lu'
                  'Volume path: %(volumepath)s'
                  % {'service': configservice,
                     'name': volumename,
                     'provisioning': provisioning,
                     'size': volumesize,
                     'volumepath': vol_instance.path})

        rc, job = self.conn.InvokeMethod(
            'CreateOrModifyElementFromStoragePool',
            configservice, ElementType=self._getnum(provisioning, '16'),
            Size=self._getnum(volumesize, '64'),
            TheElement=vol_instance.path)

        LOG.debug('Extend Volume: %(volumename)s  Return code: %(rc)lu'
                  % {'volumename': volumename,
                     'rc': rc})

        # Nonzero rc: the resize continued as an async job; only the
        # job's final return code decides success.
        if rc != 0L:
            rc, errordesc = self._wait_for_job_complete(job)
            if rc != 0L:
                LOG.error(_('Error Extend Volume: %(volumename)s.  '
                          'Return code: %(rc)lu.  Error: %(error)s')
                          % {'volumename': volumename,
                             'rc': rc,
                             'error': errordesc})
                raise exception.VolumeBackendAPIException(data=errordesc)

        LOG.debug('Leaving extend_volume: %(volumename)s  '
                  'Return code: %(rc)lu '
                  % {'volumename': volumename,
                     'rc': rc})
-
-    def update_volume_stats(self):
-        """Retrieve stats info."""
-        LOG.debug("Updating volume stats")
-        self.stats['total_capacity_gb'] = 'unknown'
-        self.stats['free_capacity_gb'] = 'unknown'
-
-        return self.stats
-
-    def _get_storage_type(self, volume, filename=None):
-        """Get storage type.
-
-        Look for user input volume type first.
-        If not available, fall back to finding it in conf file.
-        """
-        specs = self._get_volumetype_extraspecs(volume)
-        if not specs:
-            specs = self._get_storage_type_conffile()
-        LOG.debug("Storage Type: %s" % (specs))
-        return specs
-
-    def _get_storage_type_conffile(self, filename=None):
-        """Get the storage type from the config file."""
-        if filename is None:
-            filename = self.configuration.cinder_emc_config_file
-
-        file = open(filename, 'r')
-        data = file.read()
-        file.close()
-        dom = parseString(data)
-        storageTypes = dom.getElementsByTagName('StorageType')
-        if storageTypes is not None and len(storageTypes) > 0:
-            storageType = storageTypes[0].toxml()
-            storageType = storageType.replace('<StorageType>', '')
-            storageType = storageType.replace('</StorageType>', '')
-            LOG.debug("Found Storage Type in config file: %s"
-                      % (storageType))
-            specs = {}
-            specs[POOL] = storageType
-            return specs
-        else:
-            exception_message = (_("Storage type not found."))
-            LOG.error(exception_message)
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-    def _get_masking_view(self, filename=None):
-        if filename is None:
-            filename = self.configuration.cinder_emc_config_file
-
-        file = open(filename, 'r')
-        data = file.read()
-        file.close()
-        dom = parseString(data)
-        views = dom.getElementsByTagName('MaskingView')
-        if views is not None and len(views) > 0:
-            view = views[0].toxml().replace('<MaskingView>', '')
-            view = view.replace('</MaskingView>', '')
-            LOG.debug("Found Masking View: %s" % (view))
-            return view
-        else:
-            LOG.debug("Masking View not found.")
-            return None
-
-    def _get_timeout(self, filename=None):
-        if filename is None:
-            filename = self.configuration.cinder_emc_config_file
-
-        file = open(filename, 'r')
-        data = file.read()
-        file.close()
-        dom = parseString(data)
-        timeouts = dom.getElementsByTagName('Timeout')
-        if timeouts is not None and len(timeouts) > 0:
-            timeout = timeouts[0].toxml().replace('<Timeout>', '')
-            timeout = timeout.replace('</Timeout>', '')
-            LOG.debug("Found Timeout: %s" % (timeout))
-            return timeout
-        else:
-            LOG.debug("Timeout not specified.")
-            return 10
-
-    def _get_ecom_cred(self, filename=None):
-        if filename is None:
-            filename = self.configuration.cinder_emc_config_file
-
-        file = open(filename, 'r')
-        data = file.read()
-        file.close()
-        dom = parseString(data)
-        ecomUsers = dom.getElementsByTagName('EcomUserName')
-        if ecomUsers is not None and len(ecomUsers) > 0:
-            ecomUser = ecomUsers[0].toxml().replace('<EcomUserName>', '')
-            ecomUser = ecomUser.replace('</EcomUserName>', '')
-        ecomPasswds = dom.getElementsByTagName('EcomPassword')
-        if ecomPasswds is not None and len(ecomPasswds) > 0:
-            ecomPasswd = ecomPasswds[0].toxml().replace('<EcomPassword>', '')
-            ecomPasswd = ecomPasswd.replace('</EcomPassword>', '')
-        if ecomUser is not None and ecomPasswd is not None:
-            return ecomUser, ecomPasswd
-        else:
-            LOG.debug("Ecom user not found.")
-            return None
-
-    def _get_ecom_server(self, filename=None):
-        if filename is None:
-            filename = self.configuration.cinder_emc_config_file
-
-        file = open(filename, 'r')
-        data = file.read()
-        file.close()
-        dom = parseString(data)
-        ecomIps = dom.getElementsByTagName('EcomServerIp')
-        if ecomIps is not None and len(ecomIps) > 0:
-            ecomIp = ecomIps[0].toxml().replace('<EcomServerIp>', '')
-            ecomIp = ecomIp.replace('</EcomServerIp>', '')
-        ecomPorts = dom.getElementsByTagName('EcomServerPort')
-        if ecomPorts is not None and len(ecomPorts) > 0:
-            ecomPort = ecomPorts[0].toxml().replace('<EcomServerPort>', '')
-            ecomPort = ecomPort.replace('</EcomServerPort>', '')
-        if ecomIp is not None and ecomPort is not None:
-            LOG.debug("Ecom IP: %(ecomIp)s Port: %(ecomPort)s",
-                      {'ecomIp': ecomIp, 'ecomPort': ecomPort})
-            return ecomIp, ecomPort
-        else:
-            LOG.debug("Ecom server not found.")
-            return None
-
-    def _get_ecom_connection(self, filename=None):
-        conn = pywbem.WBEMConnection(self.url, (self.user, self.passwd),
-                                     default_namespace='root/emc')
-        if conn is None:
-            exception_message = (_("Cannot connect to ECOM server"))
-            raise exception.VolumeBackendAPIException(data=exception_message)
-
-        return conn
-
-    def _find_replication_service(self, storage_system):
-        foundRepService = None
-        repservices = self.conn.EnumerateInstanceNames(
-            'EMC_ReplicationService')
-        for repservice in repservices:
-            if storage_system == repservice['SystemName']:
-                foundRepService = repservice
-                LOG.debug("Found Replication Service: %s"
-                          % (repservice))
-                break
-
-        return foundRepService
-
-    def _find_storage_configuration_service(self, storage_system):
-        foundConfigService = None
-        configservices = self.conn.EnumerateInstanceNames(
-            'EMC_StorageConfigurationService')
-        for configservice in configservices:
-            if storage_system == configservice['SystemName']:
-                foundConfigService = configservice
-                LOG.debug("Found Storage Configuration Service: %s"
-                          % (configservice))
-                break
-
-        return foundConfigService
-
-    def _find_controller_configuration_service(self, storage_system):
-        foundConfigService = None
-        configservices = self.conn.EnumerateInstanceNames(
-            'EMC_ControllerConfigurationService')
-        for configservice in configservices:
-            if storage_system == configservice['SystemName']:
-                foundConfigService = configservice
-                LOG.debug("Found Controller Configuration Service: %s"
-                          % (configservice))
-                break
-
-        return foundConfigService
-
-    def _find_storage_hardwareid_service(self, storage_system):
-        foundConfigService = None
-        configservices = self.conn.EnumerateInstanceNames(
-            'EMC_StorageHardwareIDManagementService')
-        for configservice in configservices:
-            if storage_system == configservice['SystemName']:
-                foundConfigService = configservice
-                LOG.debug("Found Storage Hardware ID Management Service: %s"
-                          % (configservice))
-                break
-
-        return foundConfigService
-
    # Find pool based on storage_type
    def _find_pool(self, storage_type, details=False):
        """Find the storage pool whose name matches *storage_type*.

        Unified pools (e.g. CLARiiON) are checked first, then virtual
        provisioning pools (e.g. SYMMETRIX).

        :param storage_type: pool name from volume type or config file
        :param details: False returns instance names only; True returns
                        full instances
        :returns: (pool, storage system name) tuple
        :raises: VolumeBackendAPIException when no pool matches or the
                 system name cannot be parsed
        """
        foundPool = None
        systemname = None
        # Only get instance names if details flag is False;
        # Otherwise get the whole instances
        if details is False:
            vpools = self.conn.EnumerateInstanceNames(
                'EMC_VirtualProvisioningPool')
            upools = self.conn.EnumerateInstanceNames(
                'EMC_UnifiedStoragePool')
        else:
            vpools = self.conn.EnumerateInstances(
                'EMC_VirtualProvisioningPool')
            upools = self.conn.EnumerateInstances(
                'EMC_UnifiedStoragePool')

        for upool in upools:
            poolinstance = upool['InstanceID']
            # Example: CLARiiON+APM00115204878+U+Pool 0
            poolname, systemname = self._parse_pool_instance_id(poolinstance)
            if poolname is not None and systemname is not None:
                if str(storage_type) == str(poolname):
                    foundPool = upool
                    break

        if foundPool is None:
            for vpool in vpools:
                poolinstance = vpool['InstanceID']
                # Example: SYMMETRIX+000195900551+TP+Sol_Innov
                poolname, systemname = self._parse_pool_instance_id(
                    poolinstance)
                if poolname is not None and systemname is not None:
                    if str(storage_type) == str(poolname):
                        foundPool = vpool
                        break

        if foundPool is None:
            exception_message = (_("Pool %(storage_type)s is not found.")
                                 % {'storage_type': storage_type})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        if systemname is None:
            exception_message = (_("Storage system not found for pool "
                                 "%(storage_type)s.")
                                 % {'storage_type': storage_type})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        LOG.debug("Pool: %(pool)s  SystemName: %(systemname)s."
                  % {'pool': foundPool,
                     'systemname': systemname})
        return foundPool, systemname
-
-    def _parse_pool_instance_id(self, instanceid):
-        # Example of pool InstanceId: CLARiiON+APM00115204878+U+Pool 0
-        poolname = None
-        systemname = None
-        endp = instanceid.rfind('+')
-        if endp > -1:
-            poolname = instanceid[endp + 1:]
-
-        idarray = instanceid.split('+')
-        if len(idarray) > 2:
-            systemname = idarray[0] + '+' + idarray[1]
-
-        LOG.debug("Pool name: %(poolname)s  System name: %(systemname)s."
-                  % {'poolname': poolname, 'systemname': systemname})
-        return poolname, systemname
-
    def _find_lun(self, volume):
        """Look up the CIM volume instance for a cinder volume.

        The instance name is rebuilt from the classname/keybindings dict
        stored in the volume's provider_location.

        :param volume: cinder volume object
        :returns: the CIM volume instance, or None when not found
        """
        foundinstance = None

        volumename = volume['name']
        loc = volume['provider_location']
        # NOTE(review): eval() executes any expression stored in
        # provider_location; ast.literal_eval would be safer -- confirm
        # before changing, since the driver writes this value itself.
        name = eval(loc)
        instancename = self._getinstancename(name['classname'],
                                             name['keybindings'])
        foundinstance = self.conn.GetInstance(instancename)

        if foundinstance is None:
            LOG.debug("Volume %(volumename)s not found on the array."
                      % {'volumename': volumename})
        else:
            LOG.debug("Volume name: %(volumename)s  Volume instance: "
                      "%(vol_instance)s."
                      % {'volumename': volumename,
                         'vol_instance': foundinstance.path})

        return foundinstance
-
    def _find_storage_sync_sv_sv(self, snapshot, volume,
                                 waitforsync=True):
        """Find the StorageSynchronized instance for a snap/volume pair.

        :param snapshot: snapshot object (the synced element)
        :param volume: source volume object (the system element)
        :param waitforsync: when True, poll every 10 seconds until
                            PercentSynced reaches 100 before returning
        :returns: (sync instance name, storage system name) tuple
        """
        foundsyncname = None
        storage_system = None
        percent_synced = 0

        snapshotname = snapshot['name']
        volumename = volume['name']
        LOG.debug("Source: %(volumename)s  Target: %(snapshotname)s."
                  % {'volumename': volumename, 'snapshotname': snapshotname})

        snapshot_instance = self._find_lun(snapshot)
        volume_instance = self._find_lun(volume)
        storage_system = volume_instance['SystemName']
        classname = 'SE_StorageSynchronized_SV_SV'
        bindings = {'SyncedElement': snapshot_instance.path,
                    'SystemElement': volume_instance.path}
        foundsyncname = self._getinstancename(classname, bindings)

        if foundsyncname is None:
            LOG.debug("Source: %(volumename)s  Target: %(snapshotname)s. "
                      "Storage Synchronized not found. "
                      % {'volumename': volumename,
                         'snapshotname': snapshotname})
        else:
            LOG.debug("Storage system: %(storage_system)s  "
                      "Storage Synchronized instance: %(sync)s."
                      % {'storage_system': storage_system,
                         'sync': foundsyncname})
            # Wait for SE_StorageSynchronized_SV_SV to be fully synced
            # NOTE(review): this poll has no timeout; it relies on the
            # array eventually reporting 100% -- confirm acceptable.
            while waitforsync and percent_synced < 100:
                time.sleep(10)
                sync_instance = self.conn.GetInstance(foundsyncname,
                                                      LocalOnly=False)
                percent_synced = sync_instance['PercentSynced']

        return foundsyncname, storage_system
-
-    def _find_initiator_names(self, connector):
-        foundinitiatornames = []
-        iscsi = 'iscsi'
-        fc = 'fc'
-        name = 'initiator name'
-        if self.protocol.lower() == iscsi and connector['initiator']:
-            foundinitiatornames.append(connector['initiator'])
-        elif self.protocol.lower() == fc and connector['wwpns']:
-            for wwn in connector['wwpns']:
-                foundinitiatornames.append(wwn)
-            name = 'world wide port names'
-
-        if foundinitiatornames is None or len(foundinitiatornames) == 0:
-            msg = (_('Error finding %s.') % name)
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-
-        LOG.debug("Found %(name)s: %(initiator)s."
-                  % {'name': name,
-                     'initiator': foundinitiatornames})
-        return foundinitiatornames
-
-    def _wait_for_job_complete(self, job):
-        jobinstancename = job['Job']
-
-        while True:
-            jobinstance = self.conn.GetInstance(jobinstancename,
-                                                LocalOnly=False)
-            jobstate = jobinstance['JobState']
-            # From ValueMap of JobState in CIM_ConcreteJob
-            # 2L=New, 3L=Starting, 4L=Running, 32767L=Queue Pending
-            # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767,
-            # 32768..65535"),
-            # Values("New, Starting, Running, Suspended, Shutting Down,
-            # Completed, Terminated, Killed, Exception, Service,
-            # Query Pending, DMTF Reserved, Vendor Reserved")]
-            if jobstate in [2L, 3L, 4L, 32767L]:
-                time.sleep(10)
-            else:
-                break
-
-        rc = jobinstance['ErrorCode']
-        errordesc = jobinstance['ErrorDescription']
-
-        return rc, errordesc
-
-    # Find LunMaskingSCSIProtocolController for the local host on the
-    # specified storage system
-    def _find_lunmasking_scsi_protocol_controller(self, storage_system,
-                                                  connector):
-        foundCtrl = None
-        initiators = self._find_initiator_names(connector)
-        controllers = self.conn.EnumerateInstanceNames(
-            'EMC_LunMaskingSCSIProtocolController')
-        for ctrl in controllers:
-            if storage_system != ctrl['SystemName']:
-                continue
-            associators =\
-                self.conn.Associators(ctrl,
-                                      resultClass='EMC_StorageHardwareID')
-            for assoc in associators:
-                # if EMC_StorageHardwareID matches the initiator,
-                # we found the existing EMC_LunMaskingSCSIProtocolController
-                # (Storage Group for VNX)
-                # we can use for masking a new LUN
-                hardwareid = assoc['StorageID']
-                for initiator in initiators:
-                    if hardwareid.lower() == initiator.lower():
-                        foundCtrl = ctrl
-                        break
-
-                if foundCtrl is not None:
-                    break
-
-            if foundCtrl is not None:
-                break
-
-        LOG.debug("LunMaskingSCSIProtocolController for storage system "
-                  "%(storage_system)s and initiator %(initiator)s is  "
-                  "%(ctrl)s."
-                  % {'storage_system': storage_system,
-                     'initiator': initiators,
-                     'ctrl': foundCtrl})
-        return foundCtrl
-
-    # Find LunMaskingSCSIProtocolController for the local host and the
-    # specified storage volume
-    def _find_lunmasking_scsi_protocol_controller_for_vol(self, vol_instance,
-                                                          connector):
-        foundCtrl = None
-        initiators = self._find_initiator_names(connector)
-        controllers =\
-            self.conn.AssociatorNames(
-                vol_instance.path,
-                resultClass='EMC_LunMaskingSCSIProtocolController')
-
-        for ctrl in controllers:
-            associators =\
-                self.conn.Associators(
-                    ctrl,
-                    resultClass='EMC_StorageHardwareID')
-            for assoc in associators:
-                # if EMC_StorageHardwareID matches the initiator,
-                # we found the existing EMC_LunMaskingSCSIProtocolController
-                # (Storage Group for VNX)
-                # we can use for masking a new LUN
-                hardwareid = assoc['StorageID']
-                for initiator in initiators:
-                    if hardwareid.lower() == initiator.lower():
-                        foundCtrl = ctrl
-                        break
-
-                if foundCtrl is not None:
-                    break
-
-            if foundCtrl is not None:
-                break
-
-        LOG.debug("LunMaskingSCSIProtocolController for storage volume "
-                  "%(vol)s and initiator %(initiator)s is  %(ctrl)s."
-                  % {'vol': vol_instance.path,
-                     'initiator': initiators,
-                     'ctrl': foundCtrl})
-        return foundCtrl
-
-    # Find out how many volumes are mapped to a host
-    # assoociated to the LunMaskingSCSIProtocolController
-    def get_num_volumes_mapped(self, volume, connector):
-        numVolumesMapped = 0
-        volumename = volume['name']
-        vol_instance = self._find_lun(volume)
-        if vol_instance is None:
-            msg = (_('Volume %(name)s not found on the array. '
-                   'Cannot determine if there are volumes mapped.')
-                   % {'name': volumename})
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-
-        storage_system = vol_instance['SystemName']
-
-        ctrl = self._find_lunmasking_scsi_protocol_controller(
-            storage_system,
-            connector)
-
-        LOG.debug("LunMaskingSCSIProtocolController for storage system "
-                  "%(storage)s and %(connector)s is %(ctrl)s."
-                  % {'storage': storage_system,
-                     'connector': connector,
-                     'ctrl': ctrl})
-
-        associators = self.conn.Associators(
-            ctrl,
-            resultClass='EMC_StorageVolume')
-
-        numVolumesMapped = len(associators)
-
-        LOG.debug("Found %(numVolumesMapped)d volumes on storage system "
-                  "%(storage)s mapped to %(connector)s."
-                  % {'numVolumesMapped': numVolumesMapped,
-                     'storage': storage_system,
-                     'connector': connector})
-
-        return numVolumesMapped
-
-    # Find an available device number that a host can see
-    def _find_avail_device_number(self, storage_system):
-        out_device_number = '000000'
-        out_num_device_number = 0
-        numlist = []
-        myunitnames = []
-
-        unitnames = self.conn.EnumerateInstanceNames(
-            'CIM_ProtocolControllerForUnit')
-        for unitname in unitnames:
-            controller = unitname['Antecedent']
-            if storage_system != controller['SystemName']:
-                continue
-            classname = controller['CreationClassName']
-            index = classname.find('LunMaskingSCSIProtocolController')
-            if index > -1:
-                unitinstance = self.conn.GetInstance(unitname,
-                                                     LocalOnly=False)
-                numDeviceNumber = int(unitinstance['DeviceNumber'])
-                numlist.append(numDeviceNumber)
-                myunitnames.append(unitname)
-
-        maxnum = max(numlist)
-        out_num_device_number = maxnum + 1
-
-        out_device_number = '%06d' % out_num_device_number
-
-        LOG.debug("Available device number on %(storage)s: %(device)s."
-                  % {'storage': storage_system, 'device': out_device_number})
-        return out_device_number
-
-    # Find a device number that a host can see for a volume
-    def find_device_number(self, volume, connector):
-        out_num_device_number = None
-
-        volumename = volume['name']
-        vol_instance = self._find_lun(volume)
-        storage_system = vol_instance['SystemName']
-        sp = None
-        try:
-            sp = vol_instance['EMCCurrentOwningStorageProcessor']
-        except KeyError:
-            # VMAX LUN doesn't have this property
-            pass
-
-        indexVMAX = storage_system.find('SYMMETRIX')
-        if indexVMAX == -1:
-            # find out whether the volume is already attached to the host
-            ctrl = self._find_lunmasking_scsi_protocol_controller_for_vol(
-                vol_instance,
-                connector)
-
-            LOG.debug("LunMaskingSCSIProtocolController for "
-                      "volume %(vol)s and connector %(connector)s "
-                      "is %(ctrl)s."
-                      % {'vol': vol_instance.path,
-                         'connector': connector,
-                         'ctrl': ctrl})
-
-        if indexVMAX > -1 or ctrl:
-            unitnames = self.conn.ReferenceNames(
-                vol_instance.path,
-                ResultClass='CIM_ProtocolControllerForUnit')
-
-            for unitname in unitnames:
-                controller = unitname['Antecedent']
-                classname = controller['CreationClassName']
-                index = classname.find('LunMaskingSCSIProtocolController')
-                if index > -1:  # VNX
-                    if ctrl['DeviceID'] != controller['DeviceID']:
-                        continue
-                    # Get an instance of CIM_ProtocolControllerForUnit
-                    unitinstance = self.conn.GetInstance(unitname,
-                                                         LocalOnly=False)
-                    numDeviceNumber = int(unitinstance['DeviceNumber'], 16)
-                    out_num_device_number = numDeviceNumber
-                    break
-                else:
-                    index = classname.find('Symm_LunMaskingView')
-                    if index > -1:  # VMAX
-                        unitinstance = self.conn.GetInstance(unitname,
-                                                             LocalOnly=False)
-                        numDeviceNumber = int(unitinstance['DeviceNumber'],
-                                              16)
-                        out_num_device_number = numDeviceNumber
-                        break
-
-        if out_num_device_number is None:
-            LOG.info(_("Device number not found for volume "
-                     "%(volumename)s %(vol_instance)s.") %
-                     {'volumename': volumename,
-                      'vol_instance': vol_instance.path})
-        else:
-            LOG.debug("Found device number %(device)d for volume "
-                      "%(volumename)s %(vol_instance)s." %
-                      {'device': out_num_device_number,
-                       'volumename': volumename,
-                       'vol_instance': vol_instance.path})
-
-        data = {'hostlunid': out_num_device_number,
-                'storagesystem': storage_system,
-                'owningsp': sp}
-
-        LOG.debug("Device info: %(data)s." % {'data': data})
-
-        return data
-
-    def _find_device_masking_group(self):
-        """Finds the Device Masking Group in a masking view."""
-        foundMaskingGroup = None
-        maskingview_name = self._get_masking_view()
-
-        maskingviews = self.conn.EnumerateInstanceNames(
-            'EMC_LunMaskingSCSIProtocolController')
-        for view in maskingviews:
-            instance = self.conn.GetInstance(view, LocalOnly=False)
-            if maskingview_name == instance['ElementName']:
-                foundView = view
-                break
-
-        groups = self.conn.AssociatorNames(
-            foundView,
-            ResultClass='SE_DeviceMaskingGroup')
-        foundMaskingGroup = groups[0]
-
-        LOG.debug("Masking view: %(view)s DeviceMaskingGroup: %(masking)s."
-                  % {'view': maskingview_name,
-                     'masking': foundMaskingGroup})
-
-        return foundMaskingGroup
-
-    # Find a StorageProcessorSystem given sp and storage system
-    def _find_storage_processor_system(self, owningsp, storage_system):
-        foundSystem = None
-        systems = self.conn.EnumerateInstanceNames(
-            'EMC_StorageProcessorSystem')
-        for system in systems:
-            # Clar_StorageProcessorSystem.CreationClassName=
-            # "Clar_StorageProcessorSystem",Name="CLARiiON+APM00123907237+SP_A"
-            idarray = system['Name'].split('+')
-            if len(idarray) > 2:
-                storsystemname = idarray[0] + '+' + idarray[1]
-                sp = idarray[2]
-
-            if (storage_system == storsystemname and
-                    owningsp == sp):
-                foundSystem = system
-                LOG.debug("Found Storage Processor System: %s"
-                          % (system))
-                break
-
-        return foundSystem
-
-    # Find EMC_iSCSIProtocolEndpoint for the specified sp
-    def _find_iscsi_protocol_endpoints(self, owningsp, storage_system):
-        foundEndpoints = []
-
-        processor = self._find_storage_processor_system(
-            owningsp,
-            storage_system)
-
-        associators = self.conn.Associators(
-            processor,
-            resultClass='EMC_iSCSIProtocolEndpoint')
-        for assoc in associators:
-            # Name = iqn.1992-04.com.emc:cx.apm00123907237.a8,t,0x0001
-            # SystemName = CLARiiON+APM00123907237+SP_A+8
-            arr = assoc['SystemName'].split('+')
-            if len(arr) > 2:
-                processor_name = arr[0] + '+' + arr[1] + '+' + arr[2]
-                if processor_name == processor['Name']:
-                    arr2 = assoc['Name'].split(',')
-                    if len(arr2) > 1:
-                        foundEndpoints.append(arr2[0])
-
-        LOG.debug("iSCSIProtocolEndpoint for storage system "
-                  "%(storage_system)s and SP %(sp)s is  "
-                  "%(endpoint)s."
-                  % {'storage_system': storage_system,
-                     'sp': owningsp,
-                     'endpoint': foundEndpoints})
-        return foundEndpoints
-
-    def _getnum(self, num, datatype):
-        try:
-            result = {
-                '8': pywbem.Uint8(num),
-                '16': pywbem.Uint16(num),
-                '32': pywbem.Uint32(num),
-                '64': pywbem.Uint64(num)
-            }
-            result = result.get(datatype, num)
-        except NameError:
-            result = num
-
-        return result
-
-    def _getinstancename(self, classname, bindings):
-        instancename = None
-        try:
-            instancename = pywbem.CIMInstanceName(
-                classname,
-                namespace=EMC_ROOT,
-                keybindings=bindings)
-        except NameError:
-            instancename = None
-
-        return instancename
-
-    # Find target WWNs
-    def get_target_wwns(self, storage_system, connector):
-        target_wwns = []
-
-        configservice = self._find_storage_hardwareid_service(
-            storage_system)
-        if configservice is None:
-            exception_msg = (_("Error finding Storage Hardware ID Service."))
-            LOG.error(exception_msg)
-            raise exception.VolumeBackendAPIException(data=exception_msg)
-
-        hardwareids = self._find_storage_hardwareids(connector)
-
-        LOG.debug('EMCGetTargetEndpoints: Service: %(service)s  '
-                  'Storage HardwareIDs: %(hardwareids)s.'
-                  % {'service': configservice,
-                     'hardwareids': hardwareids})
-
-        for hardwareid in hardwareids:
-            rc, targetendpoints = self.conn.InvokeMethod(
-                'EMCGetTargetEndpoints',
-                configservice,
-                HardwareId=hardwareid)
-
-            if rc != 0L:
-                msg = (_('Error finding Target WWNs.'))
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-
-            endpoints = targetendpoints['TargetEndpoints']
-            for targetendpoint in endpoints:
-                wwn = targetendpoint['Name']
-                # Add target wwn to the list if it is not already there
-                if not any(d == wwn for d in target_wwns):
-                    target_wwns.append(wwn)
-                LOG.debug('Add target WWN: %s.' % wwn)
-
-        LOG.debug('Target WWNs: %s.' % target_wwns)
-
-        return target_wwns
-
-    # Find Storage Hardware IDs
-    def _find_storage_hardwareids(self, connector):
-        foundInstances = []
-        wwpns = self._find_initiator_names(connector)
-        hardwareids = self.conn.EnumerateInstances(
-            'SE_StorageHardwareID')
-        for hardwareid in hardwareids:
-            storid = hardwareid['StorageID']
-            for wwpn in wwpns:
-                if wwpn.lower() == storid.lower():
-                    foundInstances.append(hardwareid.path)
-
-        LOG.debug("Storage Hardware IDs for %(wwpns)s is "
-                  "%(foundInstances)s."
-                  % {'wwpns': wwpns,
-                     'foundInstances': foundInstances})
-
-        return foundInstances
-
-    def _get_volumetype_extraspecs(self, volume):
-        specs = {}
-        type_id = volume['volume_type_id']
-        if type_id is not None:
-            specs = volume_types.get_volume_type_extra_specs(type_id)
-            # If specs['storagetype:pool'] not defined,
-            # set specs to {} so we can ready from config file later
-            if POOL not in specs:
-                specs = {}
-
-        return specs
-
-    def _get_provisioning(self, storage_type):
-        # provisioning is thin (5) by default
-        provisioning = 5
-        thick_str = 'thick'
-        try:
-            type_prov = storage_type[PROVISIONING]
-            if type_prov.lower() == thick_str.lower():
-                provisioning = 2
-        except KeyError:
-            # Default to thin if not defined
-            pass
-
-        return provisioning
diff --git a/cinder/volume/drivers/emc/emc_vmax_common.py b/cinder/volume/drivers/emc/emc_vmax_common.py
new file mode 100644 (file)
index 0000000..b7d7cc9
--- /dev/null
@@ -0,0 +1,2196 @@
+# Copyright (c) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os.path
+
+from oslo.config import cfg
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.emc import emc_vmax_fast
+from cinder.volume.drivers.emc import emc_vmax_masking
+from cinder.volume.drivers.emc import emc_vmax_provision
+from cinder.volume.drivers.emc import emc_vmax_utils
+
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+try:
+    import pywbem
+    pywbemAvailable = True
+except ImportError:
+    pywbemAvailable = False
+
+CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml'
+CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_'
+CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml'
+EMC_ROOT = 'root/emc'
+POOL = 'storagetype:pool'
+ARRAY = 'storagetype:array'
+FASTPOLICY = 'storagetype:fastpolicy'
+BACKENDNAME = 'volume_backend_name'
+COMPOSITETYPE = 'storagetype:compositetype'
+STRIPECOUNT = 'storagetype:stripecount'
+MEMBERCOUNT = 'storagetype:membercount'
+STRIPED = 'striped'
+CONCATENATED = 'concatenated'
+
+emc_opts = [
+    cfg.StrOpt('cinder_emc_config_file',
+               default=CINDER_EMC_CONFIG_FILE,
+               help='use this file for cinder emc plugin '
+                    'config data'), ]
+
+CONF.register_opts(emc_opts)
+
+
+class EMCVMAXCommon(object):
+    """Common class for SMI-S based EMC volume drivers.
+
+    This common class is for EMC VMAX volume drivers based on SMI-S.
+    It supports VMAX arrays only; VNX support was removed in Juno.
+
+    """
+
+    stats = {'driver_version': '1.0',
+             'free_capacity_gb': 0,
+             'reserved_percentage': 0,
+             'storage_protocol': None,
+             'total_capacity_gb': 0,
+             'vendor_name': 'EMC',
+             'volume_backend_name': None}
+
    def __init__(self, prtcl, configuration=None):
        """Initialize common driver state and helper objects.

        :param prtcl: the storage protocol in use (e.g. 'iSCSI' or 'FC'),
                      passed through to each helper object
        :param configuration: the cinder backend configuration object;
                              emc_opts are appended to it
        """
        # pywbem is needed to talk to the SMI-S (ECOM) provider; warn at
        # construction time so a missing dependency is visible before the
        # first provider call fails.
        if not pywbemAvailable:
            LOG.info(_(
                'Module PyWBEM not installed.  '
                'Install PyWBEM using the python-pywbem package.'))

        self.protocol = prtcl
        self.configuration = configuration
        self.configuration.append_config_values(emc_opts)
        # ECOM connection details; populated lazily by
        # _get_ecom_connection() on first use.
        self.conn = None
        self.url = None
        self.user = None
        self.passwd = None
        # Helper objects for masking views, generic SMI-S utilities,
        # FAST policy handling and provisioning operations.
        self.masking = emc_vmax_masking.EMCVMAXMasking(prtcl)
        self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
        self.fast = emc_vmax_fast.EMCVMAXFast(prtcl)
        self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl)
+
    def create_volume(self, volume):
        """Creates a EMC(VMAX) volume from a pre-existing storage pool.

        For a concatenated compositeType:
        If the volume size is over 240GB then a composite is created
        EMCNumberOfMembers > 1, otherwise it defaults to a non composite

        For a striped compositeType:
        The user must supply an extra spec to determine how many metas
        will make up the striped volume. If the meta size is greater than
        240GB an error is returned to the user. Otherwise the
        EMCNumberOfMembers is what the user specifies.

        :param volume: volume Object
        :returns: volumeDict, dictionary describing the new volume as
                  built by the provisioning helper
        :raises: VolumeBackendAPIException
        """
        volumeSize = int(self.utils.convert_gb_to_bits(volume['size']))
        volumeName = volume['name']

        extraSpecs = self._initial_setup(volume)
        # Validate the striped meta-member count against the requested size
        # before touching the array.
        memberCount, errorDesc = self.utils.determine_member_count(
            volume['size'], extraSpecs[MEMBERCOUNT], extraSpecs[COMPOSITETYPE])
        if errorDesc is not None:
            exceptionMessage = (_("The striped meta count of %(memberCount)s "
                                  "is too small for volume: %(volumeName)s. "
                                  "with size %(volumeSize)s ")
                                % {'memberCount': memberCount,
                                   'volumeName': volumeName,
                                   'volumeSize': volume['size']})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        self.conn = self._get_ecom_connection()

        poolInstanceName, storageSystemName = (
            self._get_pool_and_storage_system(extraSpecs))

        LOG.debug("Create Volume: %(volume)s  Pool: %(pool)s  "
                  "Storage System: %(storageSystem)s "
                  "Size: %(size)lu "
                  % {'volume': volumeName,
                     'pool': poolInstanceName,
                     'storageSystem': storageSystemName,
                     'size': volumeSize})

        elementCompositionService = (
            self.utils.find_element_composition_service(self.conn,
                                                        storageSystemName))

        storageConfigService = self.utils.find_storage_configuration_service(
            self.conn, storageSystemName)

        # If FAST is intended to be used we must first check that the pool
        # is associated with the correct storage tier
        if extraSpecs[FASTPOLICY] is not None:
            foundPoolInstanceName = self.fast.get_pool_associated_to_policy(
                self.conn, extraSpecs[FASTPOLICY], extraSpecs[ARRAY],
                storageConfigService, poolInstanceName)
            if foundPoolInstanceName is None:
                exceptionMessage = (_("Pool: %(poolName)s. "
                                      "is not associated to storage tier for "
                                      "fast policy %(fastPolicy)s.")
                                    % {'poolName': extraSpecs[POOL],
                                       'fastPolicy': extraSpecs[FASTPOLICY]})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        compositeType = self.utils.get_composite_type(
            extraSpecs[COMPOSITETYPE])

        volumeDict, rc = self.provision.create_composite_volume(
            self.conn, elementCompositionService, volumeSize, volumeName,
            poolInstanceName, compositeType, memberCount)

        # Now that we have already checked that the pool is associated with
        # the correct storage tier and the volume was successfully created
        # add the volume to the default storage group created for
        # volumes in pools associated with this fast policy
        if extraSpecs[FASTPOLICY]:
            LOG.info(_("Adding volume: %(volumeName)s to default storage group"
                       " for FAST policy: %(fastPolicyName)s ")
                     % {'volumeName': volumeName,
                        'fastPolicyName': extraSpecs[FASTPOLICY]})
            defaultStorageGroupInstanceName = (
                self._get_or_create_default_storage_group(
                    self.conn, storageSystemName, volumeDict,
                    volumeName, extraSpecs[FASTPOLICY]))
            if not defaultStorageGroupInstanceName:
                exceptionMessage = (_(
                    "Unable to create or get default storage group for "
                    "FAST policy: %(fastPolicyName)s. ")
                    % {'fastPolicyName': extraSpecs[FASTPOLICY]})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

            self._add_volume_to_default_storage_group_on_create(
                volumeDict, volumeName, storageConfigService,
                storageSystemName, extraSpecs[FASTPOLICY])

        LOG.info(_("Leaving create_volume: %(volumeName)s  "
                   "Return code: %(rc)lu "
                   "volume dict: %(name)s")
                 % {'volumeName': volumeName,
                    'rc': rc,
                    'name': volumeDict})

        return volumeDict
+
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        For VMAX, replace snapshot with clone.

        :param volume: volume Object
        :param snapshot: snapshot object
        :returns: cloneVolumeDict - the cloned volume dictionary
        """
        return self._create_cloned_volume(volume, snapshot)
+
    def create_cloned_volume(self, cloneVolume, sourceVolume):
        """Creates a clone of the specified volume.

        :param cloneVolume: clone volume Object
        :param sourceVolume: source volume object
        :returns: cloneVolumeDict - the cloned volume dictionary
        """
        return self._create_cloned_volume(cloneVolume, sourceVolume)
+
+    def delete_volume(self, volume):
+        """Deletes a EMC(VMAX) volume
+
+        :param volume: volume Object
+        """
+        LOG.info(_("Deleting Volume: %(volume)s")
+                 % {'volume': volume['name']})
+
+        rc, volumeName = self._delete_volume(volume)
+        LOG.info(_("Leaving delete_volume: %(volumename)s  Return code: "
+                   "%(rc)lu")
+                 % {'volumename': volumeName,
+                    'rc': rc})
+
    def create_snapshot(self, snapshot, volume):
        """Creates a snapshot.

        For VMAX, replace snapshot with clone

        :param snapshot: snapshot object
        :param volume: volume Object to create snapshot from
        :returns: cloneVolumeDict,the cloned volume dictionary
        """
        # NOTE(review): the bare True presumably flags the clone as a
        # snapshot in _create_cloned_volume -- confirm against the helper.
        return self._create_cloned_volume(snapshot, volume, True)
+
+    def delete_snapshot(self, snapshot, volume):
+        """Deletes a snapshot.
+
+        :param snapshot: snapshot object
+        :param volume: volume Object to create snapshot from
+        """
+        LOG.info(_("Delete Snapshot: %(snapshotName)s ")
+                 % {'snapshotName': snapshot['name']})
+        rc, snapshotName = self._delete_volume(snapshot)
+        LOG.debug("Leaving delete_snapshot: %(snapshotname)s  Return code: "
+                  "%(rc)lu "
+                  % {'snapshotname': snapshotName,
+                     'rc': rc})
+
    def _remove_members(
            self, controllerConfigService, volumeInstance, extraSpecs):
        """This method unmaps a volume from a host.

        Removes volume from the Device Masking Group that belongs to
        a Masking View.
        Check if fast policy is in the extra specs, if it isn't we do
        not need to do any thing for FAST
        Assume that isTieringPolicySupported is False unless the FAST
        policy is in the extra specs and tiering is enabled on the array

        :param controllerConfigService: instance name of
                                  ControllerConfigurationService
        :param volumeInstance: the volume instance to unmap
        :param extraSpecs: the volume-type extra specs; only FASTPOLICY
                           is read here, to drive FAST cleanup
        :returns: the result of masking.remove_and_reset_members
        """
        volumeName = volumeInstance['ElementName']
        LOG.debug("Detaching volume %s" % volumeName)
        fastPolicyName = extraSpecs[FASTPOLICY]
        return self.masking.remove_and_reset_members(
            self.conn, controllerConfigService, volumeInstance,
            fastPolicyName, volumeName)
+
+    def _unmap_lun(self, volume, connector):
+        """Unmaps a volume from the host.
+
+        :param volume: the volume Object
+        :param connector: the connector Object
+        :raises: VolumeBackendAPIException
+        """
+        extraSpecs = self._initial_setup(volume)
+        volumename = volume['name']
+        LOG.info(_("Unmap volume: %(volume)s")
+                 % {'volume': volumename})
+
+        device_info = self.find_device_number(volume, connector)
+        device_number = device_info['hostlunid']
+        if device_number is None:
+            LOG.info(_("Volume %s is not mapped. No volume to unmap.")
+                     % (volumename))
+            return
+
+        vol_instance = self._find_lun(volume)
+        storage_system = vol_instance['SystemName']
+
+        configservice = self.utils.find_controller_configuration_service(
+            self.conn, storage_system)
+        if configservice is None:
+            exception_message = (_("Cannot find Controller Configuration "
+                                   "Service for storage system "
+                                   "%(storage_system)s")
+                                 % {'storage_system': storage_system})
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        self._remove_members(configservice, vol_instance, extraSpecs)
+
+    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns device and connection info.
+
+        The volume may be already mapped, if this is so the deviceInfo tuple
+        is returned.  If the volume is not already mapped then we need to
+        gather information to either 1. Create an new masking view or 2.Add
+        the volume to to an existing storage group within an already existing
+        maskingview.
+
+        The naming convention is the following:
+        initiatorGroupName = OS-<shortHostName>-<shortProtocol>-IG
+                             e.g OS-myShortHost-I-IG
+        storageGroupName = OS-<shortHostName>-<poolName>-<shortProtocol>-SG
+                           e.g OS-myShortHost-SATA_BRONZ1-I-SG
+        portGroupName = OS-<target>-PG  The portGroupName will come from
+                        the EMC configuration xml file.
+                        These are precreated. If the portGroup does not exist
+                        then a error will be returned to the user
+        maskingView  = OS-<shortHostName>-<poolName>-<shortProtocol>-MV
+                       e.g OS-myShortHost-SATA_BRONZ1-I-MV
+
+        :param volume: volume Object
+        :param connector: the connector Object
+        :returns: deviceInfoDict, device information tuple
+        :returns: ipAddress, required for ISCSI command
+        :raises: VolumeBackendAPIException
+        """
+        ipAddress = None
+        extraSpecs = self._initial_setup(volume)
+
+        volumeName = volume['name']
+        LOG.info(_("Initialize connection: %(volume)s")
+                 % {'volume': volumeName})
+        self.conn = self._get_ecom_connection()
+        deviceInfoDict = self._wrap_find_device_number(volume, connector)
+        if ('hostlunid' in deviceInfoDict and
+                deviceInfoDict['hostlunid'] is not None):
+            # Device is already mapped so we will leave the state as is
+            deviceNumber = deviceInfoDict['hostlunid']
+            LOG.info(_("Volume %(volume)s is already mapped. "
+                       "The device number is  %(deviceNumber)s ")
+                     % {'volume': volumeName,
+                        'deviceNumber': deviceNumber})
+        else:
+            maskingViewDict = self._populate_masking_dict(
+                volume, connector, extraSpecs)
+            rollbackDict = self.masking.get_or_create_masking_view_and_map_lun(
+                self.conn, maskingViewDict)
+
+            # Find host lun id again after the volume is exported to the host
+            deviceInfoDict = self.find_device_number(volume, connector)
+            if 'hostlunid' not in deviceInfoDict:
+                # Did not successfully attach to host,
+                # so a rollback for FAST is required
+                LOG.error(_("Error Attaching volume %(vol)s ")
+                          % {'vol': volumeName})
+                if rollbackDict['fastPolicyName'] is not None:
+                    (
+                        self.masking
+                        ._check_if_rollback_action_for_masking_required(
+                            self.conn,
+                            rollbackDict['controllerConfigService'],
+                            rollbackDict['volumeInstance'],
+                            rollbackDict['volumeName'],
+                            rollbackDict['fastPolicyName'],
+                            rollbackDict['defaultStorageGroupInstanceName']))
+                exception_message = ("Error Attaching volume %(vol)s"
+                                     % {'vol': volumeName})
+                raise exception.VolumeBackendAPIException(
+                    data=exception_message)
+        if self.protocol.lower() == 'iscsi':
+            ipAddress = self.utils.find_ip_protocol_endpoint(
+                self.conn, deviceInfoDict['storagesystem'])
+            if ipAddress is None:
+                LOG.info(_("Unable to get iscsi IP address "
+                           "for storagesystem %(storageSystem)s")
+                         % {'storageSystem': deviceInfoDict['storagesystem']})
+
+        return deviceInfoDict, ipAddress
+
+    def _wrap_find_device_number(self, volume, connector):
+        """Aid for unit testing
+
+        :params volume: the volume Object
+        :params connector: the connector Object
+        :returns: deviceInfoDict
+        """
+        return self.find_device_number(volume, connector)
+
    def terminate_connection(self, volume, connector):
        """Disallow connection from connector.

        :params volume: the volume Object
        :params connector: the connector Object
        """
        # Parse/validate the backend configuration for this volume's type.
        self._initial_setup(volume)

        volumename = volume['name']
        LOG.info(_("Terminate connection: %(volume)s")
                 % {'volume': volumename})

        self.conn = self._get_ecom_connection()
        # Remove the volume from its masking view / storage group.
        self._unmap_lun(volume, connector)
+
    def extend_volume(self, volume, newSize):
        """Extends an existing volume.

        Prerequisites:
        1. The volume must be composite e.g StorageVolume.EMCIsComposite=True
        2. The volume can only be concatenated
           e.g StorageExtent.IsConcatenated=True

        The extension is performed by creating a new unbound volume of size
        (newSize - originalSize) and appending it to the existing
        concatenated composite volume, then verifying the resulting size.

        :params volume: the volume Object
        :params newSize: the new size to increase the volume to
        :returns: dict, the modified volume information
        :raises: VolumeBackendAPIException
        """
        originalVolumeSize = volume['size']
        volumeName = volume['name']
        self._initial_setup(volume)
        self.conn = self._get_ecom_connection()
        volumeInstance = self._find_lun(volume)
        if volumeInstance is None:
            exceptionMessage = (_("Cannot find Volume: %(volumename)s. "
                                  "Extend operation.  Exiting....")
                                % {'volumename': volumeName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        # Only growing is supported; reject any shrink request.
        if int(originalVolumeSize) > int(newSize):
            exceptionMessage = (_(
                "Your original size: %(originalVolumeSize)s GB is greater "
                "than: %(newSize)s GB. Only Extend is supported. Exiting...")
                % {'originalVolumeSize': originalVolumeSize,
                   'newSize': newSize})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        # Size of the extra capacity that has to be created and appended.
        additionalVolumeSize = six.text_type(
            int(newSize) - int(originalVolumeSize))
        additionalVolumeSize = self.utils.convert_gb_to_bits(
            additionalVolumeSize)

        # Is the volume concatenated?  NOTE(review): the helper appears to
        # return a string containing 'True'/'False', not a bool — confirm
        # against emc_vmax_utils.check_if_volume_is_concatenated.
        isConcatenated = self.utils.check_if_volume_is_concatenated(
            self.conn, volumeInstance)
        if 'True' not in isConcatenated:
            exceptionMessage = (_(
                "Volume: %(volumeName)s is not a concatenated volume. "
                "You can only perform extend on concatenated volume. "
                "Exiting...")
                % {'volumeName': volumeName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)
        else:
            compositeType = self.utils.get_composite_type(CONCATENATED)

        LOG.debug("Extend Volume: %(volume)s  New size: %(newSize)s GBs"
                  % {'volume': volumeName,
                     'newSize': newSize})

        deviceId = volumeInstance['DeviceID']
        storageSystemName = volumeInstance['SystemName']
        LOG.debug(
            "Device ID: %(deviceid)s: Storage System: "
            "%(storagesystem)s"
            % {'deviceid': deviceId,
               'storagesystem': storageSystemName})

        storageConfigService = self.utils.find_storage_configuration_service(
            self.conn, storageSystemName)

        elementCompositionService = (
            self.utils.find_element_composition_service(
                self.conn, storageSystemName))

        # create a volume to the size of the
        # newSize - oldSize = additionalVolumeSize
        unboundVolumeInstance = self._create_and_get_unbound_volume(
            self.conn, storageConfigService, volumeInstance.path,
            additionalVolumeSize)
        if unboundVolumeInstance is None:
            exceptionMessage = (_(
                "Error Creating unbound volume on an Extend operation"))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        # add the new unbound volume to the original composite volume
        rc, modifiedVolumeDict = (
            self._modify_and_get_composite_volume_instance(
                self.conn, elementCompositionService, volumeInstance,
                unboundVolumeInstance.path, volumeName, compositeType))
        if modifiedVolumeDict is None:
            exceptionMessage = (_(
                "On an Extend Operation, error adding volume to composite "
                "volume: %(volumename)s. ")
                % {'volumename': volumeName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        # check the occupied space of the new extended volume
        extendedVolumeInstance = self.utils.find_volume_instance(
            self.conn, modifiedVolumeDict, volumeName)
        extendedVolumeSize = self.utils.get_volume_size(
            self.conn, extendedVolumeInstance)
        LOG.debug(
            "The actual volume size of the extended volume: %(volumeName)s "
            "is %(volumeSize)s"
            % {'volumeName': volumeName,
               'volumeSize': extendedVolumeSize})

        # If the requested size and the actual size don't
        # tally throw an exception
        newSizeBits = self.utils.convert_gb_to_bits(newSize)
        diffVolumeSize = self.utils.compare_size(
            newSizeBits, extendedVolumeSize)
        if diffVolumeSize != 0:
            exceptionMessage = (_(
                "The requested size : %(requestedSize)s is not the same as "
                "resulting size: %(resultSize)s")
                % {'requestedSize': newSizeBits,
                   'resultSize': extendedVolumeSize})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        LOG.debug(
            "Leaving extend_volume: %(volumeName)s  "
            "Return code: %(rc)lu "
            "volume dict: %(name)s"
            % {'volumeName': volumeName,
               'rc': rc,
               'name': modifiedVolumeDict})

        return modifiedVolumeDict
+
+    def update_volume_stats(self):
+        """Retrieve stats info.
+        """
+        if hasattr(self.configuration, 'cinder_emc_config_file'):
+            emcConfigFileName = self.configuration.cinder_emc_config_file
+        else:
+            emcConfigFileName = self.configuration.safe_get(
+                'cinder_emc_config_file')
+
+        backendName = self.configuration.safe_get('volume_backend_name')
+        LOG.debug(
+            "Updating volume stats on file %(emcConfigFileName)s on "
+            "backend %(backendName)s "
+            % {'emcConfigFileName': emcConfigFileName,
+               'backendName': backendName})
+
+        poolName = self.utils.parse_pool_name_from_file(emcConfigFileName)
+        if poolName is None:
+            LOG.error(_(
+                "PoolName %(poolName)s must be in the file "
+                "%(emcConfigFileName)s ")
+                % {'poolName': poolName,
+                   'emcConfigFileName': emcConfigFileName})
+        arrayName = self.utils.parse_array_name_from_file(emcConfigFileName)
+        if arrayName is None:
+            LOG.error(_(
+                "Array Serial Number %(arrayName)s must be in the file "
+                "%(emcConfigFileName)s ")
+                % {'arrayName': arrayName,
+                   'emcConfigFileName': emcConfigFileName})
+        # This value can be None
+        fastPolicyName = self.utils.parse_fast_policy_name_from_file(
+            emcConfigFileName)
+        if fastPolicyName is not None:
+            LOG.debug(
+                "Fast policy %(fastPolicyName)s is enabled on %(arrayName)s. "
+                % {'fastPolicyName': fastPolicyName,
+                   'arrayName': arrayName})
+        else:
+            LOG.debug(
+                "No Fast policy for Array:%(arrayName)s "
+                "backend:%(backendName)s"
+                % {'arrayName': arrayName,
+                   'backendName': backendName})
+
+        if self.conn is None:
+            self._set_ecom_credentials(emcConfigFileName)
+
+        storageSystemInstanceName = self._find_storageSystem(arrayName)
+        isTieringPolicySupported = (
+            self.fast.is_tiering_policy_enabled_on_storage_system(
+                self.conn, storageSystemInstanceName))
+
+        if (fastPolicyName is not None and
+                isTieringPolicySupported is True):  # FAST enabled
+            total_capacity_gb, free_capacity_gb = (
+                self.fast.get_capacities_associated_to_policy(
+                    self.conn, arrayName, fastPolicyName))
+            LOG.info(
+                "FAST: capacity stats for policy %(fastPolicyName)s on "
+                "array %(arrayName)s (total_capacity_gb=%(total_capacity_gb)lu"
+                ", free_capacity_gb=%(free_capacity_gb)lu"
+                % {'fastPolicyName': fastPolicyName,
+                   'arrayName': arrayName,
+                   'total_capacity_gb': total_capacity_gb,
+                   'free_capacity_gb': free_capacity_gb})
+        else:  # NON-FAST
+            total_capacity_gb, free_capacity_gb = (
+                self.utils.get_pool_capacities(self.conn, poolName, arrayName))
+            LOG.info(
+                "NON-FAST: capacity stats for pool %(poolName)s on array "
+                "%(arrayName)s (total_capacity_gb=%(total_capacity_gb)lu, "
+                "free_capacity_gb=%(free_capacity_gb)lu"
+                % {'poolName': poolName,
+                   'arrayName': arrayName,
+                   'total_capacity_gb': total_capacity_gb,
+                   'free_capacity_gb': free_capacity_gb})
+
+        if poolName is None:
+            LOG.debug("Unable to get the poolName for location_info")
+        if arrayName is None:
+            LOG.debug("Unable to get the arrayName for location_info")
+        if fastPolicyName is None:
+            LOG.debug("FAST is not enabled for this configuration: "
+                      "%(emcConfigFileName)s"
+                      % {'emcConfigFileName': emcConfigFileName})
+
+        location_info = ("%(arrayName)s#%(poolName)s#%(policyName)s"
+                         % {'arrayName': arrayName,
+                            'poolName': poolName,
+                            'policyName': fastPolicyName})
+
+        data = {'total_capacity_gb': total_capacity_gb,
+                'free_capacity_gb': free_capacity_gb,
+                'reserved_percentage': 0,
+                'QoS_support': False,
+                'volume_backend_name': backendName or self.__class__.__name__,
+                'vendor_name': "EMC",
+                'driver_version': '1.0',
+                'storage_protocol': 'unknown',
+                'location_info': location_info}
+
+        self.stats = data
+
+        return self.stats
+
    def retype(self, ctxt, volume, new_type, diff, host):
        """Migrate volume to another host using retype.

        :param ctxt: context
        :param volume: the volume object including the volume_type_id
        :param new_type: the new volume type.
        :param diff: the difference between the two volume types
        :param host: The host dict holding the relevant target(destination)
               information
        :returns: boolean True/False
        :returns: list
        """

        volumeName = volume['name']
        volumeStatus = volume['status']
        LOG.info(_("Migrating using retype Volume: %(volume)s")
                 % {'volume': volumeName})

        extraSpecs = self._initial_setup(volume)
        self.conn = self._get_ecom_connection()

        volumeInstance = self._find_lun(volume)
        if volumeInstance is None:
            LOG.error(_("Volume %(name)s not found on the array. "
                        "No volume to migrate using retype.")
                      % {'name': volumeName})
            return False

        storageSystemName = volumeInstance['SystemName']
        isValid, targetPoolName, targetFastPolicyName = (
            self._is_valid_for_storage_assisted_migration(
                volumeInstance.path, host, storageSystemName,
                volumeName, volumeStatus))

        if not isValid:
            LOG.error(_("Volume %(name)s is not suitable for storage "
                        "assisted migration using retype")
                      % {'name': volumeName})
            return False
        # Only migrate when the destination host differs from the current
        # one; otherwise the retype is a no-op for placement.
        if volume['host'] != host['host']:
            LOG.debug(
                "Retype Volume %(name)s from source host %(sourceHost)s "
                "to target host %(targetHost)s"
                % {'name': volumeName,
                   'sourceHost': volume['host'],
                   'targetHost': host['host']})
            return self._migrate_volume(
                volume, volumeInstance, targetPoolName, targetFastPolicyName,
                extraSpecs[FASTPOLICY], new_type)

        return True
+
+    def migrate_volume(self, ctxt, volume, host, new_type=None):
+        """Migrate volume to another host
+
+        :param ctxt: context
+        :param volume: the volume object including the volume_type_id
+        :param host: the host dict holding the relevant target(destination)
+               information
+        :param new_type: None
+        :returns: boolean True/False
+        :returns: list
+        """
+        LOG.warn(_("The VMAX plugin only supports Retype.  "
+                   "If a pool based migration is necessary "
+                   "this will happen on a Retype "
+                   "From the command line: "
+                   "cinder --os-volume-api-version 2 retype "
+                   "<volumeId> <volumeType> --migration-policy on-demand"))
+        return True, {}
+
    def _migrate_volume(
            self, volume, volumeInstance, targetPoolName,
            targetFastPolicyName, sourceFastPolicyName, new_type=None):
        """Migrate volume to another host

        :param volume: the volume object including the volume_type_id
        :param volumeInstance: the volume instance
        :param targetPoolName: the target poolName
        :param targetFastPolicyName: the target FAST policy name, can be None
        :param sourceFastPolicyName: the source FAST policy name, can be None
        :param new_type: None  (NOTE(review): unused in this method)
        :returns: boolean True/False, whether the volume was moved
        """
        volumeName = volume['name']
        storageSystemName = volumeInstance['SystemName']

        # Remember the source pool so a failed migrate can be rolled back.
        sourcePoolInstanceName = self.utils.get_assoc_pool_from_volume(
            self.conn, volumeInstance.path)

        moved, rc = self._migrate_volume_from(
            volume, volumeInstance, targetPoolName, sourceFastPolicyName)

        if moved is False and sourceFastPolicyName is not None:
            # Return the volume to the default source fast policy storage
            # group because the migrate was unsuccessful
            LOG.warn(_("Failed to migrate: %(volumeName)s from "
                       "default source storage group "
                       "for FAST policy: %(sourceFastPolicyName)s "
                       "Attempting cleanup... ")
                     % {'volumeName': volumeName,
                        'sourceFastPolicyName': sourceFastPolicyName})
            if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume(
                    self.conn, volumeInstance.path):
                # Volume is still in the source pool: only the FAST storage
                # group membership needs fixing.
                self._migrate_cleanup(self.conn, volumeInstance,
                                      storageSystemName, sourceFastPolicyName,
                                      volumeName)
            else:
                # migrate was successful but still issues
                self._migrate_rollback(
                    self.conn, volumeInstance, storageSystemName,
                    sourceFastPolicyName, volumeName, sourcePoolInstanceName)

            return moved

        # NOTE(review): 'None' here is the string form, presumably produced
        # by config/extra-spec parsing upstream — confirm against
        # _is_valid_for_storage_assisted_migration.
        if targetFastPolicyName == 'None':
            targetFastPolicyName = None

        if moved is True and targetFastPolicyName is not None:
            # Target is FAST enabled: add the volume to the target policy's
            # default storage group, rolling back to the source pool on
            # failure.
            if not self._migrate_volume_fast_target(
                    volumeInstance, storageSystemName,
                    targetFastPolicyName, volumeName):
                LOG.warn(_("Attempting a rollback of: %(volumeName)s to "
                           "original pool %(sourcePoolInstanceName)s ")
                         % {'volumeName': volumeName,
                            'sourcePoolInstanceName': sourcePoolInstanceName})
                self._migrate_rollback(
                    self.conn, volumeInstance, storageSystemName,
                    sourceFastPolicyName, volumeName, sourcePoolInstanceName)

        # A zero return code from the migrate operation means success.
        if rc == 0:
            moved = True

        return moved
+
    def _migrate_rollback(self, conn, volumeInstance,
                          storageSystemName, sourceFastPolicyName,
                          volumeName, sourcePoolInstanceName):
        """Full rollback

        Failed on final step on adding migrated volume to new target
        default storage group for the target FAST policy.  Moves the volume
        back to its original pool and, if FAST was in use, re-adds it to the
        source policy's default storage group.

        :param conn: connection info to ECOM
        :param volumeInstance: the volume instance
        :param storageSystemName: the storage system name
        :param sourceFastPolicyName: the source FAST policy name
        :param volumeName: the volume Name
        :param sourcePoolInstanceName: instance name of the original pool
        :returns: None
        """

        LOG.warn(_("_migrate_rollback on : %(volumeName)s from ")
                 % {'volumeName': volumeName})

        storageRelocationService = self.utils.find_storage_relocation_service(
            conn, storageSystemName)

        try:
            # Move the volume back to the pool it originally came from.
            self.provision.migrate_volume_to_storage_pool(
                conn, storageRelocationService, volumeInstance.path,
                sourcePoolInstanceName)
        except Exception:
            # Best effort only: the volume may be stranded in the target
            # pool and requires manual intervention.
            exceptionMessage = (_(
                "Failed to return volume %(volumeName)s to "
                "original storage pool. Please contact your system "
                "administrator to return it to the correct location ")
                % {'volumeName': volumeName})
            LOG.error(exceptionMessage)

        if sourceFastPolicyName is not None:
            # Re-associate the volume with its source FAST policy.
            self.add_to_default_SG(
                conn, volumeInstance, storageSystemName, sourceFastPolicyName,
                volumeName)
+
+    def _migrate_cleanup(self, conn, volumeInstance,
+                         storageSystemName, sourceFastPolicyName,
+                         volumeName):
+        """If the migrate fails, put volume back to source FAST SG
+
+        :param conn: connection info to ECOM
+        :param volumeInstance: the volume instance
+        :param storageSystemName: the storage system name
+        :param sourceFastPolicyName: the source FAST policy name
+        :param volumeName: the volume Name
+
+        :returns: boolean True/False
+        :returns: int, the return code from migrate operation
+        """
+
+        LOG.warn(_("_migrate_cleanup on : %(volumeName)s from ")
+                 % {'volumeName': volumeName})
+
+        controllerConfigurationService = (
+            self.utils.find_controller_configuration_service(
+                conn, storageSystemName))
+
+        # check to see what SG it is in
+        assocStorageGroupInstanceName = (
+            self.utils.get_storage_group_from_volume(conn,
+                                                     volumeInstance.path))
+        # This is the SG it should be in
+        defaultStorageGroupInstanceName = (
+            self.fast.get_policy_default_storage_group(
+                conn, controllerConfigurationService, sourceFastPolicyName))
+
+        # It is not in any storage group.  Must add it to default source
+        if assocStorageGroupInstanceName is None:
+            self.add_to_default_SG(conn, volumeInstance,
+                                   storageSystemName, sourceFastPolicyName,
+                                   volumeName)
+
+        # It is in the incorrect storage group
+        if (assocStorageGroupInstanceName is not None and
+                (assocStorageGroupInstanceName !=
+                    defaultStorageGroupInstanceName)):
+            self.provision.remove_device_from_storage_group(
+                conn, controllerConfigurationService,
+                assocStorageGroupInstanceName, volumeInstance.path, volumeName)
+
+            self.add_to_default_SG(
+                conn, volumeInstance, storageSystemName, sourceFastPolicyName,
+                volumeName)
+
+    def _migrate_volume_fast_target(
+            self, volumeInstance, storageSystemName,
+            targetFastPolicyName, volumeName):
+        """If the target host is FAST enabled.
+
+        If the target host is FAST enabled then we need to add it to the
+        default storage group for that policy
+
+        :param volumeInstance: the volume instance
+        :param storageSystemName: the storage system name
+        :param targetFastPolicyName: the target fast policy name
+        :param volumeName: the volume name
+        :returns: boolean True/False
+        """
+        falseRet = False
+        LOG.info(_("Adding volume: %(volumeName)s to default storage group "
+                   "for FAST policy: %(fastPolicyName)s ")
+                 % {'volumeName': volumeName,
+                    'fastPolicyName': targetFastPolicyName})
+
+        controllerConfigurationService = (
+            self.utils.find_controller_configuration_service(
+                self.conn, storageSystemName))
+
+        defaultStorageGroupInstanceName = (
+            self.fast.get_or_create_default_storage_group(
+                self.conn, controllerConfigurationService,
+                targetFastPolicyName, volumeInstance))
+        if defaultStorageGroupInstanceName is None:
+            exceptionMessage = (_(
+                "Unable to create or get default storage group for FAST policy"
+                ": %(fastPolicyName)s. ")
+                % {'fastPolicyName': targetFastPolicyName})
+            LOG.error(exceptionMessage)
+
+            return falseRet
+
+        defaultStorageGroupInstanceName = (
+            self.fast.add_volume_to_default_storage_group_for_fast_policy(
+                self.conn, controllerConfigurationService, volumeInstance,
+                volumeName, targetFastPolicyName))
+        if defaultStorageGroupInstanceName is None:
+            exceptionMessage = (_(
+                "Failed to verify that volume was added to storage group for "
+                "FAST policy: %(fastPolicyName)s. ")
+                % {'fastPolicyName': targetFastPolicyName})
+            LOG.error(exceptionMessage)
+            return falseRet
+
+        return True
+
+    def _migrate_volume_from(self, volume, volumeInstance,
+                             targetPoolName, sourceFastPolicyName):
+        """Check FAST policies and migrate from source pool
+
+        :param volume: the volume object including the volume_type_id
+        :param volumeInstance: the volume instance
+        :param targetPoolName: the target poolName
+        :param sourceFastPolicyName: the source FAST policy name, can be None
+        :returns: boolean True/False
+        :returns: int, the return code from migrate operation
+        """
+        falseRet = (False, -1)
+        volumeName = volume['name']
+        storageSystemName = volumeInstance['SystemName']
+
+        LOG.debug("sourceFastPolicyName is : %(sourceFastPolicyName)s. "
+                  % {'sourceFastPolicyName': sourceFastPolicyName})
+
+        # If the source volume is is FAST enabled it must first be removed
+        # from the default storage group for that policy
+        if sourceFastPolicyName is not None:
+            self.remove_from_default_SG(
+                self.conn, volumeInstance, storageSystemName,
+                sourceFastPolicyName, volumeName)
+
+        # migrate from one pool to another
+        storageRelocationService = self.utils.find_storage_relocation_service(
+            self.conn, storageSystemName)
+
+        targetPoolInstanceName = self.utils.get_pool_by_name(
+            self.conn, targetPoolName, storageSystemName)
+        if targetPoolInstanceName is None:
+            exceptionMessage = (_(
+                "Error finding targe pool instance name for pool: "
+                "%(targetPoolName)s. ")
+                % {'targetPoolName': targetPoolName})
+            LOG.error(exceptionMessage)
+            return falseRet
+        try:
+            rc = self.provision.migrate_volume_to_storage_pool(
+                self.conn, storageRelocationService, volumeInstance.path,
+                targetPoolInstanceName)
+        except Exception as e:
+            # rollback by deleting the volume if adding the volume to the
+            # default storage group were to fail
+            LOG.error(_("Exception: %s") % six.text_type(e))
+            exceptionMessage = (_("Error migrating volume: %(volumename)s. "
+                                  "to target pool  %(targetPoolName)s. ")
+                                % {'volumename': volumeName,
+                                   'targetPoolName': targetPoolName})
+            LOG.error(exceptionMessage)
+            return falseRet
+
+        # check that the volume is now migrated to the correct storage pool,
+        # if it is terminate the migrate session
+        foundPoolInstanceName = self.utils.get_assoc_pool_from_volume(
+            self.conn, volumeInstance.path)
+
+        if (foundPoolInstanceName is None or
+                (foundPoolInstanceName['InstanceID'] !=
+                    targetPoolInstanceName['InstanceID'])):
+            exceptionMessage = (_(
+                "Volume : %(volumeName)s. was not successfully migrated to "
+                "target pool %(targetPoolName)s.")
+                % {'volumeName': volumeName,
+                   'targetPoolName': targetPoolName})
+            LOG.error(exceptionMessage)
+            return falseRet
+
+        else:
+            LOG.debug("Terminating migration session on : %(volumeName)s. "
+                      % {'volumeName': volumeName})
+            self.provision._terminate_migrate_session(
+                self.conn, volumeInstance.path)
+
+        if rc == 0:
+            moved = True
+
+        return moved, rc
+
    def remove_from_default_SG(
            self, conn, volumeInstance, storageSystemName,
            sourceFastPolicyName, volumeName):
        """For FAST, remove volume from default storage group

        :param conn: connection info to ECOM
        :param volumeInstance: the volume instance
        :param storageSystemName: the storage system name
        :param sourceFastPolicyName: the source FAST policy name
        :param volumeName: the volume Name
        :returns: None
        :raises: VolumeBackendAPIException if the removal fails
        """
        controllerConfigurationService = (
            self.utils.find_controller_configuration_service(
                conn, storageSystemName))
        try:
            defaultStorageGroupInstanceName = (
                self.masking.remove_device_from_default_storage_group(
                    conn, controllerConfigurationService,
                    volumeInstance.path, volumeName, sourceFastPolicyName))
        except Exception as ex:
            LOG.error(_("Exception: %s") % six.text_type(ex))
            exceptionMessage = (_("Failed to remove: %(volumename)s. "
                                  "from the default storage group for "
                                  "FAST policy %(fastPolicyName)s. ")
                                % {'volumename': volumeName,
                                   'fastPolicyName': sourceFastPolicyName})

            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        # A None result means the volume was never in the policy's default
        # storage group to begin with — log it, but do not fail.
        if defaultStorageGroupInstanceName is None:
            warnMessage = (_("The volume: %(volumename)s. "
                             "was not first part of the default storage "
                             "group for FAST policy %(fastPolicyName)s.")
                           % {'volumename': volumeName,
                              'fastPolicyName': sourceFastPolicyName})
            LOG.warn(warnMessage)
+
    def add_to_default_SG(
            self, conn, volumeInstance, storageSystemName,
            targetFastPolicyName, volumeName):
        """For FAST, add volume to default storage group.

        Looks up the controller configuration service for the array and asks
        the FAST helper to add the volume to the default storage group for
        the target FAST policy.

        :param conn: connection info to ECOM
        :param volumeInstance: the volume instance
        :param storageSystemName: the storage system name
        :param targetFastPolicyName: the target FAST policy name
        :param volumeName: the volume Name
        :returns: None
        """
        controllerConfigurationService = (
            self.utils.find_controller_configuration_service(
                conn, storageSystemName))
        assocDefaultStorageGroupName = (
            self.fast
            .add_volume_to_default_storage_group_for_fast_policy(
                conn, controllerConfigurationService, volumeInstance,
                volumeName, targetFastPolicyName))
        # NOTE(review): failure here is only logged, not raised — callers
        # appear to treat this as best-effort; confirm an exception is not
        # expected by the migrate/retype paths.
        if assocDefaultStorageGroupName is None:
            errorMsg = (_(
                "Failed to add %(volumeName)s "
                "to default storage group for fast policy "
                "%(fastPolicyName)s ")
                % {'volumeName': volumeName,
                   'fastPolicyName': targetFastPolicyName})
            LOG.error(errorMsg)
+
+    def _is_valid_for_storage_assisted_migration(
+            self, volumeInstanceName, host, sourceArraySerialNumber,
+            volumeName, volumeStatus):
+        """Check if volume is suitable for storage assisted (pool) migration.
+
+        :param volumeInstanceName: the volume instance id
+        :param host: the host object
+        :param sourceArraySerialNumber: the array serial number of
+                                  the original volume
+        :param volumeName: the name of the volume to be migrated
+        :param volumeStatus: the status of the volume e.g
+        :returns: boolean, True/False
+        :returns: string, targetPool
+        :returns: string, targetFastPolicy
+        """
+        falseRet = (False, None, None)
+        if 'location_info' not in host['capabilities']:
+            LOG.error(_('Error getting target pool name and array'))
+            return falseRet
+        info = host['capabilities']['location_info']
+
+        LOG.debug("Location info is : %(info)s."
+                  % {'info': info})
+        try:
+            infoDetail = info.split('#')
+            targetArraySerialNumber = infoDetail[0]
+            targetPoolName = infoDetail[1]
+            targetFastPolicy = infoDetail[2]
+        except Exception:
+            LOG.error(_("Error parsing target pool name, array, "
+                        "and fast policy"))
+
+        if targetArraySerialNumber not in sourceArraySerialNumber:
+            errorMessage = (_(
+                "The source array : %(sourceArraySerialNumber)s does not "
+                "match the target array: %(targetArraySerialNumber)s"
+                "skipping storage-assisted migration")
+                % {'sourceArraySerialNumber': sourceArraySerialNumber,
+                   'targetArraySerialNumber': targetArraySerialNumber})
+            LOG.error(errorMessage)
+            return falseRet
+
+        # get the pool from the source array and check that is is different
+        # to the pool in the target array
+        assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
+            self.conn, volumeInstanceName)
+        assocPoolInstance = self.conn.GetInstance(
+            assocPoolInstanceName)
+        if assocPoolInstance['ElementName'] == targetPoolName:
+            errorMessage = (_("No action required. Volume : %(volumeName)s is "
+                              "already part of pool : %(pool)s")
+                            % {'volumeName': volumeName,
+                               'pool': targetPoolName})
+            LOG.error(errorMessage)
+            return falseRet
+
+        LOG.info("Volume status is: %s" % volumeStatus)
+        if (host['capabilities']['storage_protocol'] != self.protocol and
+                (volumeStatus != 'available' and volumeStatus != 'retyping')):
+            errorMessage = (_(
+                "Only available volumes can be migrated between "
+                "different protocols"))
+            LOG.error(errorMessage)
+            return falseRet
+
+        return (True, targetPoolName, targetFastPolicy)
+
+    def _set_config_file_and_get_extra_specs(self, volume, filename=None):
+        """Given the volume object get the associated volumetype.
+
+        Given the volume object get the associated volumetype and the
+        extra specs associated with it.
+        Based on the name of the config group, register the config file
+
+        :param volume: the volume object including the volume_type_id
+        :returns: tuple the extra specs tuple
+        :returns: string configuration file
+        """
+        extraSpecs = self.utils.get_volumetype_extraspecs(volume)
+        configGroup = None
+
+        # If there are no extra specs then the default case is assumed
+        if extraSpecs:
+            configGroup = self.configuration.config_group
+            LOG.info("configGroup of current host: %s" % configGroup)
+
+        configurationFile = self._register_config_file_from_config_group(
+            configGroup)
+
+        return extraSpecs, configurationFile
+
+    def _get_ecom_connection(self):
+        """Get the ecom connection
+
+        :returns: conn,the ecom connection
+        """
+        conn = pywbem.WBEMConnection(self.url, (self.user, self.passwd),
+                                     default_namespace='root/emc')
+        if conn is None:
+            exception_message = (_("Cannot connect to ECOM server"))
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        return conn
+
+    def _find_storageSystem(self, arrayStr):
+        """Find an array instance name given the array name.
+
+        :param arrayStr: the array Serial number (String)
+        :returns: foundPoolInstanceName, the CIM Instance Name of the Pool
+        """
+        foundStorageSystemInstanceName = None
+        storageSystemInstanceNames = self.conn.EnumerateInstanceNames(
+            'EMC_StorageSystem')
+        for storageSystemInstanceName in storageSystemInstanceNames:
+            arrayName = storageSystemInstanceName['Name']
+            index = arrayName.find(arrayStr)
+            if index > -1:
+                foundStorageSystemInstanceName = storageSystemInstanceName
+
+        if foundStorageSystemInstanceName is None:
+            exceptionMessage = (_("StorageSystem %(array)s is not found.")
+                                % {'storage_array': arrayStr})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        LOG.debug("Array Found: %(array)s.."
+                  % {'array': arrayStr})
+
+        return foundStorageSystemInstanceName
+
    def _find_pool_in_array(self, arrayStr, poolNameInStr):
        """Find a pool based on the pool name on a given array.

        :param arrayStr: the array Serial number (String)
        :param poolNameInStr: the name of the pool (String)
        :returns: foundPoolInstanceName, the CIM Instance Name of the Pool
        :returns: systemNameStr, the storage system name (String)
        :raises: VolumeBackendAPIException if pool or system is not found
        """
        foundPoolInstanceName = None
        systemNameStr = None

        storageSystemInstanceName = self._find_storageSystem(arrayStr)

        # Only thin (virtual provisioning) pools are considered.
        vpools = self.conn.AssociatorNames(
            storageSystemInstanceName,
            resultClass='EMC_VirtualProvisioningPool')

        for vpool in vpools:
            poolinstance = vpool['InstanceID']
            # Example: SYMMETRIX+000195900551+TP+Sol_Innov
            poolnameStr, systemNameStr = self.utils.parse_pool_instance_id(
                poolinstance)
            if poolnameStr is not None and systemNameStr is not None:
                if six.text_type(poolNameInStr) == six.text_type(poolnameStr):
                    foundPoolInstanceName = vpool
                    break

        if foundPoolInstanceName is None:
            exceptionMessage = (_("Pool %(poolNameInStr)s is not found.")
                                % {'poolNameInStr': poolNameInStr})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        if systemNameStr is None:
            exception_message = (_("Storage system not found for pool "
                                   "%(poolNameInStr)s.")
                                 % {'poolNameInStr': poolNameInStr})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        LOG.debug("Pool: %(pool)s  SystemName: %(systemname)s."
                  % {'pool': foundPoolInstanceName,
                     'systemname': systemNameStr})
        return foundPoolInstanceName, systemNameStr
+
    def _find_lun(self, volume):
        """Given the volume get the instance from it.

        :param volume: volume object
        :returns: foundVolumeinstance, the CIM instance of the volume on
                  the array, or None if it was not found
        """
        foundVolumeinstance = None
        volumename = volume['name']

        # provider_location stores a stringified dict with the CIM
        # classname and keybindings of the volume.
        loc = volume['provider_location']
        if isinstance(loc, six.string_types):
            # NOTE(review): eval() on a DB-stored string is unsafe if
            # provider_location could ever be attacker-influenced;
            # ast.literal_eval would accept the dict literal stored here.
            name = eval(loc)

            instancename = self.utils.get_instance_name(
                name['classname'], name['keybindings'])

            foundVolumeinstance = self.conn.GetInstance(instancename)

        if foundVolumeinstance is None:
            LOG.debug("Volume %(volumename)s not found on the array."
                      % {'volumename': volumename})
        else:
            LOG.debug("Volume name: %(volumename)s  Volume instance: "
                      "%(foundVolumeinstance)s."
                      % {'volumename': volumename,
                         'foundVolumeinstance': foundVolumeinstance})

        return foundVolumeinstance
+
+    def _find_storage_sync_sv_sv(self, snapshot, volume,
+                                 waitforsync=True):
+        """Find the storage synchronized name
+
+        :param snapshot: snapshot object
+        :param volume: volume object
+        :returns: foundsyncname (String)
+        :returns: storage_system (String)
+        """
+        snapshotname = snapshot['name']
+        volumename = volume['name']
+        LOG.debug("Source: %(volumename)s  Target: %(snapshotname)s."
+                  % {'volumename': volumename, 'snapshotname': snapshotname})
+
+        snapshot_instance = self._find_lun(snapshot)
+        volume_instance = self._find_lun(volume)
+        storage_system = volume_instance['SystemName']
+        classname = 'SE_StorageSynchronized_SV_SV'
+        bindings = {'SyncedElement': snapshot_instance.path,
+                    'SystemElement': volume_instance.path}
+        foundsyncname = self.utils.get_instance_name(classname, bindings)
+
+        if foundsyncname is None:
+            LOG.debug(
+                "Source: %(volumename)s  Target: %(snapshotname)s. "
+                "Storage Synchronized not found. "
+                % {'volumename': volumename,
+                   'snapshotname': snapshotname})
+        else:
+            LOG.debug("Storage system: %(storage_system)s  "
+                      "Storage Synchronized instance: %(sync)s."
+                      % {'storage_system': storage_system,
+                         'sync': foundsyncname})
+            # Wait for SE_StorageSynchronized_SV_SV to be fully synced
+            if waitforsync:
+                self.utils.wait_for_sync(self.conn, foundsyncname)
+
+        return foundsyncname, storage_system
+
+    def _find_initiator_names(self, connector):
+        foundinitiatornames = []
+        iscsi = 'iscsi'
+        fc = 'fc'
+        name = 'initiator name'
+        if self.protocol.lower() == iscsi and connector['initiator']:
+            foundinitiatornames.append(connector['initiator'])
+        elif self.protocol.lower() == fc and connector['wwpns']:
+            for wwn in connector['wwpns']:
+                foundinitiatornames.append(wwn)
+            name = 'world wide port names'
+
+        if foundinitiatornames is None or len(foundinitiatornames) == 0:
+            msg = (_("Error finding %s.") % name)
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        LOG.debug("Found %(name)s: %(initiator)s."
+                  % {'name': name,
+                     'initiator': foundinitiatornames})
+        return foundinitiatornames
+
+    def find_device_number(self, volume, connector):
+        """Given the volume dict find a device number.
+
+        Find a device number  that a host can see
+        for a volume
+
+        :param volume: the volume dict
+        :param connector: the connector dict
+        :returns: data, the data dict
+
+        """
+        foundNumDeviceNumber = None
+        volumeName = volume['name']
+        volumeInstance = self._find_lun(volume)
+        storageSystemName = volumeInstance['SystemName']
+
+        unitnames = self.conn.ReferenceNames(
+            volumeInstance.path,
+            ResultClass='CIM_ProtocolControllerForUnit')
+
+        for unitname in unitnames:
+            controller = unitname['Antecedent']
+            classname = controller['CreationClassName']
+            index = classname.find('Symm_LunMaskingView')
+            if index > -1:
+                unitinstance = self.conn.GetInstance(unitname,
+                                                     LocalOnly=False)
+                numDeviceNumber = int(unitinstance['DeviceNumber'],
+                                      16)
+                foundNumDeviceNumber = numDeviceNumber
+                break
+
+        if foundNumDeviceNumber is None:
+            LOG.debug(
+                "Device number not found for volume "
+                "%(volumeName)s %(volumeInstance)s."
+                % {'volumeName': volumeName,
+                   'volumeInstance': volumeInstance.path})
+
+        data = {'hostlunid': foundNumDeviceNumber,
+                'storagesystem': storageSystemName}
+
+        LOG.debug("Device info: %(data)s." % {'data': data})
+
+        return data
+
+    def get_target_wwns(self, storageSystem, connector):
+        """Find target WWNs.
+
+        :param storageSystem: the storage system name
+        :param connector: the connector dict
+        :returns: targetWwns, the target WWN list
+        """
+        targetWwns = []
+
+        storageHardwareService = self.utils.find_storage_hardwareid_service(
+            self.conn, storageSystem)
+
+        hardwareIdInstances = self._find_storage_hardwareids(
+            connector, storageHardwareService)
+
+        LOG.debug(
+            "EMCGetTargetEndpoints: Service: %(service)s  "
+            "Storage HardwareIDs: %(hardwareIds)s."
+            % {'service': storageHardwareService,
+               'hardwareIds': hardwareIdInstances})
+
+        for hardwareIdInstance in hardwareIdInstances:
+            LOG.debug("HardwareID instance is  : %(hardwareIdInstance)s  "
+                      % {'hardwareIdInstance': hardwareIdInstance})
+            try:
+                rc, targetEndpoints = self.provision.get_target_endpoints(
+                    self.conn, storageHardwareService, hardwareIdInstance)
+            except Exception as ex:
+                LOG.error(_("Exception: %s") % six.text_type(ex))
+                errorMessage = (_(
+                    "Unable to get target endpoints for hardwareId "
+                    "%(hardwareIdInstance)s")
+                    % {'hardwareIdInstance': hardwareIdInstance})
+                LOG.error(errorMessage)
+                raise exception.VolumeBackendAPIException(data=errorMessage)
+
+            if targetEndpoints:
+                endpoints = targetEndpoints['TargetEndpoints']
+
+                LOG.debug("There are  %(len)lu endpoints "
+                          % {'len': len(endpoints)})
+                for targetendpoint in endpoints:
+                    wwn = targetendpoint['Name']
+                    # Add target wwn to the list if it is not already there
+                    if not any(d == wwn for d in targetWwns):
+                        targetWwns.append(wwn)
+            else:
+                LOG.error(_(
+                    "Target end points do not exist for hardware Id : "
+                    "%(hardwareIdInstance)s ")
+                    % {'hardwareIdInstance': hardwareIdInstance})
+
+        LOG.debug("Target WWNs: : %(targetWwns)s  "
+                  % {'targetWwns': targetWwns})
+
+        return targetWwns
+
+    def _find_storage_hardwareids(
+            self, connector, hardwareIdManagementService):
+        """Find the storage hardware ID instances.
+
+        :param connector: the connector dict
+        :param hardwareIdManagementService: the storage Hardware
+                                            management service
+        :returns: foundInstances, the list of storage hardware ID instances
+        """
+        foundInstances = []
+        wwpns = self._find_initiator_names(connector)
+
+        hardwareIdInstanceNames = (
+            self.utils.get_hardware_id_instance_names_from_array(
+                self.conn, hardwareIdManagementService))
+        for hardwareIdInstanceName in hardwareIdInstanceNames:
+            hardwareIdInstance = self.conn.GetInstance(hardwareIdInstanceName)
+            storageId = hardwareIdInstance['StorageID']
+            for wwpn in wwpns:
+                if wwpn.lower() == storageId.lower():
+                    foundInstances.append(hardwareIdInstance.path)
+                    break
+
+        LOG.debug("Storage Hardware IDs for %(wwpns)s is "
+                  "%(foundInstances)s."
+                  % {'wwpns': wwpns,
+                     'foundInstances': foundInstances})
+
+        return foundInstances
+
+    def _register_config_file_from_config_group(self, configGroupName):
+        """Given the config group name register the file.
+
+        :param configGroupName: the config group name
+        :returns: string configurationFile
+        """
+        if configGroupName is None:
+            self._set_ecom_credentials(CINDER_EMC_CONFIG_FILE)
+            return CINDER_EMC_CONFIG_FILE
+        if hasattr(self.configuration, 'cinder_emc_config_file'):
+            configurationFile = self.configuration.cinder_emc_config_file
+        else:
+            configurationFile = (
+                CINDER_EMC_CONFIG_FILE_PREFIX + configGroupName +
+                CINDER_EMC_CONFIG_FILE_POSTFIX)
+
+        # The file saved in self.configuration may not be the correct one,
+        # double check
+        if configGroupName not in configurationFile:
+            configurationFile = (
+                CINDER_EMC_CONFIG_FILE_PREFIX + configGroupName +
+                CINDER_EMC_CONFIG_FILE_POSTFIX)
+
+        self._set_ecom_credentials(configurationFile)
+        return configurationFile
+
+    def _set_ecom_credentials(self, configurationFile):
+        """Given the configuration file set the ecom credentials.
+
+        :param configurationFile: name of the file (String)
+        :raises: VolumeBackendAPIException
+        """
+        if os.path.isfile(configurationFile):
+            LOG.debug("Configuration file : %(configurationFile)s exists"
+                      % {'configurationFile': configurationFile})
+        else:
+            exceptionMessage = (_(
+                "Configuration file %(configurationFile)s does not exist ")
+                % {'configurationFile': configurationFile})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        ip, port = self.utils.get_ecom_server(configurationFile)
+        self.user, self.passwd = self.utils.get_ecom_cred(configurationFile)
+        self.url = 'http://' + ip + ':' + port
+        self.conn = self._get_ecom_connection()
+
    def _initial_setup(self, volume):
        """Necessary setup to accummulate the relevant information.

        The volume object has a host in which we can parse the
        config group name. The config group name is the key to our EMC
        configuration file. The emc configuration file contains pool name
        and array name which are mandatory fields.
        FastPolicy is optional.
        StripedMetaCount is an extra spec that determines whether
        the composite volume should be concatenated or striped.

        :param volume: the volume Object
        :returns: tuple extra spec tuple, with pool, array, FAST policy,
                  composite type and member count filled in
        :raises: VolumeBackendAPIException if mandatory configuration
                 (pool or array) is missing or unreadable
        """
        try:
            extraSpecs, configurationFile = (
                self._set_config_file_and_get_extra_specs(volume))
            poolName = None

            try:
                # Absence of the STRIPECOUNT key raises and lands us in the
                # except branch; striping is opt-in via extra specs.
                stripedMetaCount = extraSpecs[STRIPECOUNT]
                extraSpecs[MEMBERCOUNT] = stripedMetaCount
                extraSpecs[COMPOSITETYPE] = STRIPED

                LOG.debug(
                    "There are: %(stripedMetaCount)s striped metas in "
                    "the extra specs"
                    % {'stripedMetaCount': stripedMetaCount})
            except Exception:
                # Default: single-member concatenated composite volume.
                memberCount = '1'
                extraSpecs[MEMBERCOUNT] = memberCount
                extraSpecs[COMPOSITETYPE] = CONCATENATED
                LOG.debug("StripedMetaCount is not in the extra specs")
                pass

            poolName = self.utils.parse_pool_name_from_file(configurationFile)
            if poolName is None:
                exceptionMessage = (_(
                    "The pool cannot be null. The pool must be configured "
                    "either in the extra specs or in the EMC configuration "
                    "file corresponding to the Volume Type. "))
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

            arrayName = self.utils.parse_array_name_from_file(
                configurationFile)
            if arrayName is None:
                exceptionMessage = (_(
                    "The array cannot be null. The pool must be configured "
                    "either as a cinder extra spec for multi-backend or in "
                    "the EMC configuration file for the default case "))
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

            # Get the FAST policy from the file; this value can be None if
            # the user doesn't want to associate with any FAST policy.
            fastPolicyName = self.utils.parse_fast_policy_name_from_file(
                configurationFile)
            if fastPolicyName is not None:
                LOG.debug("The fast policy name is : %(fastPolicyName)s. "
                          % {'fastPolicyName': fastPolicyName})

            extraSpecs[POOL] = poolName
            extraSpecs[ARRAY] = arrayName
            extraSpecs[FASTPOLICY] = fastPolicyName

            LOG.debug("Pool is: %(pool)s "
                      "Array is: %(array)s "
                      "FastPolicy is: %(fastPolicy)s "
                      "CompositeType is: %(compositeType)s "
                      "MemberCount is: %(memberCount)s "
                      % {'pool': extraSpecs[POOL],
                         'array': extraSpecs[ARRAY],
                         'fastPolicy': extraSpecs[FASTPOLICY],
                         'compositeType': extraSpecs[COMPOSITETYPE],
                         'memberCount': extraSpecs[MEMBERCOUNT]})

        except Exception:
            # NOTE(review): this broad handler replaces the original
            # error (including the explicit raises above) with a generic
            # configuration message — the root cause is lost; consider
            # logging the original exception here.
            exceptionMessage = (_(
                "Unable to get configuration information necessary to create "
                "a volume. Please check that there is a configuration file "
                "for each config group, if multi-backend is enabled. "
                "The should be in the following format "
                "/etc/cinder/cinder_emc_config_<CONFIG_GROUP>.xml"))
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        return extraSpecs
+
+    def _get_pool_and_storage_system(self, extraSpecs):
+        """Given the extra specs get the pool and storage system name.
+
+        :params extraSpecs: the extra spec tuple
+        :returns: poolInstanceName The pool instance name
+        :returns: String  the storage system name
+        """
+
+        try:
+            array = extraSpecs[ARRAY]
+            poolInstanceName, storageSystemStr = self._find_pool_in_array(
+                array, extraSpecs[POOL])
+        except Exception:
+            exceptionMessage = (_(
+                "You must supply an array in your EMC configuration file "))
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        if poolInstanceName is None or storageSystemStr is None:
+            exceptionMessage = (_(
+                "Cannot get necessary pool or storage system information "))
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return poolInstanceName, storageSystemStr
+
+    def _populate_masking_dict(self, volume, connector, extraSpecs):
+        """Get all the names of the maskingView and subComponents.
+
+        :param volume: the volume object
+        :param connector: the connector object
+        :param extraSpecs: the extra spec tuple
+        :returns: tuple maskingViewDict a tuple with masking view information
+        """
+        maskingViewDict = {}
+        hostName = connector['host']
+        poolName = extraSpecs[POOL]
+        volumeName = volume['name']
+        protocol = self.utils.get_short_protocol_type(self.protocol)
+
+        shortHostName = self.utils.get_host_short_name(hostName)
+
+        volumeInstance = self._find_lun(volume)
+        storageSystemName = volumeInstance['SystemName']
+
+        maskingViewDict['controllerConfigService'] = (
+            self.utils.find_controller_configuration_service(
+                self.conn, storageSystemName))
+        maskingViewDict['sgGroupName'] = (
+            'OS-' + shortHostName + '-' + poolName + '-' + protocol + '-SG')
+        maskingViewDict['maskingViewName'] = (
+            'OS-' + shortHostName + '-' + poolName + '-' + protocol + '-MV')
+        # The portGroup is gotten from emc xml config file
+        maskingViewDict['pgGroupName'] = (
+            self.utils.parse_file_to_get_port_group_name(
+                self.configuration.cinder_emc_config_file))
+
+        maskingViewDict['igGroupName'] = (
+            'OS-' + shortHostName + '-' + protocol + '-IG')
+        maskingViewDict['connector'] = connector
+        maskingViewDict['volumeInstance'] = volumeInstance
+        maskingViewDict['volumeName'] = volumeName
+        maskingViewDict['fastPolicy'] = (
+            self.utils.parse_fast_policy_name_from_file(
+                self.configuration.cinder_emc_config_file))
+        maskingViewDict['storageSystemName'] = storageSystemName
+
+        return maskingViewDict
+
+    def _add_volume_to_default_storage_group_on_create(
+            self, volumeDict, volumeName, storageConfigService,
+            storageSystemName, fastPolicyName):
+        """Add the volume to the default storage group for that policy.
+
+        On a create when fast policy is enable add the volume to the default
+        storage group for that policy. If it fails do the necessary rollback
+
+        :param volumeDict: the volume dictionary
+        :param volumeName: the volume name (String)
+        :param storageConfigService: the storage configuration service
+        :param storageSystemName: the storage system name (String)
+        :param fastPolicyName: the fast policy name (String)
+        :returns: tuple maskingViewDict with masking view information
+        """
+        try:
+            volumeInstance = self.utils.find_volume_instance(
+                self.conn, volumeDict, volumeName)
+            controllerConfigurationService = (
+                self.utils.find_controller_configuration_service(
+                    self.conn, storageSystemName))
+
+            self.fast.add_volume_to_default_storage_group_for_fast_policy(
+                self.conn, controllerConfigurationService, volumeInstance,
+                volumeName, fastPolicyName)
+            foundStorageGroupInstanceName = (
+                self.utils.get_storage_group_from_volume(
+                    self.conn, volumeInstance.path))
+
+            if foundStorageGroupInstanceName is None:
+                exceptionMessage = (_(
+                    "Error adding Volume: %(volumeName)s.  "
+                    "with instance path: %(volumeInstancePath)s. ")
+                    % {'volumeName': volumeName,
+                       'volumeInstancePath': volumeInstance.path})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        except Exception as e:
+            # rollback by deleting the volume if adding the volume to the
+            # default storage group were to fail
+            LOG.error(_("Exception: %s") % six.text_type(e))
+            errorMessage = (_(
+                "Rolling back %(volumeName)s by deleting it. ")
+                % {'volumeName': volumeName})
+            LOG.error(errorMessage)
+            self.provision.delete_volume_from_pool(
+                self.conn, storageConfigService, volumeInstance.path,
+                volumeName)
+            raise exception.VolumeBackendAPIException(data=errorMessage)
+
+    def _create_and_get_unbound_volume(
+            self, conn, storageConfigService, compositeVolumeInstanceName,
+            additionalSize):
+        """Create an unbound volume.
+
+        Create an unbound volume so it is in the correct state to add to a
+        composite volume
+
+        :param conn: the connection information to the ecom server
+        :param storageConfigService: thestorage config service instance name
+        :param compositeVolumeInstanceName: the composite volume instance name
+        :param additionalSize: the size you want to increase the volume by
+        :returns: volume instance modifiedCompositeVolumeInstance
+        """
+        assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
+            conn, compositeVolumeInstanceName)
+        appendVolumeInstance = self._create_and_get_volume_instance(
+            conn, storageConfigService, assocPoolInstanceName, 'appendVolume',
+            additionalSize)
+        isVolumeBound = self.utils.is_volume_bound_to_pool(
+            conn, appendVolumeInstance)
+
+        if 'True' in isVolumeBound:
+            appendVolumeInstance = (
+                self._unbind_and_get_volume_from_storage_pool(
+                    conn, storageConfigService, assocPoolInstanceName,
+                    appendVolumeInstance.path, 'appendVolume'))
+
+        return appendVolumeInstance
+
    def _create_and_get_volume_instance(
            self, conn, storageConfigService, poolInstanceName,
            volumeName, volumeSize):
        """Create and get a new volume.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage config service instance name
        :param poolInstanceName: the pool instance name
        :param volumeName: the volume name
        :param volumeSize: the size to create the volume
        :returns: volumeInstance, the volume instance
        """
        # NOTE(review): self.conn is used below rather than the conn
        # parameter; the two appear interchangeable in this driver, but
        # confirm before relying on the parameter being honored.
        volumeDict, rc = self.provision.create_volume_from_pool(
            self.conn, storageConfigService, volumeName, poolInstanceName,
            volumeSize)
        volumeInstance = self.utils.find_volume_instance(
            self.conn, volumeDict, volumeName)
        return volumeInstance
+
+    def _unbind_and_get_volume_from_storage_pool(
+            self, conn, storageConfigService, poolInstanceName,
+            volumeInstanceName, volumeName):
+        """Unbind a volume from a pool and return the unbound volume.
+
+        :param conn: the connection information to the ecom server
+        :param storageConfigService: the storage config service instance name
+        :param poolInstanceName: the pool instance name
+        :param volumeInstanceName: the volume instance name
+        :param volumeName: string the volumeName
+        :returns: unboundVolumeInstance the unbound volume instance
+        """
+
+        rc, job = self.provision.unbind_volume_from_storage_pool(
+            conn, storageConfigService, poolInstanceName, volumeInstanceName,
+            volumeName)
+        volumeDict = self.provision.get_volume_dict_from_job(conn, job['Job'])
+        volumeInstance = self.utils.find_volume_instance(
+            self.conn, volumeDict, volumeName)
+        return volumeInstance
+
+    def _modify_and_get_composite_volume_instance(
+            self, conn, elementCompositionServiceInstanceName, volumeInstance,
+            appendVolumeInstanceName, volumeName, compositeType):
+        """Given an existing composite volume add a new composite volume to it.
+
+        :param conn: the connection information to the ecom server
+        :param elementCompositionServiceInstanceName: the storage element
+                                                      composition service
+                                                      instance name
+        :param volumeInstanceName: the volume instance name
+        :param appendVolumeInstanceName: the appended volume instance name
+        :param volumeName: the volume name
+        :param compositeType: concatenated
+        :returns: int rc the return code
+        :returns: modifiedVolumeDict the modified volume Dict
+        """
+        isComposite = self.utils.check_if_volume_is_composite(
+            self.conn, volumeInstance)
+        if 'True' in isComposite:
+            rc, job = self.provision.modify_composite_volume(
+                conn, elementCompositionServiceInstanceName,
+                volumeInstance.path, appendVolumeInstanceName)
+        elif 'False' in isComposite:
+            rc, job = self.provision.create_new_composite_volume(
+                conn, elementCompositionServiceInstanceName,
+                volumeInstance.path, appendVolumeInstanceName, compositeType)
+        else:
+            exception_message = (_(
+                "Unable to determine whether %(volumeName)s is "
+                "composite or not ")
+                % {'volumeName': volumeName})
+            LOG.error(exception_message)
+            raise
+
+        modifiedVolumeDict = self.provision.get_volume_dict_from_job(
+            conn, job['Job'])
+
+        return rc, modifiedVolumeDict
+
+    def _get_or_create_default_storage_group(
+            self, conn, storageSystemName, volumeDict, volumeName,
+            fastPolicyName):
+        """Get or create a default storage group for a fast policy.
+
+        :param conn: the connection information to the ecom server
+        :param storageSystemName: the storage system name
+        :param volumeDict: the volume dictionary
+        :param volumeName: the volume name
+        :param fastPolicyName: the fast policy name
+        :returns: defaultStorageGroupInstanceName
+        """
+        controllerConfigService = (
+            self.utils.find_controller_configuration_service(
+                self.conn, storageSystemName))
+
+        volumeInstance = self.utils.find_volume_instance(
+            self.conn, volumeDict, volumeName)
+        defaultStorageGroupInstanceName = (
+            self.fast.get_or_create_default_storage_group(
+                self.conn, controllerConfigService, fastPolicyName,
+                volumeInstance))
+        return defaultStorageGroupInstanceName
+
+    def _create_cloned_volume(
+            self, cloneVolume, sourceVolume, isSnapshot=False):
+        """Create a clone volume from the source volume.
+
+        Creates a full clone via CreateElementReplica, then detaches the
+        replication relationship so the clone is usable as a regular LUN.
+        If a FAST policy applies, the clone is added to the policy's
+        default storage group.
+
+        :param cloneVolume: clone volume
+        :param sourceVolume: source of the clone volume
+        :param isSnapshot: whether the clone source is a snapshot; not read
+                           in this method body -- TODO confirm intended use
+        :returns: cloneDict the cloned volume dictionary
+        """
+        extraSpecs = self._initial_setup(cloneVolume)
+
+        sourceName = sourceVolume['name']
+        cloneName = cloneVolume['name']
+
+        LOG.info(_("Create a Clone from Volume: Clone Volume: %(cloneName)s  "
+                   "Source Volume: %(sourceName)s")
+                 % {'cloneName': cloneName,
+                    'sourceName': sourceName})
+
+        self.conn = self._get_ecom_connection()
+
+        sourceInstance = self._find_lun(sourceVolume)
+        storageSystem = sourceInstance['SystemName']
+
+        LOG.debug("Create Cloned Volume: Volume: %(cloneName)s  "
+                  "Source Volume: %(sourceName)s  Source Instance: "
+                  "%(sourceInstance)s  Storage System: %(storageSystem)s."
+                  % {'cloneName': cloneName,
+                     'sourceName': sourceName,
+                     'sourceInstance': sourceInstance.path,
+                     'storageSystem': storageSystem})
+
+        repServiceInstanceName = self.utils.find_replication_service(
+            self.conn, storageSystem)
+
+        LOG.debug("Create Cloned Volume: Volume: %(cloneName)s  "
+                  "Source Volume: %(sourceName)s  "
+                  "Method: CreateElementReplica  "
+                  "ReplicationService: %(service)s  ElementName: "
+                  "%(elementname)s  SyncType: 8  SourceElement: "
+                  "%(sourceelement)s"
+                  % {'cloneName': cloneName,
+                     'sourceName': sourceName,
+                     'service': repServiceInstanceName,
+                     'elementname': cloneName,
+                     'sourceelement': sourceInstance.path})
+
+        # Create a Clone from source volume
+        rc, job = self.provision.create_element_replica(
+            self.conn, repServiceInstanceName, cloneName, sourceName,
+            sourceInstance)
+
+        cloneDict = self.provision.get_volume_dict_from_job(
+            self.conn, job['Job'])
+
+        cloneVolume['provider_location'] = six.text_type(cloneDict)
+        syncInstanceName, storageSystemName = (
+            self._find_storage_sync_sv_sv(cloneVolume, sourceVolume))
+
+        # Remove the Clone relationship so it can be used as a regular lun
+        # 8 - Detach operation
+        rc, job = self.provision.delete_clone_relationship(
+            self.conn, repServiceInstanceName, syncInstanceName, cloneName,
+            sourceName)
+
+        # if FAST enabled place clone volume or volume from snapshot to
+        # default storage group
+        if extraSpecs[FASTPOLICY] is not None:
+            LOG.debug("Adding volume: %(cloneName)s to default storage group "
+                      "for FAST policy: %(fastPolicyName)s "
+                      % {'cloneName': cloneName,
+                         'fastPolicyName': extraSpecs[FASTPOLICY]})
+
+            storageConfigService = (
+                self.utils.find_storage_configuration_service(
+                    self.conn, storageSystemName))
+
+            # Fail fast: if the default storage group cannot be obtained,
+            # abort before attempting to add the clone to it.
+            defaultStorageGroupInstanceName = (
+                self._get_or_create_default_storage_group(
+                    self.conn, storageSystemName, cloneDict, cloneName,
+                    extraSpecs[FASTPOLICY]))
+            if defaultStorageGroupInstanceName is None:
+                exceptionMessage = (_(
+                    "Unable to create or get default storage group for FAST "
+                    "policy: %(fastPolicyName)s. ")
+                    % {'fastPolicyName': extraSpecs[FASTPOLICY]})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+            self._add_volume_to_default_storage_group_on_create(
+                cloneDict, cloneName, storageConfigService, storageSystemName,
+                extraSpecs[FASTPOLICY])
+
+        LOG.debug("Leaving _create_cloned_volume: Volume: "
+                  "%(cloneName)s Source Volume: %(sourceName)s  "
+                  "Return code: %(rc)lu."
+                  % {'cloneName': cloneName,
+                     'sourceName': sourceName,
+                     'rc': rc})
+
+        return cloneDict
+
+    def _delete_volume(self, volume):
+        """Helper function to delete the specified volume.
+
+        If the volume is under a FAST policy it is first removed from the
+        policy's default storage group; should the array delete then fail,
+        an attempt is made to return the volume to that group before the
+        exception propagates.
+
+        :param volume: volume object to be deleted
+        :returns: tuple -- (rc, volumeName); rc is -1 when the volume was
+                  not found on the array
+        :raises: VolumeBackendAPIException if the array delete fails
+        """
+
+        volumeName = volume['name']
+        rc = -1
+        errorRet = (rc, volumeName)
+
+        extraSpecs = self._initial_setup(volume)
+        self.conn = self._get_ecom_connection()
+
+        volumeInstance = self._find_lun(volume)
+        if volumeInstance is None:
+            LOG.error(_("Volume %(name)s not found on the array. "
+                        "No volume to delete.")
+                      % {'name': volumeName})
+            return errorRet
+
+        storageSystemName = volumeInstance['SystemName']
+
+        storageConfigservice = self.utils.find_storage_configuration_service(
+            self.conn, storageSystemName)
+        controllerConfigurationService = (
+            self.utils.find_controller_configuration_service(
+                self.conn, storageSystemName))
+
+        deviceId = volumeInstance['DeviceID']
+
+        fastPolicyName = extraSpecs[FASTPOLICY]
+        if fastPolicyName is not None:
+            # A FAST volume must leave its default storage group before it
+            # can be deleted from the pool.
+            defaultStorageGroupInstanceName = (
+                self.masking.remove_device_from_default_storage_group(
+                    self.conn, controllerConfigurationService,
+                    volumeInstance.path, volumeName, fastPolicyName))
+            if defaultStorageGroupInstanceName is None:
+                warnMessage = (_(
+                    "The volume: %(volumename)s. was not first part of the "
+                    "default storage group for FAST policy %(fastPolicyName)s"
+                    ".")
+                    % {'volumename': volumeName,
+                       'fastPolicyName': fastPolicyName})
+                LOG.warn(warnMessage)
+                # check if it is part of another storage group
+                self._pre_check_for_deletion(controllerConfigurationService,
+                                             volumeInstance.path, volumeName)
+
+        else:
+            # check if volume is part of a storage group
+            self._pre_check_for_deletion(controllerConfigurationService,
+                                         volumeInstance.path, volumeName)
+
+        LOG.debug("Delete Volume: %(name)s  Method: EMCReturnToStoragePool "
+                  "ConfigServic: %(service)s  TheElement: %(vol_instance)s "
+                  "DeviceId: %(deviceId)s "
+                  % {'service': storageConfigservice,
+                     'name': volumeName,
+                     'vol_instance': volumeInstance.path,
+                     'deviceId': deviceId})
+        try:
+            rc = self.provision.delete_volume_from_pool(
+                self.conn, storageConfigservice, volumeInstance.path,
+                volumeName)
+
+        except Exception as e:
+            # if we cannot successfully delete the volume then we want to
+            # return the volume to the default storage group
+            if (fastPolicyName is not None and
+                    defaultStorageGroupInstanceName is not None and
+                    storageSystemName is not None):
+                assocDefaultStorageGroupName = (
+                    self.fast
+                    .add_volume_to_default_storage_group_for_fast_policy(
+                        self.conn, controllerConfigurationService,
+                        volumeInstance, volumeName, fastPolicyName))
+                if assocDefaultStorageGroupName is None:
+                    errorMsg = (_(
+                        "Failed to Roll back to re-add volume %(volumeName)s "
+                        "to default storage group for fast policy "
+                        "%(fastPolicyName)s: Please contact your sysadmin to "
+                        "get the volume returned to the default storage group")
+                        % {'volumeName': volumeName,
+                           'fastPolicyName': fastPolicyName})
+                    LOG.error(errorMsg)
+
+            LOG.error(_("Exception: %s") % six.text_type(e))
+            errorMessage = (_("Failed to delete volume %(volumeName)s")
+                            % {'volumeName': volumeName})
+            LOG.error(errorMessage)
+            raise exception.VolumeBackendAPIException(data=errorMessage)
+
+        return (rc, volumeName)
+
+    def _pre_check_for_deletion(self, controllerConfigurationService,
+                                volumeInstanceName, volumeName):
+        """Check is volume is part of a storage group prior to delete
+
+        Log a warning if volume is part of storage group
+
+        :param controllerConfigurationService: controller configuration service
+        :param volumeInstanceName: volume instance name
+        :param volumeName: volume name (string)
+        """
+
+        storageGroupInstanceName = (
+            self.masking.get_associated_masking_group_from_device(
+                self.conn, volumeInstanceName))
+        if storageGroupInstanceName is not None:
+            LOG.warn(_("Pre check for deletion "
+                       "Volume: %(volumeName)s is part of a storage group "
+                       "Attempting removal from %(storageGroupInstanceName)s ")
+                     % {'volumeName': volumeName,
+                        'storageGroupInstanceName': storageGroupInstanceName})
+            self.provision.remove_device_from_storage_group(
+                self.conn, controllerConfigurationService,
+                storageGroupInstanceName,
+                volumeInstanceName, volumeName)
+
+    def _find_lunmasking_scsi_protocol_controller(self, storageSystemName,
+                                                  connector):
+        """Find LunMaskingSCSIProtocolController for the local host.
+
+        Enumerates the controllers on the given storage system and returns
+        the first one whose associated hardware IDs match one of the host's
+        initiators.
+
+        :param storageSystemName: the storage system name
+        :param connector: the connector object (initiator names are
+                          derived from it)
+        :returns: foundCtrl - the matching controller, or None
+        """
+
+        foundCtrl = None
+        initiators = self._find_initiator_names(connector)
+        controllers = self.conn.EnumerateInstanceNames(
+            'EMC_LunMaskingSCSIProtocolController')
+        for ctrl in controllers:
+            # Skip controllers belonging to other arrays.
+            if storageSystemName != ctrl['SystemName']:
+                continue
+            associators = self.conn.Associators(
+                ctrl, ResultClass='EMC_StorageHardwareID')
+            for assoc in associators:
+                # if EMC_StorageHardwareID matches the initiator,
+                # we found the existing EMC_LunMaskingSCSIProtocolController
+                # (Storage Group for VNX)
+                # we can use for masking a new LUN
+                hardwareid = assoc['StorageID']
+                for initiator in initiators:
+                    if hardwareid.lower() == initiator.lower():
+                        foundCtrl = ctrl
+                        break
+
+                # Propagate the inner-loop break out of both loops.
+                if foundCtrl is not None:
+                    break
+
+            if foundCtrl is not None:
+                break
+
+        LOG.debug("LunMaskingSCSIProtocolController for storage system "
+                  "%(storage_system)s and initiator %(initiator)s is  "
+                  "%(ctrl)s."
+                  % {'storage_system': storageSystemName,
+                     'initiator': initiators,
+                     'ctrl': foundCtrl})
+        return foundCtrl
+
+    def get_num_volumes_mapped(self, volume, connector):
+        """Returns how many volumes are in the same zone as the connector.
+
+        Find out how many volumes are mapped to a host
+        associated to the LunMaskingSCSIProtocolController
+
+        :param volume: volume object to be deleted
+        :param connector: volume object to be deleted
+        :returns: int numVolumesMapped
+        """
+
+        volumename = volume['name']
+        vol_instance = self._find_lun(volume)
+        if vol_instance is None:
+            msg = ("Volume %(name)s not found on the array. "
+                   "Cannot determine if there are volumes mapped."
+                   % {'name': volumename})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        storage_system = vol_instance['SystemName']
+
+        ctrl = self._find_lunmasking_scsi_protocol_controller(
+            storage_system,
+            connector)
+
+        LOG.debug("LunMaskingSCSIProtocolController for storage system "
+                  "%(storage)s and %(connector)s is %(ctrl)s."
+                  % {'storage': storage_system,
+                     'connector': connector,
+                     'ctrl': ctrl})
+
+        # return 0 if masking view does not exist
+        if ctrl is None:
+            return 0
+
+        associators = self.conn.Associators(
+            ctrl,
+            ResultClass='EMC_StorageVolume')
+
+        numVolumesMapped = len(associators)
+
+        LOG.debug("Found %(numVolumesMapped)d volumes on storage system "
+                  "%(storage)s mapped to %(connector)s."
+                  % {'numVolumesMapped': numVolumesMapped,
+                     'storage': storage_system,
+                     'connector': connector})
+
+        return numVolumesMapped
diff --git a/cinder/volume/drivers/emc/emc_vmax_fast.py b/cinder/volume/drivers/emc/emc_vmax_fast.py
new file mode 100644 (file)
index 0000000..9e3cbc5
--- /dev/null
@@ -0,0 +1,767 @@
+# Copyright (c) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.emc import emc_vmax_provision
+from cinder.volume.drivers.emc import emc_vmax_utils
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_SG_PREFIX = 'OS_default_'
+DEFAULT_SG_POSTFIX = '_SG'
+
+
+class EMCVMAXFast(object):
+    """FAST Class for SMI-S based EMC volume drivers.
+
+    This FAST class is for EMC volume drivers based on SMI-S.
+    It supports VMAX arrays.
+    """
+    def __init__(self, prtcl):
+        """Initialize the FAST helper.
+
+        :param prtcl: the protocol identifier, passed straight through to
+                      the utils and provision helpers (presumably 'iSCSI'
+                      or 'FC' -- confirm against the callers)
+        """
+        self.protocol = prtcl
+        self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
+        self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl)
+
+    def _check_if_fast_supported(self, conn, storageSystemInstanceName):
+        """Check to see if FAST is supported on the array.
+
+        :param conn: the connection to the ecom server
+        :param storageSystemInstanceName: the storage system Instance name
+        :returns: isTieringPolicySupported -- True/False, or None when the
+                  capability could not be determined; the failure cases are
+                  logged here but no exception is raised
+        """
+
+        tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
+            conn, storageSystemInstanceName)
+        isTieringPolicySupported = self.is_tiering_policy_enabled(
+            conn, tierPolicyServiceInstanceName)
+        if isTieringPolicySupported is None:
+            errorMessage = (_("Cannot determine whether "
+                              "Tiering Policy is support on this array."))
+            LOG.error(errorMessage)
+
+        # Explicit 'is False' so the None case above is not double-logged.
+        if isTieringPolicySupported is False:
+            errorMessage = (_("Tiering Policy is not "
+                              "supported on this array."))
+            LOG.error(errorMessage)
+        return isTieringPolicySupported
+
+    def is_tiering_policy_enabled(self, conn, tierPolicyServiceInstanceName):
+        """Checks to see if tiering policy is supported.
+
+        We will only check if there is a fast policy specified in
+        the config file.
+
+        :param conn: the connection information to the ecom server
+        :param tierPolicyServiceInstanceName: the tier policy service
+                                              instance name
+        :returns: foundIsSupportsTieringPolicies - True/False, or None when
+                  the capability property could not be read (logged, not
+                  raised)
+        """
+        foundIsSupportsTieringPolicies = None
+        tierPolicyCapabilityInstanceNames = conn.AssociatorNames(
+            tierPolicyServiceInstanceName,
+            ResultClass='CIM_TierPolicyServiceCapabilities',
+            AssocClass='CIM_ElementCapabilities')
+
+        # NOTE(review): assumes at least one capability instance is always
+        # associated with the service -- an empty result would raise
+        # IndexError here; confirm against the SMI-S provider.
+        tierPolicyCapabilityInstanceName = tierPolicyCapabilityInstanceNames[0]
+        tierPolicyCapabilityInstance = conn.GetInstance(
+            tierPolicyCapabilityInstanceName, LocalOnly=False)
+        propertiesList = (tierPolicyCapabilityInstance
+                          .properties.items())  # ['SupportsTieringPolicies']
+        for properties in propertiesList:
+            if properties[0] == 'SupportsTieringPolicies':
+                cimProperties = properties[1]
+                foundIsSupportsTieringPolicies = cimProperties.value
+                break
+
+        if foundIsSupportsTieringPolicies is None:
+            exception_message = (_("Cannot determine if Tiering Policies "
+                                   "are supported"))
+            LOG.error(exception_message)
+
+        return foundIsSupportsTieringPolicies
+
+    def get_and_verify_default_storage_group(
+            self, conn, controllerConfigService, volumeInstanceName,
+            volumeName, fastPolicyName):
+        """Retrieves and verifies the default storage group for a volume.
+
+        Given the volumeInstanceName get any associated storage group and
+        check that it is the default storage group. The default storage group
+        should have been already created. If not found error is logged.
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller config service
+        :param volumeInstanceName: the volume instance name
+        :param volumeName: the volume name (String)
+        :param fastPolicyName: the fast policy name (String)
+        :returns: foundDefaultStorageGroupInstanceName
+        """
+        foundDefaultStorageGroupInstanceName = None
+        storageSystemInstanceName = self.utils.find_storage_system(
+            conn, controllerConfigService)
+
+        if not self._check_if_fast_supported(conn, storageSystemInstanceName):
+            exceptionMessage = (_(
+                "FAST is not supported on this array "))
+            LOG.error(exceptionMessage)
+            raise
+
+        assocStorageGroupInstanceName = (
+            self.utils.get_storage_group_from_volume(conn, volumeInstanceName))
+        defaultSgGroupName = (DEFAULT_SG_PREFIX + fastPolicyName +
+                              DEFAULT_SG_POSTFIX)
+        defaultStorageGroupInstanceName = (
+            self.utils.find_storage_masking_group(conn,
+                                                  controllerConfigService,
+                                                  defaultSgGroupName))
+        if defaultStorageGroupInstanceName is None:
+            exceptionMessage = (_(
+                "Unable to find default storage group "
+                "for FAST policy : %(fastPolicyName)s ")
+                % {'fastPolicyName': fastPolicyName})
+            LOG.error(exceptionMessage)
+            raise
+
+        if assocStorageGroupInstanceName == defaultStorageGroupInstanceName:
+            foundDefaultStorageGroupInstanceName = (
+                assocStorageGroupInstanceName)
+        else:
+            exceptionMessage = (_(
+                "Volume: %(volumeName)s Does not belong "
+                "to storage storage group %(defaultSgGroupName)s. ")
+                % {'volumeName': volumeName,
+                   'defaultSgGroupName': defaultSgGroupName})
+            LOG.warn(exceptionMessage)
+        return foundDefaultStorageGroupInstanceName
+
+    def add_volume_to_default_storage_group_for_fast_policy(
+            self, conn, controllerConfigService, volumeInstance,
+            volumeName, fastPolicyName):
+        """Add a volume to the default storage group for FAST policy.
+
+        The storage group must pre-exist.  Once added to the storage group,
+        check the association to make sure it has been successfully added.
+
+        :param conn: the ecom connection
+        :param controllerConfigService: the controller configuration service
+        :param volumeInstance: the volume instance
+        :param volumeName: the volume name (String)
+        :param fastPolicyName: the fast policy name (String)
+        :returns: assocStorageGroupInstanceName - the storage group
+                  associated with the volume, or None when the default
+                  storage group was not found (or, per the re-read below,
+                  when the add did not take effect)
+        """
+        failedRet = None
+        defaultSgGroupName = (DEFAULT_SG_PREFIX + fastPolicyName +
+                              DEFAULT_SG_POSTFIX)
+        storageGroupInstanceName = self.utils.find_storage_masking_group(
+            conn, controllerConfigService, defaultSgGroupName)
+        if storageGroupInstanceName is None:
+            exceptionMessage = (_(
+                "Unable to create default storage group for"
+                " FAST policy : %(fastPolicyName)s ")
+                % {'fastPolicyName': fastPolicyName})
+            LOG.error(exceptionMessage)
+            return failedRet
+
+        self.provision.add_members_to_masking_group(
+            conn, controllerConfigService, storageGroupInstanceName,
+            volumeInstance.path, volumeName)
+        # check to see if the volume is in the storage group
+        assocStorageGroupInstanceName = (
+            self.utils.get_storage_group_from_volume(conn,
+                                                     volumeInstance.path))
+        return assocStorageGroupInstanceName
+
+    def _create_default_storage_group(self, conn, controllerConfigService,
+                                      fastPolicyName, storageGroupName,
+                                      volumeInstance):
+        """Create the default storage group for a FAST policy.
+
+        A small first volume is created for the group because you cannot
+        remove a volume if it is the last one in the group. The group is
+        then created around that volume and associated with the tier
+        policy rule.
+
+        :param conn: the connection information to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param fastPolicyName: the fast policy name (String)
+        :param storageGroupName: the storage group name (String)
+        :param volumeInstance: the volume instance
+        :returns: defaultStorageGroupInstanceName - instance name of the
+                  default storage group, or None on any failure (each
+                  failure is logged)
+        """
+        failedRet = None
+        # Seed the group with a small first volume (see docstring).
+        firstVolumeInstance = self._create_volume_for_default_volume_group(
+            conn, controllerConfigService, volumeInstance.path)
+        if firstVolumeInstance is None:
+            exceptionMessage = (_(
+                "Failed to create a first volume for storage"
+                " group : %(storageGroupName)s ")
+                % {'storageGroupName': storageGroupName})
+            LOG.error(exceptionMessage)
+            return failedRet
+
+        defaultStorageGroupInstanceName = (
+            self.provision.create_and_get_storage_group(
+                conn, controllerConfigService, storageGroupName,
+                firstVolumeInstance.path))
+        if defaultStorageGroupInstanceName is None:
+            exceptionMessage = (_(
+                "Failed to create default storage group for "
+                "FAST policy : %(fastPolicyName)s ")
+                % {'fastPolicyName': fastPolicyName})
+            LOG.error(exceptionMessage)
+            return failedRet
+
+        storageSystemInstanceName = (
+            self.utils.find_storage_system(conn, controllerConfigService))
+        tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
+            conn, storageSystemInstanceName)
+
+        # get the fast policy instance name
+        tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
+            conn, tierPolicyServiceInstanceName, fastPolicyName)
+        if tierPolicyRuleInstanceName is None:
+            exceptionMessage = (_(
+                "Unable to get policy rule for fast policy: "
+                "%(fastPolicyName)s ")
+                % {'fastPolicyName': fastPolicyName})
+            LOG.error(exceptionMessage)
+            return failedRet
+
+        # now associate it with a FAST policy
+        self.add_storage_group_to_tier_policy_rule(
+            conn, tierPolicyServiceInstanceName,
+            defaultStorageGroupInstanceName, tierPolicyRuleInstanceName,
+            storageGroupName, fastPolicyName)
+
+        return defaultStorageGroupInstanceName
+
+    def _create_volume_for_default_volume_group(
+            self, conn, controllerConfigService, volumeInstanceName):
+        """Creates a volume for the default storage group for a fast policy.
+
+        Creates a small first volume for the default storage group for a
+        fast policy.  This is necessary because you cannot remove
+        the last volume from a storage group and this scenario is likely.
+
+        :param conn: the connection information to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param volumeInstanceName: the volume instance name (used only to
+                                   locate the associated pool)
+        :returns: firstVolumeInstanceName - instance name of the first volume
+                  in the storage group, or None when the pool cannot be
+                  determined
+        """
+        failedRet = None
+        storageSystemName = self.utils.find_storage_system_name_from_service(
+            controllerConfigService)
+        storageConfigurationInstanceName = (
+            self.utils.find_storage_configuration_service(
+                conn, storageSystemName))
+
+        # The seed volume must live in the same pool as the given volume.
+        poolInstanceName = self.utils.get_assoc_pool_from_volume(
+            conn, volumeInstanceName)
+        if poolInstanceName is None:
+            exceptionMessage = (_("Unable to get associated pool of volume"))
+            LOG.error(exceptionMessage)
+            return failedRet
+
+        # Deliberately tiny placeholder volume -- presumably 1 (cinder GB
+        # unit); confirm the unit against create_volume_from_pool.
+        volumeName = 'vol1'
+        volumeSize = '1'
+        volumeDict, rc = self.provision.create_volume_from_pool(
+            conn, storageConfigurationInstanceName, volumeName,
+            poolInstanceName, volumeSize)
+        firstVolumeInstanceName = self.utils.find_volume_instance(
+            conn, volumeDict, volumeName)
+        return firstVolumeInstanceName
+
+    def add_storage_group_to_tier_policy_rule(
+            self, conn, tierPolicyServiceInstanceName,
+            storageGroupInstanceName, tierPolicyRuleInstanceName,
+            storageGroupName, fastPolicyName):
+        """Add the storage group to the tier policy rule.
+
+        :param conn: the connection information to the ecom server
+        :param tierPolicyServiceInstanceName: tier policy service
+        :param storageGroupInstanceName: storage group instance name
+        :param tierPolicyRuleInstanceName: tier policy instance name
+        :param storageGroupName: the storage group name (String)
+        :param fastPolicyName: the fast policy name (String)
+        """
+        # 5 is ("Add InElements to Policy")
+        modificationType = '5'
+
+        rc, job = conn.InvokeMethod(
+            'ModifyStorageTierPolicyRule', tierPolicyServiceInstanceName,
+            PolicyRule=tierPolicyRuleInstanceName,
+            Operation=self.utils.get_num(modificationType, '16'),
+            InElements=[storageGroupInstanceName])
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error associating storage group : %(storageGroupName)s. "
+                    "To fast Policy: %(fastPolicyName)s with error "
+                    "description: %(errordesc)s")
+                    % {'storageGroupName': storageGroupName,
+                       'fastPolicyName': fastPolicyName,
+                       'errordesc': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        return rc
+
+    def _get_service_level_tier_policy(
+            self, conn, tierPolicyServiceInstanceName, fastPolicyName):
+        """Returns the existing tier policies for a storage system instance.
+
+        Given the storage system instance name, get the existing tier
+        policies on that array
+
+        :param conn: the connection information to the ecom server
+        :param tierPolicyServiceInstanceName: the policy service
+        :param fastPolicyName: the fast policy name e.g BRONZE1
+        :returns: foundTierPolicyRuleInstanceName - the short name,
+                                                    everything after the :
+        """
+        foundTierPolicyRuleInstanceName = None
+
+        tierPolicyRuleInstanceNames = self._get_existing_tier_policies(
+            conn, tierPolicyServiceInstanceName)
+
+        for tierPolicyRuleInstanceName in tierPolicyRuleInstanceNames:
+            policyRuleName = tierPolicyRuleInstanceName['PolicyRuleName']
+            if fastPolicyName == policyRuleName:
+                foundTierPolicyRuleInstanceName = tierPolicyRuleInstanceName
+                break
+
+        return foundTierPolicyRuleInstanceName
+
+    def _get_existing_tier_policies(self, conn, tierPolicyServiceInstanceName):
+        """Given the tier policy service, get the existing tier policies.
+
+        :param conn: the connection information to the ecom server
+        :param tierPolicyServiceInstanceName: the tier policy service
+                                              instance Name
+        :returns: tierPolicyRuleInstanceNames - the tier policy rule
+                                                instance names
+        """
+        tierPolicyRuleInstanceNames = conn.AssociatorNames(
+            tierPolicyServiceInstanceName, ResultClass='Symm_TierPolicyRule')
+
+        return tierPolicyRuleInstanceNames
+
+    def get_associated_tier_policy_from_storage_group(
+            self, conn, storageGroupInstanceName):
+        """Given the tier policy instance name get the storage groups.
+
+        :param conn: the connection information to the ecom server
+        :param storageGroupInstanceName: the storage group instance name
+        :returns: tierPolicyInstanceNames - the list of tier policy
+                                            instance names
+        """
+        tierPolicyInstanceName = None
+
+        tierPolicyInstanceNames = conn.AssociatorNames(
+            storageGroupInstanceName,
+            AssocClass='CIM_TierPolicySetAppliesToElement',
+            ResultClass='CIM_TierPolicyRule')
+
+        if (len(tierPolicyInstanceNames) > 0 and
+                len(tierPolicyInstanceNames) < 2):
+            tierPolicyInstanceName = tierPolicyInstanceNames[0]
+
+        return tierPolicyInstanceName
+
+    def get_associated_tier_from_tier_policy(
+            self, conn, tierPolicyRuleInstanceName):
+        """Given the tierPolicyInstanceName get the associated tiers.
+
+        :param conn: the connection information to the ecom server
+        :param tierPolicyRuleInstanceName: the tier policy rule instance name
+        :returns: storageTierInstanceNames - a list of storage tier
+                                             instance names
+        """
+        storageTierInstanceNames = conn.AssociatorNames(
+            tierPolicyRuleInstanceName,
+            AssocClass='CIM_AssociatedTierPolicy')
+
+        if len(storageTierInstanceNames) == 0:
+            storageTierInstanceNames = None
+            LOG.warn(_("Unable to get storage tiers from tier policy rule  "))
+
+        return storageTierInstanceNames
+
+    def get_policy_default_storage_group(
+            self, conn, controllerConfigService, policyName):
+        """Returns the default storage group for a tier policy.
+
+        Given the tier policy instance name get the associated default
+        storage group.
+
+        :param conn: the connection information to the ecom server
+        :param controllerConfigService: ControllerConfigurationService
+                                        instance name
+        :param policyName: string value
+        :returns: storageGroupInstanceName - instance name of the default
+                                             storage group
+        """
+        storageMaskingGroupInstanceNames = conn.AssociatorNames(
+            controllerConfigService, ResultClass='CIM_DeviceMaskingGroup')
+
+        for storageMaskingGroupInstanceName in \
+                storageMaskingGroupInstanceNames:
+            storageMaskingGroupInstance = conn.GetInstance(
+                storageMaskingGroupInstanceName)
+            if ('_default_' in storageMaskingGroupInstance['ElementName'] and
+                    policyName in storageMaskingGroupInstance['ElementName']):
+                return storageMaskingGroupInstanceName
+
+        return None
+
+    def _get_associated_storage_groups_from_tier_policy(
+            self, conn, tierPolicyInstanceName):
+        """Given the tier policy instance name get the storage groups.
+
+        :param conn: the connection information to the ecom server
+        :param tierPolicyInstanceName: tier policy instance name
+        :returns: managedElementInstanceNames - the list of storage
+                                                instance names
+        """
+        managedElementInstanceNames = conn.AssociatorNames(
+            tierPolicyInstanceName,
+            AssocClass='CIM_TierPolicySetAppliesToElement',
+            ResultClass='CIM_DeviceMaskingGroup')
+
+        return managedElementInstanceNames
+
+    def get_associated_pools_from_tier(
+            self, conn, storageTierInstanceName):
+        """Given the storage tier instance name get the storage pools.
+
+        :param conn: the connection information to the ecom server
+        :param storageTierInstanceName: the storage tier instance name
+        :returns: storagePoolInstanceNames - a list of storage tier
+                                             instance names
+        """
+        storagePoolInstanceNames = conn.AssociatorNames(
+            storageTierInstanceName,
+            AssocClass='CIM_MemberOfCollection',
+            ResultClass='CIM_StoragePool')
+
+        return storagePoolInstanceNames
+
+    def add_storage_group_and_verify_tier_policy_assoc(
+            self, conn, controllerConfigService, storageGroupInstanceName,
+            storageGroupName, fastPolicyName):
+        """Adds a storage group to a tier policy and verifies success.
+
+        Add a storage group to a tier policy rule and verify that it was
+        successful by getting the association
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller config service
+        :param storageGroupInstanceName: the storage group instance name
+        :param storageGroupName: the storage group name (String)
+        :param fastPolicyName: the fast policy name (String)
+        :returns: assocTierPolicyInstanceName
+        """
+        failedRet = None
+        assocTierPolicyInstanceName = None
+        storageSystemInstanceName = self.utils.find_storage_system(
+            conn, controllerConfigService)
+        tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
+            conn, storageSystemInstanceName)
+        # get the fast policy instance name
+        tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
+            conn, tierPolicyServiceInstanceName, fastPolicyName)
+        if tierPolicyRuleInstanceName is None:
+            errorMessage = (_(
+                "Cannot find the fast policy %(fastPolicyName)s")
+                % {'fastPolicyName': fastPolicyName})
+
+            LOG.error(errorMessage)
+            return failedRet
+        else:
+            LOG.debug(
+                "Adding storage group %(storageGroupInstanceName)s to"
+                " tier policy rule %(tierPolicyRuleInstanceName)s"
+                % {'storageGroupInstanceName': storageGroupInstanceName,
+                   'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName})
+
+            # Associate the new storage group with the existing fast policy
+            try:
+                self.add_storage_group_to_tier_policy_rule(
+                    conn, tierPolicyServiceInstanceName,
+                    storageGroupInstanceName, tierPolicyRuleInstanceName,
+                    storageGroupName, fastPolicyName)
+            except Exception as ex:
+                LOG.error(_("Exception: %s") % six.text_type(ex))
+                errorMessage = (_(
+                    "Failed to add storage group %(storageGroupInstanceName)s "
+                    " to tier policy rule %(tierPolicyRuleInstanceName)s")
+                    % {'storageGroupInstanceName': storageGroupInstanceName,
+                       'tierPolicyRuleInstanceName':
+                       tierPolicyRuleInstanceName})
+                LOG.error(errorMessage)
+                return failedRet
+
+            # check that the storage group has been associated with with the
+            # tier policy rule
+            assocTierPolicyInstanceName = (
+                self.get_associated_tier_policy_from_storage_group(
+                    conn, storageGroupInstanceName))
+
+            LOG.debug(
+                "AssocTierPolicyInstanceName is "
+                "%(assocTierPolicyInstanceName)s "
+                % {'assocTierPolicyInstanceName': assocTierPolicyInstanceName})
+        return assocTierPolicyInstanceName
+
+    def get_associated_policy_from_storage_group(
+            self, conn, storageGroupInstanceName):
+        """Get the tier policy instance name for a storage group instance name.
+
+        :param conn: the connection information to the ecom server
+        :param storageGroupInstanceName: storage group instance name
+        :returns: foundTierPolicyInstanceName - instance name of the
+                                                tier policy object
+        """
+        foundTierPolicyInstanceName = None
+
+        tierPolicyInstanceNames = conn.AssociatorNames(
+            storageGroupInstanceName,
+            ResultClass='Symm_TierPolicyRule',
+            AssocClass='Symm_TierPolicySetAppliesToElement')
+
+        if len(tierPolicyInstanceNames) > 0:
+            foundTierPolicyInstanceName = tierPolicyInstanceNames[0]
+
+        return foundTierPolicyInstanceName
+
+    def delete_storage_group_from_tier_policy_rule(
+            self, conn, tierPolicyServiceInstanceName,
+            storageGroupInstanceName, tierPolicyRuleInstanceName):
+        """Disassociate the storage group from its tier policy rule.
+
+        :param conn: connection the ecom server
+        :param tierPolicyServiceInstanceName: instance name of the tier policy
+                                              service
+        :param storageGroupInstanceName: instance name of the storage group
+        :param tierPolicyRuleInstanceName: instance name of the tier policy
+                                           associated with the storage group
+        """
+        modificationType = '6'
+        LOG.debug("Invoking ModifyStorageTierPolicyRule"
+                  " %s" % tierPolicyRuleInstanceName)
+        try:
+            rc, job = conn.InvokeMethod(
+                'ModifyStorageTierPolicyRule', tierPolicyServiceInstanceName,
+                PolicyRule=tierPolicyRuleInstanceName,
+                Operation=self.utils.get_num(modificationType, '16'),
+                InElements=[storageGroupInstanceName])
+            if rc != 0L:
+                rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+                if rc != 0L:
+                    LOG.error(_("Error disassociating storage group from "
+                              "policy: %s") % errordesc)
+                else:
+                    LOG.debug("Disassociated storage group from policy %s")
+            else:
+                LOG.debug("ModifyStorageTierPolicyRule completed")
+        except Exception as e:
+            LOG.info(_("Storage group not associated with the policy %s")
+                     % six.text_type(e))
+
+    def get_pool_associated_to_policy(
+            self, conn, fastPolicyName, arraySN,
+            storageConfigService, poolInstanceName):
+        """Given a FAST policy check that the pool is linked to the policy.
+
+        If it's associated return the pool instance, if not return None.
+        First check if FAST is enabled on the array
+
+        :param conn: the ecom connection
+        :param fastPolicyName: the fast policy name (String)
+        :param arraySN: the array serial number (String)
+        :param storageConfigService: the storage Config Service
+        :param poolInstanceName: the pool instance we want to check for
+                                 association with the fast storage tier
+        :returns: foundPoolInstanceName
+        """
+        storageSystemInstanceName = self.utils.find_storage_system(
+            conn, storageConfigService)
+
+        if not self._check_if_fast_supported(conn, storageSystemInstanceName):
+            errorMessage = (_(
+                "FAST is not supported on this array "))
+            LOG.error(errorMessage)
+            exception.VolumeBackendAPIException(data=errorMessage)
+
+        tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
+            conn, storageSystemInstanceName)
+
+        tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
+            conn, tierPolicyServiceInstanceName, fastPolicyName)
+        # Get the associated storage tiers from the tier policy rule
+        storageTierInstanceNames = self.get_associated_tier_from_tier_policy(
+            conn, tierPolicyRuleInstanceName)
+
+        # For each gold storage tier get the associated pools
+        foundPoolInstanceName = None
+        for storageTierInstanceName in storageTierInstanceNames:
+            assocStoragePoolInstanceNames = (
+                self.get_associated_pools_from_tier(conn,
+                                                    storageTierInstanceName))
+            for assocStoragePoolInstanceName in assocStoragePoolInstanceNames:
+                if poolInstanceName == assocStoragePoolInstanceName:
+                    foundPoolInstanceName = poolInstanceName
+                    break
+            if foundPoolInstanceName is not None:
+                break
+
+        return foundPoolInstanceName
+
+    def is_tiering_policy_enabled_on_storage_system(
+            self, conn, storageSystemInstanceName):
+        """Checks if tiering policy in enabled on a storage system.
+
+        True if FAST policy enabled on the given storage system;
+        False otherwise.
+
+        :param storageSystemInstanceName: a storage system instance name
+        :returns: boolean
+        """
+        try:
+            tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
+                conn, storageSystemInstanceName)
+            isTieringPolicySupported = self.is_tiering_policy_enabled(
+                conn, tierPolicyServiceInstanceName)
+        except Exception as e:
+            LOG.error(_("Exception: %s") % six.text_type(e))
+            return False
+
+        return isTieringPolicySupported
+
+    def get_tier_policy_by_name(
+            self, conn, arrayName, policyName):
+        """Given the name of the policy, get the TierPolicyRule instance name.
+
+        :param policyName: the name of policy rule, a string value
+        :returns: tierPolicyInstanceName - tier policy instance name
+        """
+        tierPolicyInstanceNames = conn.EnumerateInstanceNames(
+            'Symm_TierPolicyRule')
+        for policy in tierPolicyInstanceNames:
+            if (policyName == policy['PolicyRuleName'] and
+                    arrayName in policy['SystemName']):
+                return policy
+        return None
+
+    def get_capacities_associated_to_policy(self, conn, arrayName, policyName):
+        """Gets the total and un-used capacities for all pools in a policy.
+
+        Given the name of the policy, get the total capcity and un-used
+        capacity in GB of all the storage pools associated with the policy.
+
+        :param policyName: the name of policy rule, a string value
+        :returns: total_capacity_gb - total capacity in GB of all pools
+                                      associated with the policy
+        :returns: free_capacity_gb  - (total capacity-EMCSubscribedCapacity)
+                                      in GB of all pools associated with
+                                      the policy
+        """
+        policyInstanceName = self.get_tier_policy_by_name(
+            conn, arrayName, policyName)
+
+        total_capacity_gb = 0
+        allocated_capacity_gb = 0
+
+        tierInstanceNames = self.get_associated_tier_from_tier_policy(
+            conn, policyInstanceName)
+        for tierInstanceName in tierInstanceNames:
+            poolInsttanceNames = self.get_associated_pools_from_tier(
+                conn, tierInstanceName)
+            for poolInstanceName in poolInsttanceNames:
+                storagePoolInstance = conn.GetInstance(
+                    poolInstanceName, LocalOnly=False)
+                total_capacity_gb += self.utils.convert_bits_to_gbs(
+                    storagePoolInstance['TotalManagedSpace'])
+                allocated_capacity_gb += self.utils.convert_bits_to_gbs(
+                    storagePoolInstance['EMCSubscribedCapacity'])
+                LOG.debug(
+                    "policyName:%(policyName)s, pool: %(poolInstanceName)s, "
+                    "allocated_capacity_gb = %(allocated_capacity_gb)lu"
+                    % {'policyName': policyName,
+                       'poolInstanceName': poolInstanceName,
+                       'allocated_capacity_gb': allocated_capacity_gb})
+
+        free_capacity_gb = total_capacity_gb - allocated_capacity_gb
+        return (total_capacity_gb, free_capacity_gb)
+
+    def get_or_create_default_storage_group(
+            self, conn, controllerConfigService, fastPolicyName,
+            volumeInstance):
+        """Create or get a default storage group for FAST policy.
+
+        :param conn: the ecom connection
+        :param controllerConfigService: the controller configuration service
+        :param fastPolicyName: the fast policy name (String)
+        :param volumeInstance: the volume instance
+        :returns: defaultStorageGroupInstanceName - the default storage group
+                                                    instance name
+        """
+        defaultSgGroupName = (DEFAULT_SG_PREFIX + fastPolicyName +
+                              DEFAULT_SG_POSTFIX)
+        defaultStorageGroupInstanceName = (
+            self.utils.find_storage_masking_group(conn,
+                                                  controllerConfigService,
+                                                  defaultSgGroupName))
+        if defaultStorageGroupInstanceName is None:
+            # create it and associate it with the FAST policy in question
+            defaultStorageGroupInstanceName = (
+                self._create_default_storage_group(conn,
+                                                   controllerConfigService,
+                                                   fastPolicyName,
+                                                   defaultSgGroupName,
+                                                   volumeInstance))
+
+        return defaultStorageGroupInstanceName
+
+    def _get_associated_tier_policy_from_pool(self, conn, poolInstanceName):
+        """Given the pool instance name get the associated FAST tier policy.
+
+        :param conn: the connection information to the ecom server
+        :param poolInstanceName: the pool instance name
+        :param fastPolicyName: the FAST Policy name (if it exists)
+        """
+        fastPolicyName = None
+
+        storageTierInstanceNames = conn.AssociatorNames(
+            poolInstanceName,
+            AssocClass='CIM_MemberOfCollection',
+            ResultClass='CIM_StorageTier')
+
+        if len(storageTierInstanceNames) > 0:
+            tierPolicyInstanceNames = conn.AssociatorNames(
+                storageTierInstanceNames[0],
+                AssocClass='CIM_AssociatedTierPolicy')
+
+            if len(tierPolicyInstanceNames) > 0:
+                tierPolicyInstanceName = tierPolicyInstanceNames[0]
+                fastPolicyName = tierPolicyInstanceName['PolicyRuleName']
+
+        return fastPolicyName
similarity index 70%
rename from cinder/volume/drivers/emc/emc_smis_fc.py
rename to cinder/volume/drivers/emc/emc_vmax_fc.py
index cd39777bf94d8f9d3c88ea7ac6ede21b66e42cb0..6d83a4f781fedd6cd54ebdb1b8dfb41d7c3d72c1 100644 (file)
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-"""
-FC Drivers for EMC VNX and VMAX arrays based on SMI-S.
-
-"""
+import six
 
 from cinder import context
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
-from cinder.volume.drivers.emc import emc_smis_common
+from cinder.volume.drivers.emc import emc_vmax_common
 from cinder.zonemanager import utils as fczm_utils
 
+
 LOG = logging.getLogger(__name__)
 
 
-class EMCSMISFCDriver(driver.FibreChannelDriver):
-    """EMC FC Drivers for VMAX and VNX using SMI-S.
+class EMCVMAXFCDriver(driver.FibreChannelDriver):
+    """EMC FC Drivers for VMAX using SMI-S.
 
     Version history:
         1.0.0 - Initial driver
         1.1.0 - Multiple pools and thick/thin provisioning,
                 performance enhancement.
+        2.0.0 - Add driver requirement functions
     """
 
-    VERSION = "1.1.0"
+    VERSION = "2.0.0"
 
     def __init__(self, *args, **kwargs):
 
-        super(EMCSMISFCDriver, self).__init__(*args, **kwargs)
-        self.common = emc_smis_common.EMCSMISCommon(
+        super(EMCVMAXFCDriver, self).__init__(*args, **kwargs)
+        self.common = emc_vmax_common.EMCVMAXCommon(
             'FC',
             configuration=self.configuration)
 
@@ -52,7 +51,7 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
         volpath = self.common.create_volume(volume)
 
         model_update = {}
-        volume['provider_location'] = str(volpath)
+        volume['provider_location'] = six.text_type(volpath)
         model_update['provider_location'] = volume['provider_location']
         return model_update
 
@@ -61,7 +60,7 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
         volpath = self.common.create_volume_from_snapshot(volume, snapshot)
 
         model_update = {}
-        volume['provider_location'] = str(volpath)
+        volume['provider_location'] = six.text_type(volpath)
         model_update['provider_location'] = volume['provider_location']
         return model_update
 
@@ -70,7 +69,7 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
         volpath = self.common.create_cloned_volume(volume, src_vref)
 
         model_update = {}
-        volume['provider_location'] = str(volpath)
+        volume['provider_location'] = six.text_type(volpath)
         model_update['provider_location'] = volume['provider_location']
         return model_update
 
@@ -89,7 +88,7 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
         volpath = self.common.create_snapshot(snapshot, volume)
 
         model_update = {}
-        snapshot['provider_location'] = str(volpath)
+        snapshot['provider_location'] = six.text_type(volpath)
         model_update['provider_location'] = snapshot['provider_location']
         return model_update
 
@@ -130,7 +129,6 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
         The target_wwn can be a single entry or a list of wwns that
         correspond to the list of remote wwn(s) that will export the volume.
         Example return values:
-
             {
                 'driver_volume_type': 'fibre_channel'
                 'data': {
@@ -150,10 +148,9 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
                     'target_wwn': ['1234567890123', '0987654321321'],
                 }
             }
-
         """
-        device_info = self.common.initialize_connection(volume,
-                                                        connector)
+        device_info, ipAddress = self.common.initialize_connection(
+            volume, connector)
         device_number = device_info['hostlunid']
         storage_system = device_info['storagesystem']
         target_wwns, init_targ_map = self._build_initiator_target_map(
@@ -165,26 +162,41 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
                          'target_wwn': target_wwns,
                          'initiator_target_map': init_targ_map}}
 
-        LOG.debug('Return FC data: %(data)s.'
+        LOG.debug("Return FC data: %(data)s."
                   % {'data': data})
 
         return data
 
     @fczm_utils.RemoveFCZone
     def terminate_connection(self, volume, connector, **kwargs):
-        """Disallow connection from connector."""
+        """Disallow connection from connector.
+
+        Return empty data if other volumes are in the same zone.
+        The FibreChannel ZoneManager doesn't remove zones
+        if there isn't an initiator_target_map in the
+        return of terminate_connection.
+
+        :returns: data - the target_wwns and initiator_target_map if the
+                         zone is to be removed, otherwise empty
+        """
         self.common.terminate_connection(volume, connector)
 
         loc = volume['provider_location']
         name = eval(loc)
         storage_system = name['keybindings']['SystemName']
-        target_wwns, init_targ_map = self._build_initiator_target_map(
-            storage_system, connector)
-        data = {'driver_volume_type': 'fibre_channel',
-                'data': {'target_wwn': target_wwns,
-                         'initiator_target_map': init_targ_map}}
 
-        LOG.debug('Return FC data: %(data)s.'
+        numVolumes = self.common.get_num_volumes_mapped(volume, connector)
+        if numVolumes > 0:
+            data = {'driver_volume_type': 'fibre_channel',
+                    'data': {}}
+        else:
+            target_wwns, init_targ_map = self._build_initiator_target_map(
+                storage_system, connector)
+            data = {'driver_volume_type': 'fibre_channel',
+                    'data': {'target_wwn': target_wwns,
+                             'initiator_target_map': init_targ_map}}
+
+        LOG.debug("Return FC data: %(data)s."
                   % {'data': data})
 
         return data
@@ -220,8 +232,33 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
         """Retrieve stats info from volume group."""
         LOG.debug("Updating volume stats")
         data = self.common.update_volume_stats()
-        backend_name = self.configuration.safe_get('volume_backend_name')
-        data['volume_backend_name'] = backend_name or 'EMCSMISFCDriver'
         data['storage_protocol'] = 'FC'
         data['driver_version'] = self.VERSION
         self._stats = data
+
    def migrate_volume(self, ctxt, volume, host):
        """Migrate a volume from one Volume Backend to another.

        Thin wrapper: delegates directly to the common VMAX implementation.

        :param self: reference to class
        :param ctxt: the request context
        :param volume: the volume object including the volume_type_id
        :param host: the host dict holding the relevant target(destination)
                     information
        :returns: moved
        :returns: list
        """
        return self.common.migrate_volume(ctxt, volume, host)
+
    def retype(self, ctxt, volume, new_type, diff, host):
        """Migrate volume to another host using retype.

        Thin wrapper: delegates directly to the common VMAX implementation.

        :param self: reference to class
        :param ctxt: the request context
        :param volume: the volume object including the volume_type_id
        :param new_type: the new volume type.
        :param diff: difference between old and new volume types
                     (passed through to the common layer)
        :param host: the host dict holding the relevant
                     target(destination) information
        :returns: moved
        :returns: list
        """
        return self.common.retype(ctxt, volume, new_type, diff, host)
similarity index 68%
rename from cinder/volume/drivers/emc/emc_smis_iscsi.py
rename to cinder/volume/drivers/emc/emc_vmax_iscsi.py
index 18cb350055847d31d6b61e55699a78afbd8ceba5..c3f595dfdc3410d7122833ba3bf10965cb34d32f 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 """
-ISCSI Drivers for EMC VNX and VMAX arrays based on SMI-S.
+ISCSI Drivers for EMC VMAX arrays based on SMI-S.
 
 """
-
+import six
 
 from cinder import context
 from cinder import exception
 from cinder.i18n import _
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
-from cinder.volume.drivers.emc import emc_smis_common
+from cinder.volume.drivers.emc import emc_vmax_common
 
 LOG = logging.getLogger(__name__)
 
 
-class EMCSMISISCSIDriver(driver.ISCSIDriver):
-    """EMC ISCSI Drivers for VMAX and VNX using SMI-S.
+class EMCVMAXISCSIDriver(driver.ISCSIDriver):
+    """EMC ISCSI Drivers for VMAX using SMI-S.
 
     Version history:
         1.0.0 - Initial driver
         1.1.0 - Multiple pools and thick/thin provisioning,
                 performance enhancement.
+        2.0.0 - Add driver requirement functions
     """
 
-    VERSION = "1.1.0"
+    VERSION = "2.0.0"
 
     def __init__(self, *args, **kwargs):
 
-        super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
+        super(EMCVMAXISCSIDriver, self).__init__(*args, **kwargs)
         self.common =\
-            emc_smis_common.EMCSMISCommon('iSCSI',
+            emc_vmax_common.EMCVMAXCommon('iSCSI',
                                           configuration=self.configuration)
 
     def check_for_setup_error(self):
@@ -54,7 +55,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
         volpath = self.common.create_volume(volume)
 
         model_update = {}
-        volume['provider_location'] = str(volpath)
+        volume['provider_location'] = six.text_type(volpath)
         model_update['provider_location'] = volume['provider_location']
         return model_update
 
@@ -63,7 +64,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
         volpath = self.common.create_volume_from_snapshot(volume, snapshot)
 
         model_update = {}
-        volume['provider_location'] = str(volpath)
+        volume['provider_location'] = six.text_type(volpath)
         model_update['provider_location'] = volume['provider_location']
         return model_update
 
@@ -72,7 +73,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
         volpath = self.common.create_cloned_volume(volume, src_vref)
 
         model_update = {}
-        volume['provider_location'] = str(volpath)
+        volume['provider_location'] = six.text_type(volpath)
         model_update['provider_location'] = volume['provider_location']
         return model_update
 
@@ -91,7 +92,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
         volpath = self.common.create_snapshot(snapshot, volume)
 
         model_update = {}
-        snapshot['provider_location'] = str(volpath)
+        snapshot['provider_location'] = six.text_type(volpath)
         model_update['provider_location'] = snapshot['provider_location']
         return model_update
 
@@ -127,7 +128,6 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
         The iscsi driver returns a driver_volume_type of 'iscsi'.
         the format of the driver data is defined in smis_get_iscsi_properties.
         Example return value::
-
             {
                 'driver_volume_type': 'iscsi'
                 'data': {
@@ -137,121 +137,104 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
                     'volume_id': '12345678-1234-4321-1234-123456789012',
                 }
             }
-
         """
-        self.common.initialize_connection(volume, connector)
+        devInfo, ipAddress = self.common.initialize_connection(
+            volume, connector)
 
-        iscsi_properties = self.smis_get_iscsi_properties(volume, connector)
+        iscsi_properties = self.smis_get_iscsi_properties(
+            volume, connector, ipAddress)
+
+        LOG.info(_("Leaving initialize_connection: %s") % (iscsi_properties))
         return {
             'driver_volume_type': 'iscsi',
             'data': iscsi_properties
         }
 
-    def _do_iscsi_discovery(self, volume):
+    def smis_do_iscsi_discovery(self, volume, ipAddress):
 
         LOG.warn(_("ISCSI provider_location not stored, using discovery"))
 
         (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                     '-t', 'sendtargets', '-p',
-                                    self.configuration.iscsi_ip_address,
+                                    ipAddress,
                                     run_as_root=True)
+
+        LOG.info(_(
+            "smis_do_iscsi_discovery is: %(out)s")
+            % {'out': out})
         targets = []
         for target in out.splitlines():
             targets.append(target)
 
         return targets
 
-    def smis_get_iscsi_properties(self, volume, connector):
+    def smis_get_iscsi_properties(self, volume, connector, ipAddress):
         """Gets iscsi configuration.
 
         We ideally get saved information in the volume entity, but fall back
         to discovery if need be. Discovery may be completely removed in future
         The properties are:
-
         :target_discovered:    boolean indicating whether discovery was used
-
         :target_iqn:    the IQN of the iSCSI target
-
         :target_portal:    the portal of the iSCSI target
-
         :target_lun:    the lun of the iSCSI target
-
         :volume_id:    the UUID of the volume
-
         :auth_method:, :auth_username:, :auth_password:
-
             the authentication details. Right now, either auth_method is not
             present meaning no authentication, or auth_method == `CHAP`
             meaning use CHAP with the specified credentials.
         """
         properties = {}
 
-        location = self._do_iscsi_discovery(volume)
+        location = self.smis_do_iscsi_discovery(volume, ipAddress)
         if not location:
             raise exception.InvalidVolume(_("Could not find iSCSI export "
-                                          " for volume %s") %
-                                          (volume['name']))
+                                          " for volume %(volumeName)s")
+                                          % {'volumeName': volume['name']})
 
         LOG.debug("ISCSI Discovery: Found %s" % (location))
         properties['target_discovered'] = True
 
         device_info = self.common.find_device_number(volume, connector)
+
         if device_info is None or device_info['hostlunid'] is None:
-            exception_message = (_("Cannot find device number for volume %s")
-                                 % volume['name'])
+            exception_message = (_("Cannot find device number for volume "
+                                 "%(volumeName)s")
+                                 % {'volumeName': volume['name']})
             raise exception.VolumeBackendAPIException(data=exception_message)
 
         device_number = device_info['hostlunid']
-        storage_system = device_info['storagesystem']
-
-        # sp is "SP_A" or "SP_B"
-        sp = device_info['owningsp']
-        endpoints = []
-        if sp:
-            # endpoints example:
-            # [iqn.1992-04.com.emc:cx.apm00123907237.a8,
-            # iqn.1992-04.com.emc:cx.apm00123907237.a9]
-            endpoints = self.common._find_iscsi_protocol_endpoints(
-                sp, storage_system)
-
-        foundEndpoint = False
+
+        LOG.info(_(
+            "location is: %(location)s") % {'location': location})
+
         for loc in location:
             results = loc.split(" ")
             properties['target_portal'] = results[0].split(",")[0]
             properties['target_iqn'] = results[1]
-            # owning sp is None for VMAX
-            # for VNX, find the target_iqn that matches the endpoint
-            # target_iqn example: iqn.1992-04.com.emc:cx.apm00123907237.a8
-            # or iqn.1992-04.com.emc:cx.apm00123907237.b8
-            if not sp:
-                break
-            for endpoint in endpoints:
-                if properties['target_iqn'] == endpoint:
-                    LOG.debug("Found iSCSI endpoint: %s" % endpoint)
-                    foundEndpoint = True
-                    break
-            if foundEndpoint:
-                break
-
-        if sp and not foundEndpoint:
-            LOG.warn(_("ISCSI endpoint not found for SP %(sp)s on "
-                     "storage system %(storage)s.")
-                     % {'sp': sp,
-                        'storage': storage_system})
 
         properties['target_lun'] = device_number
 
         properties['volume_id'] = volume['id']
 
-        LOG.debug("ISCSI properties: %s" % (properties))
+        LOG.info(_("ISCSI properties: %(properties)s")
+                 % {'properties': properties})
+        LOG.info(_("ISCSI volume is: %(volume)s")
+                 % {'volume': volume})
 
-        auth = volume['provider_auth']
-        if auth:
-            (auth_method, auth_username, auth_secret) = auth.split()
+        if 'provider_auth' in volume:
+            auth = volume['provider_auth']
+            LOG.info(_("AUTH properties: %(authProps)s")
+                     % {'authProps': auth})
 
-            properties['auth_method'] = auth_method
-            properties['auth_username'] = auth_username
-            properties['auth_password'] = auth_secret
+            if auth is not None:
+                (auth_method, auth_username, auth_secret) = auth.split()
+
+                properties['auth_method'] = auth_method
+                properties['auth_username'] = auth_username
+                properties['auth_password'] = auth_secret
+
+                LOG.info(_("AUTH properties: %s") % (properties))
 
         return properties
 
@@ -277,8 +260,32 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
         """Retrieve stats info from volume group."""
         LOG.debug("Updating volume stats")
         data = self.common.update_volume_stats()
-        backend_name = self.configuration.safe_get('volume_backend_name')
-        data['volume_backend_name'] = backend_name or 'EMCSMISISCSIDriver'
         data['storage_protocol'] = 'iSCSI'
         data['driver_version'] = self.VERSION
         self._stats = data
+
+    def migrate_volume(self, ctxt, volume, host):
+        """Migrate a volume from one Volume Backend to another.
+        :param self: reference to class
+        :param ctxt:
+        :param volume: the volume object including the volume_type_id
+        :param host: the host dict holding the relevant target(destination)
+                     information
+        :returns: moved
+        :returns: list
+        """
+        return self.common.migrate_volume(ctxt, volume, host)
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Migrate volume to another host using retype.
+
+        :param self: reference to class
+        :param ctxt:
+        :param volume: the volume object including the volume_type_id
+        :param new_type: the new volume type.
+        :param host: the host dict holding the relevant target(destination)
+                     information
+        :returns: moved
+        :returns: list
+        """
+        return self.common.retype(ctxt, volume, new_type, diff, host)
diff --git a/cinder/volume/drivers/emc/emc_vmax_masking.py b/cinder/volume/drivers/emc/emc_vmax_masking.py
new file mode 100644 (file)
index 0000000..de79658
--- /dev/null
@@ -0,0 +1,1398 @@
+# Copyright (c) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.emc import emc_vmax_fast
+from cinder.volume.drivers.emc import emc_vmax_provision
+from cinder.volume.drivers.emc import emc_vmax_utils
+
+LOG = logging.getLogger(__name__)
+
+STORAGEGROUPTYPE = 4
+POSTGROUPTYPE = 3
+INITIATORGROUPTYPE = 2
+
+ISCSI = 'iscsi'
+FC = 'fc'
+
+EMC_ROOT = 'root/emc'
+
+
+class EMCVMAXMasking(object):
+    """Masking class for SMI-S based EMC volume drivers.
+
+    Masking code to dynamically create a masking view
+    This masking class is for EMC volume drivers based on SMI-S.
+    It supports VMAX arrays.
+    """
+    def __init__(self, prtcl):
+        self.protocol = prtcl
+        self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
+        self.fast = emc_vmax_fast.EMCVMAXFast(prtcl)
+        self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl)
+
+    def get_or_create_masking_view_and_map_lun(self, conn, maskingViewDict):
+        """Get or Create a masking view.
+
+        Given a masking view tuple either get or create a masking view and add
+        the volume to the associated storage group
+
+        :param conn: the connection to  ecom
+        :param maskingViewDict: the masking view tuple
+        :returns: dict rollbackDict
+        """
+        rollbackDict = {}
+
+        controllerConfigService = maskingViewDict['controllerConfigService']
+        sgGroupName = maskingViewDict['sgGroupName']
+        volumeInstance = maskingViewDict['volumeInstance']
+        igGroupName = maskingViewDict['igGroupName']
+        connector = maskingViewDict['connector']
+        storageSystemName = maskingViewDict['storageSystemName']
+        maskingViewName = maskingViewDict['maskingViewName']
+        volumeName = maskingViewDict['volumeName']
+        pgGroupName = maskingViewDict['pgGroupName']
+
+        fastPolicyName = maskingViewDict['fastPolicy']
+        defaultStorageGroupInstanceName = None
+
+        # we need a rollback scenario for FAST.
+        # We must make sure that volume is returned to default storage
+        # group if anything goes wrong
+        if fastPolicyName is not None:
+            defaultStorageGroupInstanceName = (
+                self.fast.get_and_verify_default_storage_group(
+                    conn, controllerConfigService, volumeInstance.path,
+                    volumeName, fastPolicyName))
+            if defaultStorageGroupInstanceName is None:
+                exceptionMessage = (_(
+                    "Cannot get the default storage group for FAST policy: "
+                    "%(fastPolicyName)s. ")
+                    % {'fastPolicyName': fastPolicyName})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+            retStorageGroupInstanceName = (
+                self.remove_device_from_default_storage_group(
+                    conn, controllerConfigService, volumeInstance.path,
+                    volumeName, fastPolicyName))
+            if retStorageGroupInstanceName is None:
+                exceptionMessage = (_(
+                    "Failed to remove volume %(volumeName)s from default SG: "
+                    "%(volumeName)s. ")
+                    % {'volumeName': volumeName})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        try:
+            maskingViewInstanceName = self._find_masking_view(
+                conn, maskingViewName, storageSystemName)
+            if maskingViewInstanceName is None:
+                storageGroupInstanceName = (
+                    self._get_storage_group_instance_name(
+                        conn, controllerConfigService, volumeInstance,
+                        volumeName, sgGroupName, fastPolicyName,
+                        storageSystemName, defaultStorageGroupInstanceName))
+                if storageGroupInstanceName is None:
+                    exceptionMessage = (_(
+                        "Cannot get or create a storage group: %(sgGroupName)s"
+                        " for volume %(volumeName)s ")
+                        % {'sgGroupName': sgGroupName,
+                           'volumeName': volumeName})
+                    LOG.error(exceptionMessage)
+                    raise
+
+                portGroupInstanceName = self._get_port_group_instance_name(
+                    conn, controllerConfigService, pgGroupName)
+                if portGroupInstanceName is None:
+                    exceptionMessage = (_(
+                        "Cannot get port group: %(pgGroupName)s. ")
+                        % {'pgGroupName': pgGroupName})
+                    LOG.error(exceptionMessage)
+                    raise
+
+                initiatorGroupInstanceName = (
+                    self._get_initiator_group_instance_name(
+                        conn, controllerConfigService, igGroupName, connector,
+                        storageSystemName))
+                if initiatorGroupInstanceName is None:
+                    exceptionMessage = (_(
+                        "Cannot get or create initiator group: "
+                        "%(igGroupName)s. ")
+                        % {'igGroupName': igGroupName})
+                    LOG.error(exceptionMessage)
+                    raise
+
+                maskingViewInstanceName = (
+                    self._get_masking_view_instance_name(
+                        conn, controllerConfigService, maskingViewName,
+                        storageGroupInstanceName, portGroupInstanceName,
+                        initiatorGroupInstanceName))
+                if maskingViewInstanceName is None:
+                    exceptionMessage = (_(
+                        "Cannot create masking view: %(maskingViewName)s. ")
+                        % {'maskingViewName': maskingViewName})
+                    LOG.error(exceptionMessage)
+                    raise
+
+            else:
+                # first verify that the initiator group matches the initiators
+                if not self._verify_initiator_group_from_masking_view(
+                        conn, controllerConfigService, maskingViewName,
+                        connector, storageSystemName, igGroupName):
+                    exceptionMessage = (_(
+                        "Unable to verify initiator group: %(igGroupName)s"
+                        "in masking view %(maskingViewName)s ")
+                        % {'igGroupName': igGroupName,
+                           'maskingViewName': maskingViewName})
+                    LOG.error(exceptionMessage)
+                    raise
+
+                # get the storage from the masking view and add the
+                # volume to it.
+                storageGroupInstanceName = (
+                    self._get_storage_group_from_masking_view(
+                        conn, maskingViewName, storageSystemName))
+
+                if storageGroupInstanceName is None:
+                    exceptionMessage = (_(
+                        "Cannot get storage group from masking view: "
+                        "%(maskingViewName)s. ")
+                        % {'maskingViewName': maskingViewName})
+                    LOG.error(exceptionMessage)
+                    raise
+
+                if self._is_volume_in_storage_group(
+                        conn, storageGroupInstanceName,
+                        volumeInstance):
+                    LOG.warn(_(
+                        "Volume: %(volumeName)s is already part "
+                        "of storage group %(sgGroupName)s ")
+                        % {'volumeName': volumeName,
+                           'sgGroupName': sgGroupName})
+                else:
+                    self.add_volume_to_storage_group(
+                        conn, controllerConfigService,
+                        storageGroupInstanceName, volumeInstance, volumeName,
+                        sgGroupName, fastPolicyName, storageSystemName)
+
+        except Exception as e:
+            # rollback code if we cannot complete any of the steps above
+            # successfully then we must roll back by adding the volume back to
+            # the default storage group for that fast policy
+            if (fastPolicyName is not None and
+                    defaultStorageGroupInstanceName is not None):
+                # if the exception happened before the volume was removed from
+                # the default storage group no action
+                self._check_if_rollback_action_for_masking_required(
+                    conn, controllerConfigService, volumeInstance, volumeName,
+                    fastPolicyName, defaultStorageGroupInstanceName)
+
+            LOG.error(_("Exception: %s") % six.text_type(e))
+            errorMessage = (_(
+                "Failed to get or create masking view %(maskingViewName)s ")
+                % {'maskingViewName': maskingViewName})
+            LOG.error(errorMessage)
+            raise exception.VolumeBackendAPIException(data=errorMessage)
+
+        rollbackDict['controllerConfigService'] = controllerConfigService
+        rollbackDict['defaultStorageGroupInstanceName'] = (
+            defaultStorageGroupInstanceName)
+        rollbackDict['volumeInstance'] = volumeInstance
+        rollbackDict['volumeName'] = volumeName
+        rollbackDict['fastPolicyName'] = fastPolicyName
+        return rollbackDict
+
+    def _is_volume_in_storage_group(
+            self, conn, storageGroupInstanceName, volumeInstance):
+        """Check if the volume is already part of the storage group.
+
+        Check if the volume is already part of the storage group,
+        if it is no need to re-add it.
+
+        :param conn: the connection to  ecom
+        :param storageGroupInstanceName: the storage group instance name
+        :param volumeInstance: the volume instance
+        :returns: boolean True/False
+        """
+        foundStorageGroupInstanceName = (
+            self.utils.get_storage_group_from_volume(
+                conn, volumeInstance.path))
+
+        storageGroupInstance = conn.GetInstance(
+            storageGroupInstanceName, LocalOnly=False)
+
+        LOG.debug(
+            "The existing storage group instance element name is: "
+            "%(existingElement)s. "
+            % {'existingElement': storageGroupInstance['ElementName']})
+
+        if foundStorageGroupInstanceName is not None:
+            foundStorageGroupInstance = conn.GetInstance(
+                foundStorageGroupInstanceName, LocalOnly=False)
+            LOG.debug(
+                "The found storage group instance element name is: "
+                "%(foundElement)s. "
+                % {'foundElement': foundStorageGroupInstance['ElementName']})
+            if (foundStorageGroupInstance['ElementName'] == (
+                    storageGroupInstance['ElementName'])):
+                LOG.warn(_(
+                    "The volume is already part of storage group: "
+                    "%(storageGroupInstanceName)s. ")
+                    % {'storageGroupInstanceName': storageGroupInstanceName})
+                return True
+
+        return False
+
+    def _find_masking_view(self, conn, maskingViewName, storageSystemName):
+        """Given the masking view name get the masking view instance.
+
+        :param conn: connection to the ecom server
+        :param maskingViewName: the masking view name
+        :param storageSystemName: the storage system name(String)
+        :returns: foundMaskingViewInstanceName masking view instance name
+        """
+        foundMaskingViewInstanceName = None
+        maskingViewInstanceNames = conn.EnumerateInstanceNames(
+            'EMC_LunMaskingSCSIProtocolController')
+
+        for maskingViewInstanceName in maskingViewInstanceNames:
+            if storageSystemName == maskingViewInstanceName['SystemName']:
+                instance = conn.GetInstance(
+                    maskingViewInstanceName, LocalOnly=False)
+                if maskingViewName == instance['ElementName']:
+                    foundMaskingViewInstanceName = maskingViewInstanceName
+                    break
+
+        if foundMaskingViewInstanceName is not None:
+            infoMessage = (_(
+                "Found existing masking view: %(maskingViewName)s ")
+                % {'maskingViewName': maskingViewName})
+            LOG.info(infoMessage)
+        return foundMaskingViewInstanceName
+
+    def _create_storage_group(
+            self, conn, controllerConfigService, storageGroupName,
+            volumeInstance, fastPolicyName, volumeName, storageSystemName,
+            defaultStorageGroupInstanceName):
+        """Create a new storage group that doesn't already exist.
+
+        If fastPolicyName is not none we attempt to remove it from the
+        default storage group of that policy and associate to the new storage
+        group that will be part of the masking view.
+        Will not handle any exception in this method it will be handled
+        up the stack
+
+        :param conn: connection the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param storageGroupName: the proposed group name (String)
+        :param volumeInstance: useful information on the volume
+        :param fastPolicyName: the fast policy name (String) can be None
+        :param volumeName: the volume name (String)
+        :param storageSystemName: the storage system name (String)
+        :param defaultStorageGroupInstanceName: the default storage group
+                                          instance name (Can be None)
+        :returns: foundStorageGroupInstanceName the instance Name of the
+                                                storage group
+        """
+        failedRet = None
+        foundStorageGroupInstanceName = (
+            self.provision.create_and_get_storage_group(
+                conn, controllerConfigService, storageGroupName,
+                volumeInstance.path))
+        if foundStorageGroupInstanceName is None:
+            LOG.error(_(
+                "Cannot get storage Group from job : %(storageGroupName)s. ")
+                % {'storageGroupName': storageGroupName})
+            return failedRet
+        else:
+            LOG.info(_(
+                "Created new storage group: %(storageGroupName)s ")
+                % {'storageGroupName': storageGroupName})
+
+        if (fastPolicyName is not None and
+                defaultStorageGroupInstanceName is not None):
+            assocTierPolicyInstanceName = (
+                self.fast.add_storage_group_and_verify_tier_policy_assoc(
+                    conn, controllerConfigService,
+                    foundStorageGroupInstanceName,
+                    storageGroupName, fastPolicyName))
+            if assocTierPolicyInstanceName is None:
+                LOG.error(_(
+                    "Cannot add and verify tier policy association for storage"
+                    " group : %(storageGroupName)s to FAST policy : "
+                    "%(fastPolicyName)s. ")
+                    % {'storageGroupName': storageGroupName,
+                       'fastPolicyName': fastPolicyName})
+                return failedRet
+
+        return foundStorageGroupInstanceName
+
+    def _find_port_group(self, conn, controllerConfigService, portGroupName):
+        """Given the port Group name get the port group instance name.
+
+        :param conn: connection to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param portGroupName: the name of the port group you are getting
+        :returns: foundPortGroup storage group instance name
+        """
+        foundPortGroupInstanceName = None
+        portMaskingGroupInstanceNames = conn.AssociatorNames(
+            controllerConfigService, resultClass='CIM_TargetMaskingGroup')
+
+        for portMaskingGroupInstanceName in portMaskingGroupInstanceNames:
+            instance = conn.GetInstance(
+                portMaskingGroupInstanceName, LocalOnly=False)
+            if portGroupName == instance['ElementName']:
+                foundPortGroupInstanceName = portMaskingGroupInstanceName
+                break
+
+        if foundPortGroupInstanceName is None:
+            LOG.error(_(
+                "Could not find port group : %(portGroupName)s. Check that the"
+                " EMC configuration file has the correct port group name. ")
+                % {'portGroupName': portGroupName})
+
+        return foundPortGroupInstanceName
+
+    def _create_or_get_initiator_group(
+            self, conn, controllerConfigService, igGroupName,
+            connector, storageSystemName):
+        """Attempt to create a initiatorGroup.
+
+        If one already exists with the same Initiator/wwns then get it
+
+        Check to see if an initiatorGroup already exists, that matches the
+        connector information
+        NOTE:  An initiator/wwn can only belong to one initiatorGroup.
+        If we were to attempt to create one with an initiator/wwn that
+        is already belong to another initiatorGroup, it would fail
+
+        :param conn: connection to the ecom server
+        :param controllerConfigService: the controller config Service
+        :param igGroupName: the proposed name of the initiator group
+        :param connector: the connector information to the host
+        :param storageSystemName: the storage system name (String)
+        :returns: foundInitiatorGroupInstanceName
+        """
+        failedRet = None
+        initiatorNames = self._find_initiator_names(conn, connector)
+        LOG.debug("The initiator name(s) are: %(initiatorNames)s "
+                  % {'initiatorNames': initiatorNames})
+
+        foundInitiatorGroupInstanceName = self._find_initiator_masking_group(
+            conn, controllerConfigService, initiatorNames)
+
+        # If you cannot find an initiatorGroup that matches the connector
+        # info create a new initiatorGroup
+        if foundInitiatorGroupInstanceName is None:
+            # check that our connector information matches the
+            # hardwareId(s) on the symm
+            storageHardwareIDInstanceNames = (
+                self._get_storage_hardware_id_instance_names(
+                    conn, initiatorNames, storageSystemName))
+            if not storageHardwareIDInstanceNames:
+                LOG.error(_(
+                    "Initiator Name(s) %(initiatorNames)s are not on array "
+                    "%(storageSystemName)s ")
+                    % {'initiatorNames': initiatorNames,
+                       'storageSystemName': storageSystemName})
+                return failedRet
+
+            foundInitiatorGroupInstanceName = self._create_initiator_Group(
+                conn, controllerConfigService, igGroupName,
+                storageHardwareIDInstanceNames)
+
+            LOG.info("Created new initiator group name: %(igGroupName)s "
+                     % {'igGroupName': igGroupName})
+        else:
+            LOG.info("Using existing initiator group name: %(igGroupName)s "
+                     % {'igGroupName': igGroupName})
+
+        return foundInitiatorGroupInstanceName
+
+    def _find_initiator_names(self, conn, connector):
+        """check the connector object for initiators(ISCSI) or wwpns(FC).
+
+        :param conn: the connection to the ecom
+        :param connector: the connector object
+        :returns: list foundinitiatornames list of string initiator names
+        """
+        foundinitiatornames = []
+        name = 'initiator name'
+        if (self.protocol.lower() == ISCSI and connector['initiator']):
+            foundinitiatornames.append(connector['initiator'])
+        elif (self.protocol.lower() == FC and connector['wwpns']):
+            for wwn in connector['wwpns']:
+                foundinitiatornames.append(wwn)
+            name = 'world wide port names'
+
+        if (foundinitiatornames is None or len(foundinitiatornames) == 0):
+            msg = (_('Error finding %s.') % name)
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        LOG.debug("Found %(name)s: %(initiator)s."
+                  % {'name': name,
+                     'initiator': foundinitiatornames})
+
+        return foundinitiatornames
+
+    def _find_initiator_masking_group(
+            self, conn, controllerConfigService, initiatorNames):
+        """Check to see if an initiatorGroup already exists.
+
+        NOTE:  An initiator/wwn can only belong to one initiatorGroup.
+        If we were to attempt to create one with an initiator/wwn that
+        already belongs to another initiatorGroup, it would fail.
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param initiatorNames: the list of initiator names
+        :returns: instance name of the first initiator masking group that
+                  contains any of the initiators, or None if none match
+        """
+        foundInitiatorMaskingGroupName = None
+
+        # All initiator masking groups known to this controller service.
+        initiatorMaskingGroupNames = (
+            conn.AssociatorNames(controllerConfigService,
+                                 ResultClass='CIM_InitiatorMaskingGroup'))
+
+        for initiatorMaskingGroupName in initiatorMaskingGroupNames:
+            initiatorMaskingGroup = conn.GetInstance(
+                initiatorMaskingGroupName, LocalOnly=False)
+            associators = (
+                conn.Associators(initiatorMaskingGroup.path,
+                                 ResultClass='EMC_StorageHardwareID'))
+            for assoc in associators:
+                # if EMC_StorageHardwareID matches the initiator,
+                # we found the existing EMC_LunMaskingSCSIProtocolController
+                # (Storage Group for VNX)
+                # we can use for masking a new LUN
+                hardwareid = assoc['StorageID']
+                for initiator in initiatorNames:
+                    # Case-insensitive comparison of hardware id vs
+                    # initiator; first match wins.
+                    if six.text_type(hardwareid).lower() == \
+                            six.text_type(initiator).lower():
+                        foundInitiatorMaskingGroupName = (
+                            initiatorMaskingGroupName)
+                        break
+
+                # Propagate the inner-loop break through both outer loops.
+                if foundInitiatorMaskingGroupName is not None:
+                    break
+
+            if foundInitiatorMaskingGroupName is not None:
+                break
+        return foundInitiatorMaskingGroupName
+
+    def _get_storage_hardware_id_instance_names(
+            self, conn, initiatorNames, storageSystemName):
+        """Given a list of initiator names find CIM_StorageHardwareID instance.
+
+        :param conn: the connection to the ecom server
+        :param initiatorNames: the list of initiator names
+        :param storageSystemName: the storage system name
+        :returns: list of matching hardware id instance names (may be empty)
+        """
+        foundHardwardIDsInstanceNames = []
+
+        # Service that manages hardware ids on this array.
+        hardwareIdManagementService = (
+            self.utils.find_storage_hardwareid_service(
+                conn, storageSystemName))
+
+        hardwareIdInstanceNames = (
+            self.utils.get_hardware_id_instance_names_from_array(
+                conn, hardwareIdManagementService))
+
+        for hardwareIdInstanceName in hardwareIdInstanceNames:
+            hardwareIdInstance = conn.GetInstance(hardwareIdInstanceName)
+            storageId = hardwareIdInstance['StorageID']
+            for initiatorName in initiatorNames:
+                LOG.debug("The storage Id is : %(storageId)s "
+                          % {'storageId': storageId.lower()})
+                LOG.debug("The initiatorName is : %(initiatorName)s "
+                          % {'initiatorName': initiatorName.lower()})
+                # Case-insensitive match; each hardware id is collected at
+                # most once (break after the first matching initiator).
+                if storageId.lower() == initiatorName.lower():
+                    foundHardwardIDsInstanceNames.append(
+                        hardwareIdInstanceName)
+                    break
+
+        LOG.debug(
+            "The found hardware IDs are : %(foundHardwardIDsInstanceNames)s "
+            % {'foundHardwardIDsInstanceNames': foundHardwardIDsInstanceNames})
+
+        return foundHardwardIDsInstanceNames
+
+    def _get_initiator_group_from_job(self, conn, job):
+        """After creating an new intiator group find it and return it
+
+        :param conn: the connection to the ecom server
+        :param job: the create initiator group job
+        :returns: dict initiatorDict
+        """
+        associators = conn.Associators(
+            job['Job'],
+            ResultClass='CIM_InitiatorMaskingGroup')
+        volpath = associators[0].path
+        initiatorDict = {}
+        initiatorDict['classname'] = volpath.classname
+        keys = {}
+        keys['CreationClassName'] = volpath['CreationClassName']
+        keys['SystemName'] = volpath['SystemName']
+        keys['DeviceID'] = volpath['DeviceID']
+        keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
+        initiatorDict['keybindings'] = keys
+        return initiatorDict
+
+    def _create_masking_view(
+            self, conn, configService, maskingViewName, deviceMaskingGroup,
+            targetMaskingGroup, initiatorMaskingGroup):
+        """Create a new masking view from the three masking groups.
+
+        :param conn: the connection to the ecom server
+        :param configService: the controller configuration service
+        :param maskingViewName: the masking view name string
+        :param deviceMaskingGroup: device(storage) masking group (instanceName)
+        :param targetMaskingGroup: target(port) masking group (instanceName)
+        :param initiatorMaskingGroup: initiator masking group (instanceName)
+        :returns: int rc return code
+        :returns: dict job
+        :raises: VolumeBackendAPIException if the job finishes with a
+                 non-zero return code
+        """
+        rc, job = conn.InvokeMethod(
+            'CreateMaskingView', configService, ElementName=maskingViewName,
+            InitiatorMaskingGroup=initiatorMaskingGroup,
+            DeviceMaskingGroup=deviceMaskingGroup,
+            TargetMaskingGroup=targetMaskingGroup)
+
+        # A non-zero rc is first retried through wait_for_job_complete;
+        # only a still non-zero rc is treated as a real failure.
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Create Masking View: %(groupName)s. "
+                    "Return code: %(rc)lu. Error: %(error)s")
+                    % {'groupName': maskingViewName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        LOG.info(_("Created new masking view : %(maskingViewName)s ")
+                 % {'maskingViewName': maskingViewName})
+        return rc, job
+
+    def find_new_masking_view(self, conn, jobDict):
+        """Find the newly created volume
+
+        :param conn: the connection to the ecom server
+        :param jobDict: the job tuple
+        :returns: instance maskingViewInstance
+        """
+        associators = conn.Associators(
+            jobDict['Job'],
+            ResultClass='Symm_LunMaskingView')
+        mvpath = associators[0].path
+        maskingViewInstance = {}
+        maskingViewInstance['classname'] = mvpath.classname
+        keys = {}
+        keys['CreationClassName'] = mvpath['CreationClassName']
+        keys['SystemName'] = mvpath['SystemName']
+        keys['DeviceID'] = mvpath['DeviceID']
+        keys['SystemCreationClassName'] = mvpath['SystemCreationClassName']
+        maskingViewInstance['keybindings'] = keys
+        return maskingViewInstance
+
+    def _get_storage_group_from_masking_view(
+            self, conn, maskingViewName, storageSystemName):
+        """Gets the Device Masking Group from masking view.
+
+        :param conn: the connection to the ecom server
+        :param maskingViewName: the masking view name (String)
+        :param storageSystemName: storage system name (String)
+        :returns: instance name foundStorageGroupInstanceName
+        """
+        foundStorageGroupInstanceName = None
+        maskingviews = conn.EnumerateInstanceNames(
+            'EMC_LunMaskingSCSIProtocolController')
+        for view in maskingviews:
+            if storageSystemName == view['SystemName']:
+                instance = conn.GetInstance(view, LocalOnly=False)
+                if maskingViewName == instance['ElementName']:
+                    foundView = view
+                    break
+
+        groups = conn.AssociatorNames(
+            foundView,
+            ResultClass='CIM_DeviceMaskingGroup')
+        if groups[0] > 0:
+            foundStorageGroupInstanceName = groups[0]
+
+        LOG.debug("Masking view: %(view)s DeviceMaskingGroup: %(masking)s."
+                  % {'view': maskingViewName,
+                     'masking': foundStorageGroupInstanceName})
+
+        return foundStorageGroupInstanceName
+
+    def _get_storage_group_instance_name(
+            self, conn, controllerConfigService, volumeInstance, volumeName,
+            sgGroupName, fastPolicyName, storageSystemName,
+            defaultStorageGroupInstanceName):
+        """Gets the storage group instance name.
+
+        If fastPolicy name is None
+        then NON FAST is assumed.  If it is a valid fastPolicy name
+        then associate the new storage group with the fast policy.
+        If we are using an existing storage group then we must check that
+        it is associated with the correct fast policy
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller configuration server
+        :param volumeInstance: the volume instance
+        :param volumeName: the volume name (String)
+        :param sgGroupName: the storage group name (String)
+        :param fastPolicyName: the fast policy name (String): can be None
+        :param storageSystemName: the storage system name (String)
+        :param defaultStorageGroupInstanceName: default storage group instance
+                                                name (can be None for Non FAST)
+        :returns: instance name storageGroupInstanceName
+        :raises: VolumeBackendAPIException if the storage group can be
+                 neither found nor created
+        """
+        # Reuse an existing storage group with this name when present.
+        storageGroupInstanceName = self.utils.find_storage_masking_group(
+            conn, controllerConfigService, sgGroupName)
+
+        if storageGroupInstanceName is None:
+            storageGroupInstanceName = self._create_storage_group(
+                conn, controllerConfigService, sgGroupName, volumeInstance,
+                fastPolicyName, volumeName, storageSystemName,
+                defaultStorageGroupInstanceName)
+            if storageGroupInstanceName is None:
+                errorMessage = (_(
+                    "Cannot create or find an storage group with name "
+                    "%(sgGroupName)s")
+                    % {'sgGroupName': sgGroupName})
+                LOG.error(errorMessage)
+                raise exception.VolumeBackendAPIException(data=errorMessage)
+        else:
+            # Existing group: only add the volume when it is not already a
+            # member; otherwise just warn.
+            if self._is_volume_in_storage_group(
+                    conn, storageGroupInstanceName, volumeInstance):
+                LOG.warn(_("Volume: %(volumeName)s is already "
+                           "part of storage group %(sgGroupName)s ")
+                         % {'volumeName': volumeName,
+                            'sgGroupName': sgGroupName})
+            else:
+                self.add_volume_to_storage_group(
+                    conn, controllerConfigService, storageGroupInstanceName,
+                    volumeInstance, volumeName, sgGroupName, fastPolicyName,
+                    storageSystemName)
+
+        return storageGroupInstanceName
+
+    def _get_port_group_instance_name(
+            self, conn, controllerConfigService, pgGroupName):
+        """Gets the port group instance name.
+
+        The portGroup name has been defined in the EMC Config file if it
+        does not exist the operation should fail
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller configuration server
+        :param pgGroupName: the port group name
+        :returns: instance name foundPortGroupInstanceName
+        """
+        foundPortGroupInstanceName = self._find_port_group(
+            conn, controllerConfigService, pgGroupName)
+        if foundPortGroupInstanceName is None:
+            errorMessage = (_(
+                "Cannot find a portGroup with name %(pgGroupName)s. "
+                "The port group for a masking view must be pre-defined")
+                % {'pgGroupName': pgGroupName})
+            LOG.error(errorMessage)
+            return foundPortGroupInstanceName
+
+        LOG.info(_(
+            "Port group instance name is %(foundPortGroupInstanceName)s")
+            % {'foundPortGroupInstanceName': foundPortGroupInstanceName})
+
+        return foundPortGroupInstanceName
+
+    def _get_initiator_group_instance_name(
+            self, conn, controllerConfigService, igGroupName, connector,
+            storageSystemName):
+        """Gets the initiator group instance name.
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller configuration server
+        :param igGroupName: the port group name
+        :param connector: the connector object
+        :param storageSystemName = the storage system name
+        :returns: instance name foundInitiatorGroupInstanceName
+        """
+        foundInitiatorGroupInstanceName = (self._create_or_get_initiator_group(
+            conn, controllerConfigService, igGroupName, connector,
+            storageSystemName))
+        if foundInitiatorGroupInstanceName is None:
+            errorMessage = (_(
+                "Cannot create or find an initiator group with "
+                "name %(igGroupName)s")
+                % {'igGroupName': igGroupName})
+            LOG.error(errorMessage)
+
+        return foundInitiatorGroupInstanceName
+
+    def _get_masking_view_instance_name(
+            self, conn, controllerConfigService, maskingViewName,
+            storageGroupInstanceName, portGroupInstanceName,
+            initiatorGroupInstanceName):
+        """Gets the masking view instance name
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller configuration server
+        :param maskingViewName: the masking view name (String)
+        :param storageGroupInstanceName: the storage group instance name
+        :param portGroupInstanceName: the port group instance name
+        :param initiatorGroupInstanceName: the initiator group instance name
+        :returns: instance name foundMaskingViewInstanceName
+        """
+        rc, job = self._create_masking_view(
+            conn, controllerConfigService, maskingViewName,
+            storageGroupInstanceName, portGroupInstanceName,
+            initiatorGroupInstanceName)
+        foundMaskingViewInstanceName = self.find_new_masking_view(conn, job)
+        if foundMaskingViewInstanceName is None:
+            errorMessage = (_(
+                "Cannot find the new masking view just created with name "
+                "%(maskingViewName)s")
+                % {'maskingViewName': maskingViewName})
+            LOG.error(errorMessage)
+
+        return foundMaskingViewInstanceName
+
+    def _check_if_rollback_action_for_masking_required(
+            self, conn, controllerConfigService, volumeInstance,
+            volumeName, fastPolicyName, defaultStorageGroupInstanceName):
+        """This is a rollback action for FAST.
+
+        We need to be able to return the volume to the default storage group
+        if anything has gone wrong. The volume can also potentially belong to
+        a storage group that is not the default depending on where
+        the exception occurred.
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller config service
+        :param volumeInstance: the volume instance
+        :param volumeName: the volume name (String)
+        :param fastPolicyName: the fast policy name (String)
+        :param defaultStorageGroupInstanceName: the default storage group
+                                          instance name
+        :raises: VolumeBackendAPIException if the rollback itself fails
+        """
+        try:
+            foundStorageGroupInstanceName = (
+                self.utils.get_storage_group_from_volume(
+                    conn, volumeInstance.path))
+            # volume is not associated with any storage group so add it back
+            # to the default
+            if len(foundStorageGroupInstanceName) == 0:
+                infoMessage = (_(
+                    "Performing rollback on Volume: %(volumeName)s "
+                    "To return it to the default storage group for FAST policy"
+                    " %(fastPolicyName)s. ")
+                    % {'volumeName': volumeName,
+                       'fastPolicyName': fastPolicyName})
+                LOG.warn("No storage group found. " + infoMessage)
+                assocDefaultStorageGroupName = (
+                    self.fast
+                    .add_volume_to_default_storage_group_for_fast_policy(
+                        conn, controllerConfigService, volumeInstance,
+                        volumeName, fastPolicyName))
+                # Re-add failure is logged but not raised here; manual
+                # intervention is requested instead.
+                if assocDefaultStorageGroupName is None:
+                    errorMsg = (_(
+                        "Failed to Roll back to re-add volume %(volumeName)s "
+                        "to default storage group for fast policy "
+                        "%(fastPolicyName)s: Please contact your sys admin to "
+                        "get the volume re-added manually ")
+                        % {'volumeName': volumeName,
+                           'fastPolicyName': fastPolicyName})
+                    LOG.error(errorMsg)
+            if len(foundStorageGroupInstanceName) > 0:
+                errorMsg = (_(
+                    "The storage group found is "
+                    "%(foundStorageGroupInstanceName)s: ")
+                    % {'foundStorageGroupInstanceName':
+                        foundStorageGroupInstanceName})
+                LOG.info(errorMsg)
+
+                # check the name see is it the default storage group or another
+                if (foundStorageGroupInstanceName !=
+                        defaultStorageGroupInstanceName):
+                    # remove it from its current masking view and return it
+                    # to its default masking view if fast is enabled
+                    self.remove_and_reset_members(
+                        conn, controllerConfigService, volumeInstance,
+                        fastPolicyName, volumeName)
+        except Exception as e:
+            # Any failure during rollback is escalated; the volume may be
+            # left outside its default storage group.
+            LOG.error(_("Exception: %s") % six.text_type(e))
+            errorMessage = (_(
+                "Rollback for Volume: %(volumeName)s has failed. "
+                "Please contact your system administrator to manually return "
+                "your volume to the default storage group for fast policy "
+                "%(fastPolicyName)s failed ")
+                % {'volumeName': volumeName,
+                   'fastPolicyName': fastPolicyName})
+            LOG.error(errorMessage)
+            raise exception.VolumeBackendAPIException(data=errorMessage)
+
+    def _find_new_initiator_group(self, conn, maskingGroupDict):
+        """After creating an new initiator group find it and return it.
+
+        :param conn: connection the ecom server
+        :param maskingGroupDict: the maskingGroupDict dict
+        :param storageGroupName: storage group name (String)
+        :returns: instance name foundInitiatorGroupInstanceName
+        """
+        foundInitiatorGroupInstanceName = None
+
+        if 'MaskingGroup' in maskingGroupDict:
+            foundInitiatorGroupInstanceName = maskingGroupDict['MaskingGroup']
+
+        return foundInitiatorGroupInstanceName
+
+    def _get_initiator_group_from_masking_view(
+            self, conn, maskingViewName, storageSystemName):
+        """Given the masking view name get the inititator group from it.
+
+        :param conn: connection to the ecom server
+        :param maskingViewName: the name of the masking view
+        :param storageSystemName: the storage system name
+        :returns: instance name foundInitiatorMaskingGroupInstanceName,
+                  or None when the view has no initiator group
+        """
+        foundInitiatorMaskingGroupInstanceName = None
+
+        maskingviews = conn.EnumerateInstanceNames(
+            'EMC_LunMaskingSCSIProtocolController')
+        for view in maskingviews:
+            if storageSystemName == view['SystemName']:
+                instance = conn.GetInstance(view, LocalOnly=False)
+                if maskingViewName == instance['ElementName']:
+                    foundView = view
+                    break
+
+        # NOTE(review): if no view matched above, 'foundView' is unbound
+        # here and a NameError is raised -- confirm callers only reach this
+        # helper for masking views that exist.
+        groups = conn.AssociatorNames(
+            foundView,
+            ResultClass='CIM_InitiatorMaskingGroup')
+        if len(groups):
+            foundInitiatorMaskingGroupInstanceName = groups[0]
+
+        LOG.debug(
+            "Masking view: %(view)s InitiatorMaskingGroup: %(masking)s."
+            % {'view': maskingViewName,
+               'masking': foundInitiatorMaskingGroupInstanceName})
+
+        return foundInitiatorMaskingGroupInstanceName
+
+    def _verify_initiator_group_from_masking_view(
+            self, conn, controllerConfigService, maskingViewName, connector,
+            storageSystemName, igGroupName):
+        """Check that the initiator group contains the correct initiators.
+
+        If using an existing masking view check that the initiator group
+        contains the correct initiators.  If it does not contain the correct
+        initiators then we delete the initiator group from the masking view,
+        re-create it with the correct initiators and add it to the masking view
+        NOTE:  EMC does not support ModifyMaskingView so we must first
+               delete the masking view and recreate it.
+
+        :param conn: connection the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param maskingViewName: maskingview name (String)
+        :param connector: the connector dict
+        :param storageSystemName: the storage System Name (string)
+        :param igGroupName: the initiator group name (String)
+        :returns: True on success (including the no-op case where the
+                  groups already match), False when a required component
+                  cannot be found or created
+        """
+        initiatorNames = self._find_initiator_names(conn, connector)
+        foundInitiatorGroupFromConnector = self._find_initiator_masking_group(
+            conn, controllerConfigService, initiatorNames)
+
+        foundInitiatorGroupFromMaskingView = (
+            self._get_initiator_group_from_masking_view(
+                conn, maskingViewName, storageSystemName))
+
+        # Nothing to do when the masking view already uses the same
+        # initiator group as the connector.
+        if (foundInitiatorGroupFromConnector !=
+                foundInitiatorGroupFromMaskingView):
+            if foundInitiatorGroupFromMaskingView is not None:
+                maskingViewInstanceName = self._find_masking_view(
+                    conn, maskingViewName, storageSystemName)
+                # No existing group holds these initiators: create one from
+                # the hardware ids registered on the array.
+                if foundInitiatorGroupFromConnector is None:
+                    storageHardwareIDInstanceNames = (
+                        self._get_storage_hardware_id_instance_names(
+                            conn, initiatorNames, storageSystemName))
+                    if not storageHardwareIDInstanceNames:
+                        LOG.error(_(
+                            "Initiator Name(s) %(initiatorNames)s are not on "
+                            "array %(storageSystemName)s ")
+                            % {'initiatorNames': initiatorNames,
+                               'storageSystemName': storageSystemName})
+                        return False
+
+                    foundInitiatorGroupFromConnector = (
+                        self._create_initiator_Group(
+                            conn, controllerConfigService, igGroupName,
+                            storageHardwareIDInstanceNames))
+                storageGroupInstanceName = (
+                    self._get_storage_group_from_masking_view(
+                        conn, maskingViewName, storageSystemName))
+                portGroupInstanceName = self._get_port_group_from_masking_view(
+                    conn, maskingViewName, storageSystemName)
+                # ModifyMaskingView is unsupported, so delete the view and
+                # recreate it with the corrected initiator group.
+                if (foundInitiatorGroupFromConnector is not None and
+                        storageGroupInstanceName is not None and
+                        portGroupInstanceName is not None):
+                    self._delete_masking_view(
+                        conn, controllerConfigService, maskingViewName,
+                        maskingViewInstanceName)
+                    newMaskingViewInstanceName = (
+                        self._get_masking_view_instance_name(
+                            conn, controllerConfigService, maskingViewName,
+                            storageGroupInstanceName, portGroupInstanceName,
+                            foundInitiatorGroupFromConnector))
+                    if newMaskingViewInstanceName is not None:
+                        LOG.debug(
+                            "The old masking view has been replaced: "
+                            "%(maskingViewName)s.  "
+                            % {'maskingViewName': maskingViewName})
+                else:
+                    LOG.error(_(
+                        "One of the components of the original masking view "
+                        "%(maskingViewName)s cannot be retrieved so "
+                        "please contact your system administrator to check "
+                        "that the correct initiator(s) are part of masking ")
+                        % {'maskingViewName': maskingViewName})
+                    return False
+        return True
+
+    def _create_initiator_Group(
+            self, conn, controllerConfigService, igGroupName,
+            hardwareIdinstanceNames):
+        """Create a new initiator group
+
+        Given a list of hardwareId Instance name create a new
+        initiator group
+
+        :param conn: connection the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param igGroupName: the initiator group name (String)
+        :param hardwareIdinstanceNames: one or more hardware id instance names
+        """
+        rc, job = conn.InvokeMethod(
+            'CreateGroup', controllerConfigService, GroupName=igGroupName,
+            Type=self.utils.get_num(INITIATORGROUPTYPE, '16'),
+            Members=[hardwareIdinstanceNames[0]])
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Create Group: %(groupName)s.  "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'groupName': igGroupName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        foundInitiatorGroupInstanceName = self._find_new_initiator_group(
+            conn, job)
+
+        numHardwareIDInstanceNames = len(hardwareIdinstanceNames)
+        if numHardwareIDInstanceNames > 1:
+            for j in range(1, numHardwareIDInstanceNames):
+                rc, job = conn.InvokeMethod(
+                    'AddMembers', controllerConfigService,
+                    MaskingGroup=foundInitiatorGroupInstanceName,
+                    Members=[hardwareIdinstanceNames[j]])
+
+                if rc != 0L:
+                    rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+                    if rc != 0L:
+                        exceptionMessage = (_(
+                            "Error adding initiator to group : %(groupName)s. "
+                            "Return code: %(rc)lu.  Error: %(error)s")
+                            % {'groupName': igGroupName,
+                               'rc': rc,
+                               'error': errordesc})
+                        LOG.error(exceptionMessage)
+                        raise exception.VolumeBackendAPIException(
+                            data=exceptionMessage)
+                j = j + 1
+
+        return foundInitiatorGroupInstanceName
+
+    def _get_port_group_from_masking_view(
+            self, conn, maskingViewName, storageSystemName):
+        """Given the masking view name get the port group from it
+
+        :param conn: connection the the ecom server
+        :param maskingViewName: the name of the masking view
+        :param storageSystemName: the storage system name
+        :returns: instance name foundPortMaskingGroupInstanceName
+        """
+        foundPortMaskingGroupInstanceName = None
+
+        maskingviews = conn.EnumerateInstanceNames(
+            'EMC_LunMaskingSCSIProtocolController')
+        for view in maskingviews:
+            if storageSystemName == view['SystemName']:
+                instance = conn.GetInstance(view, LocalOnly=False)
+                if maskingViewName == instance['ElementName']:
+                    foundView = view
+                    break
+
+        groups = conn.AssociatorNames(
+            foundView,
+            ResultClass='CIM_TargetMaskingGroup')
+        if len(groups) > 0:
+            foundPortMaskingGroupInstanceName = groups[0]
+
+        LOG.debug(
+            "Masking view: %(view)s InitiatorMaskingGroup: %(masking)s."
+            % {'view': maskingViewName,
+               'masking': foundPortMaskingGroupInstanceName})
+
+        return foundPortMaskingGroupInstanceName
+
+    def _delete_masking_view(
+            self, conn, controllerConfigService, maskingViewName,
+            maskingViewInstanceName):
+        """Delete a masking view
+
+        :param conn: connection the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param maskingViewName: maskingview name (String)
+        :param maskingViewInstanceName: the masking view instance name
+        """
+        rc, job = conn.InvokeMethod('DeleteMaskingView',
+                                    controllerConfigService,
+                                    ProtocolController=maskingViewInstanceName)
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Modifying masking view : %(groupName)s. "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'groupName': maskingViewName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+    def get_masking_view_from_storage_group(
+            self, conn, storageGroupInstanceName):
+        """Get the associated maskingview instance name
+
+        Given storage group instance name, get the associated masking
+        view instance name
+
+        :param conn: connection the ecom server
+        :param storageGroupInstanceName: the storage group instance name
+        :returns: instance name foundMaskingViewInstanceName
+        """
+        foundMaskingViewInstanceName = None
+        maskingViews = conn.AssociatorNames(
+            storageGroupInstanceName,
+            ResultClass='Symm_LunMaskingView')
+        if len(maskingViews) > 0:
+            foundMaskingViewInstanceName = maskingViews[0]
+
+        return foundMaskingViewInstanceName
+
+    def add_volume_to_storage_group(
+            self, conn, controllerConfigService, storageGroupInstanceName,
+            volumeInstance, volumeName, sgGroupName, fastPolicyName,
+            storageSystemName=None):
+        """Add a volume to an existing storage group.
+
+        :param conn: connection to ecom server
+        :param controllerConfigService: the controller configuration service
+        :param storageGroupInstanceName: storage group instance name
+        :param volumeInstance: the volume instance
+        :param volumeName: the name of the volume (String)
+        :param sgGroupName: the name of the storage group (String)
+        :param fastPolicyName: the fast policy name (String) can be None
+        :param storageSystemName: the storage system name (Optional Parameter),
+                            if None plain operation assumed
+        :returns: None -- the original docstring claimed an (rc, job)
+                  return, but nothing is returned; success is only logged.
+                  Failures presumably propagate from
+                  add_members_to_masking_group -- verify in the provision
+                  module.
+        """
+        self.provision.add_members_to_masking_group(
+            conn, controllerConfigService, storageGroupInstanceName,
+            volumeInstance.path, volumeName)
+
+        infoMessage = (_(
+            "Added volume: %(volumeName)s to existing storage group "
+            "%(sgGroupName)s. ")
+            % {'volumeName': volumeName,
+               'sgGroupName': sgGroupName})
+        LOG.info(infoMessage)
+
+    def remove_device_from_default_storage_group(
+            self, conn, controllerConfigService, volumeInstanceName,
+            volumeName, fastPolicyName):
+        """Remove the volume from the default storage group.
+
+        Remove the volume from the default storage group for the FAST
+        policy and return the default storage group instance name.
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller config service
+        :param volumeInstanceName: the volume instance name
+        :param volumeName: the volume name (String)
+        :param fastPolicyName: the fast policy name (String)
+        :returns: instance name defaultStorageGroupInstanceName, or None if
+                  the volume was not in the default storage group or could
+                  not be removed from it
+        """
+        failedRet = None
+        defaultStorageGroupInstanceName = (
+            self.fast.get_and_verify_default_storage_group(
+                conn, controllerConfigService, volumeInstanceName,
+                volumeName, fastPolicyName))
+
+        if defaultStorageGroupInstanceName is None:
+            errorMessage = (_(
+                "Volume %(volumeName)s was not first part of the default "
+                "storage group for the FAST Policy")
+                % {'volumeName': volumeName})
+            LOG.warn(errorMessage)
+            return failedRet
+
+        assocVolumeInstanceNames = self.get_devices_from_storage_group(
+            conn, defaultStorageGroupInstanceName)
+
+        # NOTE(review): log message appears to be missing the word
+        # "volumes" after the count; left as-is (runtime string).
+        LOG.debug(
+            "There are %(length)lu associated with the default storage group "
+            "for fast before removing volume %(volumeName)s"
+            % {'length': len(assocVolumeInstanceNames),
+               'volumeName': volumeName})
+
+        self.provision.remove_device_from_storage_group(
+            conn, controllerConfigService, defaultStorageGroupInstanceName,
+            volumeInstanceName, volumeName)
+
+        # Re-read the group membership so the before/after counts can be
+        # compared in the debug log.
+        assocVolumeInstanceNames = self.get_devices_from_storage_group(
+            conn, defaultStorageGroupInstanceName)
+        LOG.debug(
+            "There are %(length)lu associated with the default storage group "
+            "for fast after removing volume %(volumeName)s"
+            % {'length': len(assocVolumeInstanceNames),
+               'volumeName': volumeName})
+
+        # required for unit tests
+        emptyStorageGroupInstanceName = (
+            self._wrap_get_storage_group_from_volume(conn, volumeInstanceName))
+
+        # The volume should no longer belong to any storage group; if it
+        # still does, the removal did not take effect.
+        if emptyStorageGroupInstanceName is not None:
+            errorMessage = (_(
+                "Failed to remove %(volumeName)s from the default storage "
+                "group for the FAST Policy")
+                % {'volumeName': volumeName})
+            LOG.error(errorMessage)
+            return failedRet
+
+        return defaultStorageGroupInstanceName
+
+    def _wrap_get_storage_group_from_volume(self, conn, volumeInstanceName):
+
+        """Wrapper for utils.get_storage_group_from_volume.
+
+        Exists only so unit tests can override/stub this call.
+
+        :param conn: the connection to the ecom server
+        :param volumeInstanceName: the volume instance name
+        :returns: whatever utils.get_storage_group_from_volume returns
+                  (the storage group instance name, or None)
+        """
+        return self.utils.get_storage_group_from_volume(
+            conn, volumeInstanceName)
+
+    def get_devices_from_storage_group(
+            self, conn, storageGroupInstanceName):
+        """Get the associated volume instance names.
+
+        Given the storage group instance name get the associated volume
+        instance names.
+
+        :param conn: connection to the ecom server
+        :param storageGroupInstanceName: the storage group instance name
+        :returns: list volumeInstanceNames list of volume instance names
+        """
+        volumeInstanceNames = conn.AssociatorNames(
+            storageGroupInstanceName,
+            ResultClass='EMC_StorageVolume')
+
+        return volumeInstanceNames
+
+    def get_associated_masking_group_from_device(
+            self, conn, volumeInstanceName):
+        """Get the masking group the volume belongs to.
+
+        :param conn: connection to the ecom server
+        :param volumeInstanceName: the volume instance name
+        :returns: the first associated CIM_DeviceMaskingGroup instance
+                  name, or None if the volume is in no masking group
+        """
+        maskingGroupInstanceNames = conn.AssociatorNames(
+            volumeInstanceName,
+            ResultClass='CIM_DeviceMaskingGroup',
+            AssocClass='CIM_OrderedMemberOfCollection')
+        if len(maskingGroupInstanceNames) > 0:
+            return maskingGroupInstanceNames[0]
+        else:
+            return None
+
+    def remove_and_reset_members(
+            self, conn, controllerConfigService, volumeInstance,
+            fastPolicyName, volumeName):
+        """Part of unmap device or rollback.
+
+        Removes volume from the Device Masking Group that belongs to a
+        Masking View. Check if fast policy is in the extra specs, if it isn't
+        we do not need to do any thing for FAST. Assume that
+        isTieringPolicySupported is False unless the FAST policy is in
+        the extra specs and tiering is enabled on the array.
+
+        :param conn: connection to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param volumeInstance: the volume Instance
+        :param fastPolicyName: the fast policy name (if it exists)
+        :param volumeName: the volume name
+        :returns: int rc -- return code of the RemoveMembers job, or -1 if
+                  the removal was never invoked
+        """
+        rc = -1
+        # NOTE(review): if the volume is in no masking group this returns
+        # None and the AssociatorNames call below would fail -- confirm
+        # callers guarantee membership.
+        maskingGroupInstanceName = (
+            self.get_associated_masking_group_from_device(
+                conn, volumeInstance.path))
+
+        volumeInstanceNames = self.get_devices_from_storage_group(
+            conn, maskingGroupInstanceName)
+        storageSystemInstanceName = self.utils.find_storage_system(
+            conn, controllerConfigService)
+
+        isTieringPolicySupported = False
+        if fastPolicyName is not None:
+            tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
+                conn, storageSystemInstanceName)
+
+            isTieringPolicySupported = self.fast.is_tiering_policy_enabled(
+                conn, tierPolicyServiceInstanceName)
+            LOG.debug(
+                "FAST policy enabled on %(storageSystem)s: %(isSupported)s"
+                % {'storageSystem': storageSystemInstanceName,
+                   'isSupported': isTieringPolicySupported})
+
+        numVolInMaskingView = len(volumeInstanceNames)
+        LOG.debug(
+            "There are %(numVol)d volumes in the masking view %(maskingGroup)s"
+            % {'numVol': numVolInMaskingView,
+               'maskingGroup': maskingGroupInstanceName})
+
+        if numVolInMaskingView == 1:  # last volume in the storage group
+            # delete masking view
+            mvInstanceName = self.get_masking_view_from_storage_group(
+                conn, maskingGroupInstanceName)
+            LOG.debug(
+                "Last volume in the storage group, deleting masking view "
+                "%(mvInstanceName)s"
+                % {'mvInstanceName': mvInstanceName})
+            conn.DeleteInstance(mvInstanceName)
+
+            # disassociate storage group from FAST policy
+            if fastPolicyName is not None and isTieringPolicySupported is True:
+                tierPolicyInstanceName = self.fast.get_tier_policy_by_name(
+                    conn, storageSystemInstanceName['Name'], fastPolicyName)
+
+                LOG.info(_(
+                    "policy:%(policy)s, policy service:%(service)s, "
+                    "masking group=%(maskingGroup)s")
+                    % {'policy': tierPolicyInstanceName,
+                       'service': tierPolicyServiceInstanceName,
+                       'maskingGroup': maskingGroupInstanceName})
+
+                self.fast.delete_storage_group_from_tier_policy_rule(
+                    conn, tierPolicyServiceInstanceName,
+                    maskingGroupInstanceName, tierPolicyInstanceName)
+
+            rc = self.provision.remove_device_from_storage_group(
+                conn, controllerConfigService, maskingGroupInstanceName,
+                volumeInstance.path, volumeName)
+
+            LOG.debug(
+                "Remove the last volume %(volumeName)s completed successfully."
+                % {'volumeName': volumeName})
+
+            # Delete storage group
+            conn.DeleteInstance(maskingGroupInstanceName)
+
+            # Move the volume back to the default storage group for the
+            # FAST policy.
+            if isTieringPolicySupported:
+                self._cleanup_tiering(
+                    conn, controllerConfigService, fastPolicyName,
+                    volumeInstance, volumeName)
+        else:
+            # not the last volume; the masking view and storage group are
+            # kept, only this volume is removed
+            LOG.debug("start: number of volumes in masking storage group: "
+                      "%(numVol)d" % {'numVol': len(volumeInstanceNames)})
+            rc = self.provision.remove_device_from_storage_group(
+                conn, controllerConfigService, maskingGroupInstanceName,
+                volumeInstance.path, volumeName)
+
+            LOG.debug(
+                "RemoveMembers for volume %(volumeName)s completed "
+                "successfully." % {'volumeName': volumeName})
+
+            # if FAST POLICY enabled, move the volume to the default SG
+            if fastPolicyName is not None and isTieringPolicySupported:
+                self._cleanup_tiering(
+                    conn, controllerConfigService, fastPolicyName,
+                    volumeInstance, volumeName)
+
+            # validation
+            volumeInstanceNames = self.get_devices_from_storage_group(
+                conn, maskingGroupInstanceName)
+            LOG.debug(
+                "end: number of volumes in masking storage group: %(numVol)d"
+                % {'numVol': len(volumeInstanceNames)})
+
+        return rc
+
+    def _cleanup_tiering(
+            self, conn, controllerConfigService, fastPolicyName,
+            volumeInstance, volumeName):
+        """Cleanup tiering: return the volume to the FAST default group.
+
+        Adds the volume back into the default storage group for the FAST
+        policy, logging the group's volume count before and after.
+
+        :param conn: the ecom connection
+        :param controllerConfigService: the controller configuration service
+        :param fastPolicyName: the fast policy name
+        :param volumeInstance: volume instance
+        :param volumeName: the volume name
+        :returns: None
+        """
+        defaultStorageGroupInstanceName = (
+            self.fast.get_policy_default_storage_group(
+                conn, controllerConfigService, fastPolicyName))
+        volumeInstanceNames = self.get_devices_from_storage_group(
+            conn, defaultStorageGroupInstanceName)
+        LOG.debug(
+            "start: number of volumes in default storage group: %(numVol)d"
+            % {'numVol': len(volumeInstanceNames)})
+        # Re-bind the name returned by the add call so the post-add count
+        # is taken from the group the volume was actually added to.
+        defaultStorageGroupInstanceName = (
+            self.fast.add_volume_to_default_storage_group_for_fast_policy(
+                conn, controllerConfigService, volumeInstance, volumeName,
+                fastPolicyName))
+        # check default storage group number of volumes
+        volumeInstanceNames = self.get_devices_from_storage_group(
+            conn, defaultStorageGroupInstanceName)
+        LOG.debug(
+            "end: number of volumes in default storage group: %(numVol)d"
+            % {'numVol': len(volumeInstanceNames)})
diff --git a/cinder/volume/drivers/emc/emc_vmax_provision.py b/cinder/volume/drivers/emc/emc_vmax_provision.py
new file mode 100644 (file)
index 0000000..9d9ffa1
--- /dev/null
@@ -0,0 +1,649 @@
+# Copyright (c) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.emc import emc_vmax_utils
+
+LOG = logging.getLogger(__name__)
+
+# Group type passed as the CreateGroup 'Type' parameter when creating
+# storage groups in this module.
+STORAGEGROUPTYPE = 4
+# NOTE(review): presumably the port-group CreateGroup type -- not used in
+# this module's visible code; confirm against SMI-S spec.
+POSTGROUPTYPE = 3
+
+# CIM namespace for EMC providers.
+EMC_ROOT = 'root/emc'
+# 'ElementType' values used in CreateOrModifyElementFromStoragePool /
+# CreateOrModifyCompositeElement calls below (thin provisioned volumes).
+THINPROVISIONINGCOMPOSITE = 32768
+THINPROVISIONING = 5
+
+class EMCVMAXProvision(object):
+    """Provisioning Class for SMI-S based EMC volume drivers.
+
+    This Provisioning class is for EMC volume drivers based on SMI-S.
+    It supports VMAX arrays.
+    """
+    def __init__(self, prtcl):
+        # prtcl: storage protocol identifier forwarded to the utils helper
+        # (presumably 'iSCSI' or 'FC' -- TODO confirm against the drivers).
+        self.protocol = prtcl
+        self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
+
+    def delete_volume_from_pool(
+            self, conn, storageConfigservice, volumeInstanceName, volumeName):
+        """Given the volume instance remove it from the pool.
+
+        :param conn: connection to the ecom server
+        :param storageConfigservice: the storage configuration service
+        :param volumeInstanceName: the volume instance name
+        :param volumeName: the volume name (String)
+        :returns: rc -- return code of the delete (0 on success)
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, job = conn.InvokeMethod(
+            'EMCReturnToStoragePool', storageConfigservice,
+            TheElements=[volumeInstanceName])
+
+        # A non-zero rc here presumably means an asynchronous job was
+        # started; wait for it and re-check the final return code.
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Delete Volume: %(volumeName)s.  "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'volumeName': volumeName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        return rc
+
+    def create_volume_from_pool(
+            self, conn, storageConfigService, volumeName,
+            poolInstanceName, volumeSize):
+        """Create the volume in the specified pool.
+
+        :param conn: the connection information to the ecom server
+        :param storageConfigService: the storage configuration service
+        :param volumeName: the volume name (String)
+        :param poolInstanceName: the pool instance name to create
+                                 the dummy volume in
+        :param volumeSize: volume size (String)
+        :returns: volumeDict -- the volume dict
+        :returns: rc -- return code (0 on success)
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, job = conn.InvokeMethod(
+            'CreateOrModifyElementFromStoragePool',
+            storageConfigService, ElementName=volumeName,
+            InPool=poolInstanceName,
+            ElementType=self.utils.get_num(THINPROVISIONING, '16'),
+            Size=self.utils.get_num(volumeSize, '64'),
+            EMCBindElements=False)
+
+        LOG.debug("Create Volume: %(volumename)s  Return code: %(rc)lu"
+                  % {'volumename': volumeName,
+                     'rc': rc})
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Create Volume: %(volumeName)s.  "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'volumeName': volumeName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        # Find the newly created volume
+        volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
+
+        return volumeDict, rc
+
+    def create_and_get_storage_group(self, conn, controllerConfigService,
+                                     storageGroupName, volumeInstanceName):
+        """Create a storage group and return it.
+
+        :param conn: the connection information to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param storageGroupName: the storage group name (String)
+        :param volumeInstanceName: the volume instance name, added to the
+                                   group as its initial member
+        :returns: foundStorageGroupInstanceName -- instance name of the
+                                                   new storage group
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, job = conn.InvokeMethod(
+            'CreateGroup', controllerConfigService, GroupName=storageGroupName,
+            Type=self.utils.get_num(STORAGEGROUPTYPE, '16'),
+            Members=[volumeInstanceName])
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Create Group: %(groupName)s.  "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'groupName': storageGroupName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        foundStorageGroupInstanceName = self._find_new_storage_group(
+            conn, job, storageGroupName)
+
+        return foundStorageGroupInstanceName
+
+    def create_storage_group_no_members(
+            self, conn, controllerConfigService, groupName):
+        """Create a new storage group that has no members.
+
+        :param conn: connection to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param groupName: the proposed group name
+        :returns: foundStorageGroupInstanceName -- the instance name of
+                                                   the storage group
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        # DeleteWhenBecomesUnassociated=False keeps the empty group alive
+        # until it is explicitly deleted.
+        rc, job = conn.InvokeMethod(
+            'CreateGroup', controllerConfigService, GroupName=groupName,
+            Type=self.utils.get_num(STORAGEGROUPTYPE, '16'),
+            DeleteWhenBecomesUnassociated=False)
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Create Group: %(groupName)s.  "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'groupName': groupName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        foundStorageGroupInstanceName = self._find_new_storage_group(
+            conn, job, groupName)
+
+        return foundStorageGroupInstanceName
+
+    def _find_new_storage_group(
+            self, conn, maskingGroupDict, storageGroupName):
+        """After creating a new storage group find it and return it.
+
+        :param conn: connection to the ecom server (unused here)
+        :param maskingGroupDict: the dict returned by the CreateGroup call
+        :param storageGroupName: storage group name (String, unused here)
+        :returns: maskingGroupDict['MaskingGroup'] if present, else None
+        """
+        foundStorageGroupInstanceName = None
+        if 'MaskingGroup' in maskingGroupDict:
+            foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
+
+        return foundStorageGroupInstanceName
+
+    def get_volume_dict_from_job(self, conn, jobInstance):
+        """Given the jobInstance determine the volume Instance.
+
+        :param conn: the ecom connection
+        :param jobInstance: the instance of a job
+        :returns: volumeDict -- dict with 'classname' and 'keybindings'
+                  identifying the volume created by the job
+        """
+        associators = conn.Associators(
+            jobInstance,
+            ResultClass='EMC_StorageVolume')
+        # NOTE(review): assumes the job has at least one associated volume;
+        # an empty result would raise IndexError here -- confirm callers
+        # only pass completed creation jobs.
+        volpath = associators[0].path
+        volumeDict = {}
+        volumeDict['classname'] = volpath.classname
+        keys = {}
+        keys['CreationClassName'] = volpath['CreationClassName']
+        keys['SystemName'] = volpath['SystemName']
+        keys['DeviceID'] = volpath['DeviceID']
+        keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
+        volumeDict['keybindings'] = keys
+
+        return volumeDict
+
+    def remove_device_from_storage_group(
+            self, conn, controllerConfigService, storageGroupInstanceName,
+            volumeInstanceName, volumeName):
+        """Remove a volume from a storage group.
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param storageGroupInstanceName: the instance name of the storage group
+        :param volumeInstanceName: the instance name of the volume
+        :param volumeName: the volume name (String)
+        :returns: rc -- the return code of the job (0 on success)
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, jobDict = conn.InvokeMethod('RemoveMembers',
+                                        controllerConfigService,
+                                        MaskingGroup=storageGroupInstanceName,
+                                        Members=[volumeInstanceName])
+        if rc != 0L:
+            rc, errorDesc = self.utils.wait_for_job_complete(conn, jobDict)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error removing volume %(vol)s. %(error)s")
+                    % {'vol': volumeName, 'error': errorDesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        return rc
+
+    def add_members_to_masking_group(
+            self, conn, controllerConfigService, storageGroupInstanceName,
+            volumeInstanceName, volumeName):
+        """Add a member to a masking group.
+
+        :param conn: the connection to the ecom server
+        :param controllerConfigService: the controller configuration service
+        :param storageGroupInstanceName: the instance name of the storage group
+        :param volumeInstanceName: the instance name of the volume
+        :param volumeName: the volume name (String)
+        :returns: None
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, job = conn.InvokeMethod(
+            'AddMembers', controllerConfigService,
+            MaskingGroup=storageGroupInstanceName,
+            Members=[volumeInstanceName])
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error mapping volume %(vol)s. %(error)s")
+                    % {'vol': volumeName, 'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+    def unbind_volume_from_storage_pool(
+            self, conn, storageConfigService, poolInstanceName,
+            volumeInstanceName, volumeName):
+        """Unbind a volume from a pool.
+
+        :param conn: the connection information to the ecom server
+        :param storageConfigService: the storage configuration service
+                                     instance name
+        :param poolInstanceName: the pool instance name
+        :param volumeInstanceName: the volume instance name
+        :param volumeName: the volume name
+        :returns: rc -- return code (0 on success)
+        :returns: job -- the job dict from the unbind call
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, job = conn.InvokeMethod(
+            'EMCUnBindElement',
+            storageConfigService,
+            InPool=poolInstanceName,
+            TheElement=volumeInstanceName)
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error unbinding volume %(vol)s from pool. %(error)s")
+                    % {'vol': volumeName, 'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        return rc, job
+
+    def modify_composite_volume(
+            self, conn, elementCompositionService, theVolumeInstanceName,
+            inVolumeInstanceName):
+
+        """Given a composite volume add a storage volume to it.
+
+        :param conn: the connection to the ecom server
+        :param elementCompositionService: the element composition service
+        :param theVolumeInstanceName: the existing composite volume
+        :param inVolumeInstanceName: the volume you wish to add to the
+                                     composite volume
+        :returns: rc -- return code (0 on success)
+        :returns: job -- the job dict from the modify call
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, job = conn.InvokeMethod(
+            'CreateOrModifyCompositeElement',
+            elementCompositionService,
+            TheElement=theVolumeInstanceName,
+            InElements=[inVolumeInstanceName])
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error adding volume to composite volume. "
+                    "Error is: %(error)s")
+                    % {'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        return rc, job
+
+    def create_composite_volume(
+            self, conn, elementCompositionService, volumeSize, volumeName,
+            poolInstanceName, compositeType, numMembers):
+        """Create a new volume using the auto meta feature.
+
+        :param conn: the connection to the ecom server
+        :param elementCompositionService: the element composition service
+        :param volumeSize: the size of the volume
+        :param volumeName: user friendly name
+        :param poolInstanceName: the pool to bind the composite volume to
+        :param compositeType: the proposed composite type of the volume
+                              e.g striped/concatenated
+        :param numMembers: the number of meta members to make up the composite.
+                           If it is 1 then a non composite is created
+        :returns: volumeDict -- dict describing the new volume
+        :returns: rc -- return code (0 on success)
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        # ElementSource value passed to the SMI-S call below.
+        newMembers = 2
+
+        LOG.debug(
+            "Parameters for CreateOrModifyCompositeElement: "
+            "elementCompositionService: %(elementCompositionService)s  "
+            "provisioning: %(provisioning)lu "
+            "volumeSize: %(volumeSize)s "
+            "newMembers: %(newMembers)lu "
+            "poolInstanceName: %(poolInstanceName)s "
+            "compositeType: %(compositeType)lu "
+            "numMembers: %(numMembers)s "
+            % {'elementCompositionService': elementCompositionService,
+               'provisioning': THINPROVISIONINGCOMPOSITE,
+               'volumeSize': volumeSize,
+               'newMembers': newMembers,
+               'poolInstanceName': poolInstanceName,
+               'compositeType': compositeType,
+               'numMembers': numMembers})
+
+        rc, job = conn.InvokeMethod(
+            'CreateOrModifyCompositeElement', elementCompositionService,
+            ElementType=self.utils.get_num(THINPROVISIONINGCOMPOSITE, '16'),
+            Size=self.utils.get_num(volumeSize, '64'),
+            ElementSource=self.utils.get_num(newMembers, '16'),
+            EMCInPools=[poolInstanceName],
+            CompositeType=self.utils.get_num(compositeType, '16'),
+            EMCNumberOfMembers=self.utils.get_num(numMembers, '32'))
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Create Volume: %(volumename)s.  "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'volumename': volumeName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        # Find the newly created volume
+        volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
+
+        return volumeDict, rc
+
+    def create_new_composite_volume(
+            self, conn, elementCompositionService, compositeHeadInstanceName,
+            compositeMemberInstanceName, compositeType):
+        """Creates a new composite volume.
+
+        Given a bound composite head and an unbound composite member
+        create a new composite volume.
+
+        :param conn: the connection to the ecom server
+        :param elementCompositionService: the element composition service
+        :param compositeHeadInstanceName: the composite head. This can be bound
+        :param compositeMemberInstanceName: the composite member.
+                                            This must be unbound
+        :param compositeType: the composite type e.g striped or concatenated
+        :returns: rc -- return code (0 on success)
+        :returns: job -- the job dict from the create call
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, job = conn.InvokeMethod(
+            'CreateOrModifyCompositeElement', elementCompositionService,
+            ElementType=self.utils.get_num('2', '16'),
+            InElements=(
+                [compositeHeadInstanceName, compositeMemberInstanceName]),
+            CompositeType=self.utils.get_num(compositeType, '16'))
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Creating new composite Volume Return code: %(rc)lu."
+                    "Error: %(error)s")
+                    % {'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        return rc, job
+
+    def _migrate_volume(
+            self, conn, storageRelocationServiceInstanceName,
+            volumeInstanceName, targetPoolInstanceName):
+        """Migrate a volume to another pool.
+
+        :param conn: the connection to the ecom server
+        :param storageRelocationServiceInstanceName: the storage relocation
+                                                     service
+        :param volumeInstanceName: the volume to be migrated
+        :param targetPoolInstanceName: the target pool to migrate the volume to
+        :returns: rc -- return code (0 on success)
+        :raises: VolumeBackendAPIException if the job fails
+        """
+        rc, job = conn.InvokeMethod(
+            'RelocateStorageVolumesToStoragePool',
+            storageRelocationServiceInstanceName,
+            TheElements=[volumeInstanceName],
+            TargetPool=targetPoolInstanceName)
+
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Migrating volume from one pool to another. "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        return rc
+
+    def migrate_volume_to_storage_pool(
+            self, conn, storageRelocationServiceInstanceName,
+            volumeInstanceName, targetPoolInstanceName):
+        """Migrate a volume to the given target storage pool.
+
+        If the first attempt fails because the volume is already the
+        source of a migration session, terminate that session and retry
+        the migration once.
+
+        :param conn: the connection to the ecom server
+        :param storageRelocationServiceInstanceName: the storage relocation
+                                                     service
+        :param volumeInstanceName: the volume to be migrated
+        :param targetPoolInstanceName: the target pool to migrate the
+                                       volume to.
+        :returns: rc
+        :raises: VolumeBackendAPIException if the session cannot be
+                 terminated or both migration attempts fail
+        """
+        LOG.debug(
+            "Volume instance name is %(volumeInstanceName)s. "
+            "Pool instance name is : %(targetPoolInstanceName)s. "
+            % {'volumeInstanceName': volumeInstanceName,
+               'targetPoolInstanceName': targetPoolInstanceName})
+        rc = -1
+        try:
+            rc = self._migrate_volume(
+                conn, storageRelocationServiceInstanceName,
+                volumeInstanceName, targetPoolInstanceName)
+        except Exception as ex:
+            # Only retry when the error says the volume is already in a
+            # migration session; anything else is fatal.
+            if 'source of a migration session' in six.text_type(ex):
+                try:
+                    rc = self._terminate_migrate_session(
+                        conn, volumeInstanceName)
+                except Exception as ex:
+                    LOG.error(_("Exception: %s") % six.text_type(ex))
+                    exceptionMessage = (_(
+                        "Failed to terminate migrate session"))
+                    LOG.error(exceptionMessage)
+                    raise exception.VolumeBackendAPIException(
+                        data=exceptionMessage)
+                try:
+                    rc = self._migrate_volume(
+                        conn, storageRelocationServiceInstanceName,
+                        volumeInstanceName, targetPoolInstanceName)
+                except Exception as ex:
+                    LOG.error(_("Exception: %s") % six.text_type(ex))
+                    exceptionMessage = (_(
+                        "Failed to migrate volume for the second time"))
+                    LOG.error(exceptionMessage)
+                    raise exception.VolumeBackendAPIException(
+                        data=exceptionMessage)
+
+            else:
+                LOG.error(_("Exception: %s") % six.text_type(ex))
+                exceptionMessage = (_(
+                    "Failed to migrate volume for the first time"))
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+
+        return rc
+
+    def _terminate_migrate_session(self, conn, volumeInstanceName):
+        """Given the volume instance terminate a migrate session.
+
+        :param conn: the connection to the ecom server
+        :param volumeInstanceName: the volume to be migrated
+        :returns: rc
+        :raises: VolumeBackendAPIException if the state change fails
+        """
+        # NOTE(review): 32769 appears to be a vendor-specific
+        # RequestedState value that cancels the in-progress migration
+        # session -- confirm against the SMI-S provider documentation.
+        rc, job = conn.InvokeMethod(
+            'RequestStateChange', volumeInstanceName,
+            RequestedState=self.utils.get_num(32769, '16'))
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Terminating migrate session. "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        return rc
+
+    def create_element_replica(
+            self, conn, repServiceInstanceName, cloneName,
+            sourceName, sourceInstance):
+        """Make SMI-S call to create replica for source element.
+
+        :param conn: the connection to the ecom server
+        :param repServiceInstanceName: instance name of the replication service
+        :param cloneName: replica name
+        :param sourceName: source volume name
+        :param sourceInstance: source volume instance
+        :returns: rc - return code
+        :returns: job - job object of the replica creation operation
+        :raises: VolumeBackendAPIException if the replica creation fails
+        """
+        # SyncType 8 is 'Clone' in the CIM CreateElementReplica ValueMap.
+        rc, job = conn.InvokeMethod(
+            'CreateElementReplica', repServiceInstanceName,
+            ElementName=cloneName,
+            SyncType=self.utils.get_num(8, '16'),
+            SourceElement=sourceInstance.path)
+
+        # Non-zero rc may indicate an asynchronous job; poll it for the
+        # final status before deciding the call failed.
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error Create Cloned Volume: "
+                    "Volume: %(cloneName)s  Source Volume:"
+                    "%(sourceName)s.  Return code: %(rc)lu. "
+                    "Error: %(error)s")
+                    % {'cloneName': cloneName,
+                       'sourceName': sourceName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        return rc, job
+
+    def delete_clone_relationship(
+            self, conn, repServiceInstanceName, syncInstanceName,
+            cloneName, sourceName):
+        """Deletes the relationship between the clone and source volume.
+
+        Makes an SMI-S call to break clone relationship between the clone
+        volume and the source
+
+        :param conn: the connection to the ecom server
+        :param repServiceInstanceName: instance name of the replication service
+        :param syncInstanceName: instance name of the
+                                 SE_StorageSynchronized_SV_SV object
+        :param cloneName: replica name
+        :param sourceName: source volume name
+        :returns: rc - return code
+        :returns: job - job object of the replica creation operation
+        :raises: VolumeBackendAPIException if the detach operation fails
+        """
+
+        '''
+        8/Detach - Delete the synchronization between two storage objects.
+        Treat the objects as independent after the synchronization is deleted.
+        '''
+        rc, job = conn.InvokeMethod(
+            'ModifyReplicaSynchronization', repServiceInstanceName,
+            Operation=self.utils.get_num(8, '16'),
+            Synchronization=syncInstanceName)
+
+        LOG.debug("Break clone relationship: Volume: %(cloneName)s  "
+                  "Source Volume: %(sourceName)s  Return code: %(rc)lu"
+                  % {'cloneName': cloneName,
+                     'sourceName': sourceName,
+                     'rc': rc})
+
+        # Non-zero rc may indicate an asynchronous job; poll it for the
+        # final status before deciding the call failed.
+        if rc != 0L:
+            rc, errordesc = self.utils.wait_for_job_complete(conn, job)
+            if rc != 0L:
+                exceptionMessage = (_(
+                    "Error break clone relationship: "
+                    "Clone Volume: %(cloneName)s  "
+                    "Source Volume: %(sourceName)s.  "
+                    "Return code: %(rc)lu.  Error: %(error)s")
+                    % {'cloneName': cloneName,
+                       'sourceName': sourceName,
+                       'rc': rc,
+                       'error': errordesc})
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(
+                    data=exceptionMessage)
+        return rc, job
+
+    def get_target_endpoints(self, conn, storageHardwareService, hardwareId):
+        """Given the hardwareId, get its target endpoints (target WWNs).
+
+        :param conn: the connection to the ecom server
+        :param storageHardwareService: the storage HardwareId Service
+        :param hardwareId: the hardware Id
+        :returns: rc
+        :returns: targetendpoints
+        :raises: VolumeBackendAPIException if the lookup fails
+        """
+        rc, targetEndpoints = conn.InvokeMethod(
+            'EMCGetTargetEndpoints', storageHardwareService,
+            HardwareId=hardwareId)
+
+        if rc != 0L:
+            exceptionMessage = (_("Error finding Target WWNs."))
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return rc, targetEndpoints
diff --git a/cinder/volume/drivers/emc/emc_vmax_utils.py b/cinder/volume/drivers/emc/emc_vmax_utils.py
new file mode 100644 (file)
index 0000000..fc05bf8
--- /dev/null
@@ -0,0 +1,1187 @@
+# Copyright (c) 2012 - 2014 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import random
+import re
+from xml.dom.minidom import parseString
+
+import six
+
+from cinder import context
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+
+# pywbem is only needed when actually talking to the ecom server; keep the
+# module importable (e.g. for unit tests) when it is absent.
+try:
+    import pywbem
+    pywbemAvailable = True
+except ImportError:
+    pywbemAvailable = False
+
+# Masking group 'Type' values -- presumably the SMI-S group-type codes used
+# by the masking code; confirm against the provider documentation.
+STORAGEGROUPTYPE = 4
+POSTGROUPTYPE = 3
+
+# CIM namespace used when building EMC instance names.
+EMC_ROOT = 'root/emc'
+CONCATENATED = 'concatenated'
+# Per-backend config file path is <PREFIX><config_group><POSTFIX>.
+CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_'
+CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml'
+ISCSI = 'iscsi'
+FC = 'fc'
+# Job/sync polling: up to JOB_RETRIES attempts, INTERVAL_10_SEC seconds
+# apart (10 minutes in total).
+JOB_RETRIES = 60
+INTERVAL_10_SEC = 10
+
+
+class EMCVMAXUtils(object):
+    """Utility class for SMI-S based EMC volume drivers.
+
+    This Utility class is for EMC volume drivers based on SMI-S.
+    It supports VMAX arrays.
+    """
+
+    def __init__(self, prtcl):
+        """Initialize the utility class.
+
+        :param prtcl: the protocol the owning driver uses
+                      (e.g. 'iscsi' or 'fc')
+        """
+        if not pywbemAvailable:
+            LOG.info(_(
+                'Module PyWBEM not installed.  '
+                'Install PyWBEM using the python-pywbem package.'))
+        self.protocol = prtcl
+
+    def find_storage_configuration_service(self, conn, storageSystemName):
+        """Given the storage system name, get the storage configuration service
+
+        :param conn: connection to the ecom server
+        :param storageSystemName: the storage system name
+        :returns: foundconfigService
+        """
+        foundConfigService = None
+        configservices = conn.EnumerateInstanceNames(
+            'EMC_StorageConfigurationService')
+        for configservice in configservices:
+            if storageSystemName == configservice['SystemName']:
+                foundConfigService = configservice
+                LOG.debug("Found Storage Configuration Service: "
+                          "%(configservice)s"
+                          % {'configservice': configservice})
+                break
+
+        if foundConfigService is None:
+            exceptionMessage = (_("Storage Configuration Service not found "
+                                  "on %(storageSystemName)s")
+                                % {'storageSystemName': storageSystemName})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return foundConfigService
+
+    def find_controller_configuration_service(self, conn, storageSystemName):
+        """Get the controller config by using the storage service name.
+
+        Given the storage system name, get the controller configuration
+        service.
+
+        :param conn: connection to the ecom server
+        :param storageSystemName: the storage system name
+        :returns: foundconfigService
+        """
+        foundConfigService = None
+        configservices = conn.EnumerateInstanceNames(
+            'EMC_ControllerConfigurationService')
+        for configservice in configservices:
+            if storageSystemName == configservice['SystemName']:
+                foundConfigService = configservice
+                LOG.debug("Found Controller Configuration Service: "
+                          "%(configservice)s"
+                          % {'configservice': configservice})
+                break
+
+        if foundConfigService is None:
+            exceptionMessage = (_("Controller Configuration Service not found "
+                                  "on %(storageSystemName)s")
+                                % {'storageSystemName': storageSystemName})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return foundConfigService
+
+    def find_element_composition_service(self, conn, storageSystemName):
+        """Given the storage system name, get the element composition service.
+
+        :param conn: the connection to the ecom server
+        :param storageSystemName: the storage system name
+        :returns: foundElementCompositionService
+        """
+        foundElementCompositionService = None
+        elementCompositionServices = conn.EnumerateInstanceNames(
+            'Symm_ElementCompositionService')
+        for elementCompositionService in elementCompositionServices:
+            if storageSystemName == elementCompositionService['SystemName']:
+                foundElementCompositionService = elementCompositionService
+                LOG.debug("Found Element Composition Service:"
+                          "%(elementCompositionService)s"
+                          % {'elementCompositionService':
+                              elementCompositionService})
+                break
+        if foundElementCompositionService is None:
+            exceptionMessage = (_("Element Composition Service not found "
+                                  "on %(storageSystemName)s")
+                                % {'storageSystemName': storageSystemName})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return foundElementCompositionService
+
+    def find_storage_relocation_service(self, conn, storageSystemName):
+        """Given the storage system name, get the storage relocation service.
+
+        :param conn: the connection to the ecom server
+        :param storageSystemName: the storage system name
+        :returns: foundStorageRelocationService
+        """
+        foundStorageRelocationService = None
+        storageRelocationServices = conn.EnumerateInstanceNames(
+            'Symm_StorageRelocationService')
+        for storageRelocationService in storageRelocationServices:
+            if storageSystemName == storageRelocationService['SystemName']:
+                foundStorageRelocationService = storageRelocationService
+                LOG.debug("Found Element Composition Service:"
+                          "%(storageRelocationService)s"
+                          % {'storageRelocationService':
+                             storageRelocationService})
+                break
+
+        if foundStorageRelocationService is None:
+            exceptionMessage = (_("Storage Relocation Service not found "
+                                  "on %(storageSystemName)s")
+                                % {'storageSystemName': storageSystemName})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return foundStorageRelocationService
+
+    def find_storage_hardwareid_service(self, conn, storageSystemName):
+        """Given the storage system name, get the storage hardware service.
+
+        :param conn: the connection to the ecom server
+        :param storageSystemName: the storage system name
+        :returns: foundHardwareService
+        :raises: VolumeBackendAPIException if no match is found
+        """
+        foundHardwareService = None
+        storageHardwareservices = conn.EnumerateInstanceNames(
+            'EMC_StorageHardwareIDManagementService')
+        for storageHardwareservice in storageHardwareservices:
+            if storageSystemName == storageHardwareservice['SystemName']:
+                foundHardwareService = storageHardwareservice
+                LOG.debug("Found Storage Hardware ID Management Service:"
+                          "%(storageHardwareservice)s"
+                          % {'storageHardwareservice': storageHardwareservice})
+                break
+
+        if foundHardwareService is None:
+            exceptionMessage = (_("Storage HardwareId mgmt Service not found "
+                                  "on %(storageSystemName)s")
+                                % {'storageSystemName': storageSystemName})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return foundHardwareService
+
+    def find_replication_service(self, conn, storageSystemName):
+        """Given the storage system name, get the replication service.
+
+        :param conn: the connection to the ecom server
+        :param storageSystemName: the storage system name
+        :returns: foundRepService
+        """
+        foundRepService = None
+        repservices = conn.EnumerateInstanceNames(
+            'EMC_ReplicationService')
+        for repservice in repservices:
+            if storageSystemName == repservice['SystemName']:
+                foundRepService = repservice
+                LOG.debug("Found Replication Service:"
+                          "%(repservice)s"
+                          % {'repservice': repservice})
+                break
+        if foundRepService is None:
+            exceptionMessage = (_("Replication Service not found "
+                                  "on %(storageSystemName)s")
+                                % {'storageSystemName': storageSystemName})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return foundRepService
+
+    def get_tier_policy_service(self, conn, storageSystemInstanceName):
+        """Gets the tier policy service for a given storage system instance.
+
+        Given the storage system instance name, get the existing tier
+        policy service.
+
+        :param conn: the connection information to the ecom server
+        :param storageSystemInstanceName: the storageSystem instance Name
+        :returns: foundTierPolicyService - the tier policy
+                                                  service instance name
+        :raises: VolumeBackendAPIException if no service is associated
+        """
+        foundTierPolicyService = None
+        groups = conn.AssociatorNames(
+            storageSystemInstanceName,
+            ResultClass='Symm_TierPolicyService',
+            AssocClass='CIM_HostedService')
+
+        # Only the first associated service is used.
+        if len(groups) > 0:
+            foundTierPolicyService = groups[0]
+        if foundTierPolicyService is None:
+            exceptionMessage = (_(
+                "Tier Policy Service not found "
+                "for %(storageSystemName)s")
+                % {'storageSystemName': storageSystemInstanceName})
+            LOG.error(exceptionMessage)
+            raise exception.VolumeBackendAPIException(data=exceptionMessage)
+
+        return foundTierPolicyService
+
+    def wait_for_job_complete(self, conn, job):
+        """Given the job wait for it to complete.
+
+        Blocks (polling) until the job leaves its running states, then
+        reads the final error code and description from the job instance.
+
+        :param conn: connection the ecom server
+        :param job: the job dict
+        :returns: rc - the return code
+        :returns: errorDesc - the error description string
+        """
+
+        jobInstanceName = job['Job']
+        self._wait_for_job_complete(conn, job)
+        # Re-fetch the job instance to pick up its final status fields.
+        jobinstance = conn.GetInstance(jobInstanceName,
+                                       LocalOnly=False)
+        rc = jobinstance['ErrorCode']
+        errorDesc = jobinstance['ErrorDescription']
+        LOG.debug('Return code is: %(rc)lu'
+                  'Error Description is: %(errorDesc)s'
+                  % {'rc': rc,
+                     'errorDesc': errorDesc})
+
+        return rc, errorDesc
+
+    def _wait_for_job_complete(self, conn, job):
+        """Given the job wait for it to complete.
+
+        Polls the job every INTERVAL_10_SEC seconds and gives up after
+        JOB_RETRIES attempts; the caller reads the final job status.
+
+        :param conn: connection the ecom server
+        :param job: the job dict
+        """
+
+        def _wait_for_job_complete():
+            """Called at an interval until the job is finished"""
+            if self._is_job_finished(conn, job):
+                raise loopingcall.LoopingCallDone()
+            if self.retries > JOB_RETRIES:
+                LOG.error(_("_wait_for_job_complete failed after %(retries)d "
+                          "tries") % {'retries': self.retries})
+
+                raise loopingcall.LoopingCallDone()
+            try:
+                self.retries += 1
+                # NOTE(review): this second _is_job_finished call only runs
+                # after the check above returned False moments earlier, so
+                # it looks redundant -- confirm before simplifying.
+                if not self.wait_for_job_called:
+                    if self._is_job_finished(conn, job):
+                        self.wait_for_job_called = True
+            except Exception as e:
+                LOG.error(_("Exception: %s") % six.text_type(e))
+                exceptionMessage = (_("Issue encountered waiting for job."))
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(exceptionMessage)
+
+        # State shared with the looping callback via instance attributes.
+        self.retries = 0
+        self.wait_for_job_called = False
+        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete)
+        timer.start(interval=INTERVAL_10_SEC).wait()
+
+    def _is_job_finished(self, conn, job):
+        """Check if the job is finished.
+
+        :param conn: connection the ecom server
+        :param job: the job dict
+        :returns: True if finished; False if not finished;
+        """
+
+        jobInstanceName = job['Job']
+        jobinstance = conn.GetInstance(jobInstanceName,
+                                       LocalOnly=False)
+        jobstate = jobinstance['JobState']
+        # From ValueMap of JobState in CIM_ConcreteJob
+        # 2L=New, 3L=Starting, 4L=Running, 32767L=Queue Pending
+        # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767,
+        # 32768..65535"),
+        # Values("New, Starting, Running, Suspended, Shutting Down,
+        # Completed, Terminated, Killed, Exception, Service,
+        # Query Pending, DMTF Reserved, Vendor Reserved")]
+        # A job still new/starting/running/pending is not finished.
+        if jobstate in [2L, 3L, 4L, 32767L]:
+            return False
+        else:
+            return True
+
+    def wait_for_sync(self, conn, syncName):
+        """Given the sync name wait for it to fully synchronize.
+
+        Polls every INTERVAL_10_SEC seconds and gives up after
+        JOB_RETRIES attempts.
+
+        :param conn: connection the ecom server
+        :param syncName: the syncName
+        """
+
+        def _wait_for_sync():
+            """Called at an interval until the synchronization is finished"""
+            if self._is_sync_complete(conn, syncName):
+                raise loopingcall.LoopingCallDone()
+            if self.retries > JOB_RETRIES:
+                LOG.error(_("_wait_for_sync failed after %(retries)d tries")
+                          % {'retries': self.retries})
+                raise loopingcall.LoopingCallDone()
+            try:
+                self.retries += 1
+                # NOTE(review): mirrors _wait_for_job_complete; this second
+                # completeness check looks redundant -- confirm before
+                # simplifying.
+                if not self.wait_for_sync_called:
+                    if self._is_sync_complete(conn, syncName):
+                        self.wait_for_sync_called = True
+            except Exception as e:
+                LOG.error(_("Exception: %s") % six.text_type(e))
+                exceptionMessage = (_("Issue encountered waiting for "
+                                      "synchronization."))
+                LOG.error(exceptionMessage)
+                raise exception.VolumeBackendAPIException(exceptionMessage)
+
+        # State shared with the looping callback via instance attributes.
+        self.retries = 0
+        self.wait_for_sync_called = False
+        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync)
+        timer.start(interval=INTERVAL_10_SEC).wait()
+
+    def _is_sync_complete(self, conn, syncName):
+        """Check if the job is finished.
+        :param conn: connection the ecom server
+        :param syncName: the sync name
+
+        :returns: True if fully synchronized; False if not;
+        """
+        syncInstance = conn.GetInstance(syncName,
+                                        LocalOnly=False)
+        percentSynced = syncInstance['PercentSynced']
+
+        if percentSynced < 100:
+            return False
+        else:
+            return True
+
+    def get_num(self, numStr, datatype):
+        """Get the ecom int from the number.
+
+        :param numStr: the number in string format
+        :param datatype: the type to convert it to
+        :returns: result
+        """
+        try:
+            result = {
+                '8': pywbem.Uint8(numStr),
+                '16': pywbem.Uint16(numStr),
+                '32': pywbem.Uint32(numStr),
+                '64': pywbem.Uint64(numStr)
+            }
+            result = result.get(datatype, numStr)
+        except NameError:
+            result = numStr
+
+        return result
+
+    def find_storage_system(self, conn, configService):
+        """Finds the storage system for a particular config service.
+
+        Given the storage configuration service get the CIM_StorageSystem
+        from it.
+
+        :param conn: the connection to the ecom server
+        :param storageConfigService: the storage configuration service
+        :returns: rc - the return code of the job
+        :returns: jobDict - the job dict
+        """
+        foundStorageSystemInstanceName = None
+        groups = conn.AssociatorNames(
+            configService,
+            AssocClass='CIM_HostedService')
+
+        if len(groups) > 0:
+            foundStorageSystemInstanceName = groups[0]
+        else:
+            exception_message = (_("Cannot get storage system"))
+            LOG.error(exception_message)
+            raise
+
+        return foundStorageSystemInstanceName
+
+    def get_storage_group_from_volume(self, conn, volumeInstanceName):
+        """Returns the storage group for a particular volume.
+
+        Given the volume instance name get the associated storage group if it
+        is belong to one
+
+        :param conn: connection the the ecom server
+        :param volumeInstanceName: the volume instance name
+        :returns: foundStorageGroupInstanceName - the storage group
+                                                  instance name
+        """
+        foundStorageGroupInstanceName = None
+
+        storageGroupInstanceNames = conn.AssociatorNames(
+            volumeInstanceName,
+            ResultClass='CIM_DeviceMaskingGroup')
+
+        if len(storageGroupInstanceNames) > 0:
+            foundStorageGroupInstanceName = storageGroupInstanceNames[0]
+
+        return foundStorageGroupInstanceName
+
+    def find_storage_masking_group(self, conn, controllerConfigService,
+                                   storageGroupName):
+        """Given the storage group name get the storage group.
+
+        :param conn: connection to the ecom server
+        :param controllerConfigService: the controllerConfigService
+        :param storageGroupName: the name of the storage group you are getting
+        :returns: the storage group instance name, or None when no group
+                  with that ElementName is associated with the service
+        """
+        foundStorageMaskingGroupInstanceName = None
+
+        storageMaskingGroupInstanceNames = (
+            conn.AssociatorNames(controllerConfigService,
+                                 ResultClass='CIM_DeviceMaskingGroup'))
+
+        # Groups are matched by their user-visible ElementName, which
+        # requires fetching each full instance.
+        for storageMaskingGroupInstanceName in \
+                storageMaskingGroupInstanceNames:
+            storageMaskingGroupInstance = conn.GetInstance(
+                storageMaskingGroupInstanceName)
+            if storageGroupName == storageMaskingGroupInstance['ElementName']:
+                foundStorageMaskingGroupInstanceName = (
+                    storageMaskingGroupInstanceName)
+                break
+        return foundStorageMaskingGroupInstanceName
+
+    def find_storage_system_name_from_service(self, configService):
+        """Given any service get the storage system name from it.
+
+        :param configService: the configuration service
+        :returns: configService['SystemName'] - storage system name (String)
+        """
+        return configService['SystemName']
+
+    def find_volume_instance(self, conn, volumeDict, volumeName):
+        """Given the volumeDict get the instance from it.
+
+        :param conn: connection to the ecom server
+        :param volumeDict: the volume Dict (must contain 'classname' and
+                           'keybindings')
+        :param volumeName: the user friendly name of the volume
+        :returns: foundVolumeInstance - the volume instance
+        """
+        volumeInstanceName = self.get_instance_name(volumeDict['classname'],
+                                                    volumeDict['keybindings'])
+        foundVolumeInstance = conn.GetInstance(volumeInstanceName)
+
+        if foundVolumeInstance is None:
+            LOG.debug("Volume %(volumeName)s not found on the array."
+                      % {'volumeName': volumeName})
+        else:
+            LOG.debug("Volume name: %(volumeName)s  Volume instance: "
+                      "%(vol_instance)s."
+                      % {'volumeName': volumeName,
+                         'vol_instance': foundVolumeInstance.path})
+
+        return foundVolumeInstance
+
+    def get_host_short_name(self, hostName):
+        """Returns the short name for a given qualified host name.
+
+        Checks the host name to see if it is the fully qualified host name
+        and returns part before the dot. If there is no dot in the hostName
+        the full hostName is returned.
+
+        NOTE(review): the code only shortens names with three or more
+        dot-separated parts (len(hostArray) > 2), so 'host.domain' is
+        returned unchanged, which contradicts the summary above --
+        confirm which behavior is intended before relying on it.
+
+        :param hostName: the fully qualified host name ()
+        :returns: shortHostName - the short hostName
+        """
+        shortHostName = None
+
+        hostArray = hostName.split('.')
+        if len(hostArray) > 2:
+            shortHostName = hostArray[0]
+        else:
+            shortHostName = hostName
+
+        return shortHostName
+
+    def get_instance_name(self, classname, bindings):
+        """Get the instance from the classname and bindings.
+
+        NOTE:  This exists in common too...will be moving it to other file
+        where both common and masking can access it
+
+        :param classname: the CIM class name of the instance
+        :param bindings: the key bindings identifying the instance
+        :returns: instanceName - the CIMInstanceName, or None when pywbem
+                  is not available
+        """
+        instanceName = None
+        try:
+            instanceName = pywbem.CIMInstanceName(
+                classname,
+                namespace=EMC_ROOT,
+                keybindings=bindings)
+        except NameError:
+            # pywbem failed to import at module load time.
+            instanceName = None
+
+        return instanceName
+
+    def get_ecom_server(self, filename):
+        """Given the file name get the ecomPort and ecomIP from it.
+
+        :param filename: the path and file name of the emc configuration file
+        :returns: ecomIp - the ecom IP address
+        :returns: ecomPort - the ecom port
+        """
+        myFile = open(filename, 'r')
+        data = myFile.read()
+        myFile.close()
+        dom = parseString(data)
+        ecomIps = dom.getElementsByTagName('EcomServerIp')
+        if ecomIps is not None and len(ecomIps) > 0:
+            ecomIp = ecomIps[0].toxml().replace('<EcomServerIp>', '')
+            ecomIp = ecomIp.replace('</EcomServerIp>', '')
+        ecomPorts = dom.getElementsByTagName('EcomServerPort')
+        if ecomPorts is not None and len(ecomPorts) > 0:
+            ecomPort = ecomPorts[0].toxml().replace('<EcomServerPort>', '')
+            ecomPort = ecomPort.replace('</EcomServerPort>', '')
+        if ecomIp is not None and ecomPort is not None:
+            LOG.debug("Ecom IP: %(ecomIp)s Port: %(ecomPort)s",
+                      {'ecomIp': ecomIp, 'ecomPort': ecomPort})
+            return ecomIp, ecomPort
+        else:
+            LOG.debug("Ecom server not found.")
+            return None
+
+    def get_ecom_cred(self, filename):
+        """Given the filename get the ecomUser and ecomPasswd.
+
+        :param filename: the path and filename of the emc configuration file
+        :returns: ecomUser - the ecom user
+        :returns: ecomPasswd - the ecom password
+        """
+        myFile = open(filename, 'r')
+        data = myFile.read()
+        myFile.close()
+        dom = parseString(data)
+        ecomUsers = dom.getElementsByTagName('EcomUserName')
+        if ecomUsers is not None and len(ecomUsers) > 0:
+            ecomUser = ecomUsers[0].toxml().replace('<EcomUserName>', '')
+            ecomUser = ecomUser.replace('</EcomUserName>', '')
+        ecomPasswds = dom.getElementsByTagName('EcomPassword')
+        if ecomPasswds is not None and len(ecomPasswds) > 0:
+            ecomPasswd = ecomPasswds[0].toxml().replace('<EcomPassword>', '')
+            ecomPasswd = ecomPasswd.replace('</EcomPassword>', '')
+        if ecomUser is not None and ecomPasswd is not None:
+            return ecomUser, ecomPasswd
+        else:
+            LOG.debug("Ecom user not found.")
+            return None
+
+    def parse_file_to_get_port_group_name(self, fileName):
+        """Parses a file and chooses a port group randomly.
+
+        Given a file, parse it to get all the possible portGroups and choose
+        one randomly.
+
+        :param fileName: the path and name of the file
+        :returns: portGroupName - the name of the port group chosen
+        """
+        portGroupName = None
+        myFile = open(fileName, 'r')
+        data = myFile.read()
+        myFile.close()
+        dom = parseString(data)
+        portGroups = dom.getElementsByTagName('PortGroups')
+        if portGroups is not None and len(portGroups) > 0:
+            portGroupsXml = portGroups[0].toxml()
+            portGroupsXml = portGroupsXml.replace('<PortGroups>', '')
+            portGroupsXml = portGroupsXml.replace('</PortGroups>', '')
+            portGroupsXml = portGroupsXml.replace('<PortGroup>', '')
+            portGroupsXml = portGroupsXml.replace('</PortGroup>', '')
+            # convert the newline separated string to a list
+            portGroupNames = (
+                [s.strip() for s in portGroupsXml.split('\n') if s])
+
+            numPortGroups = len(portGroupNames)
+
+            portGroupName = (
+                portGroupNames[random.randint(0, numPortGroups - 1)])
+
+            return portGroupName
+        else:
+            exception_message = (_("Port Group name not found."))
+            LOG.error(exception_message)
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+    def parse_fast_policy_name_from_file(self, fileName):
+        """Parse the fast policy name from config file.  If it is not there
+        then NON FAST is assumed
+
+        :param fileName: the path and name of the file
+        :returns: fastPolicyName - the fast policy name
+        """
+        fastPolicyName = None
+        myFile = open(fileName, 'r')
+        data = myFile.read()
+        myFile.close()
+        dom = parseString(data)
+        fastPolicy = dom.getElementsByTagName('FastPolicy')
+        if fastPolicy is not None and len(fastPolicy) > 0:
+            fastPolicyXml = fastPolicy[0].toxml()
+            fastPolicyXml = fastPolicyXml.replace('<FastPolicy>', '')
+            fastPolicyName = fastPolicyXml.replace('</FastPolicy>', '')
+            LOG.debug("File %(fileName)s: Fast Policy is %(fastPolicyName)s"
+                      % {'fileName': fileName,
+                         'fastPolicyName': fastPolicyName})
+            return fastPolicyName
+        else:
+            LOG.info(_("Fast Policy not found."))
+            return None
+
+    def parse_array_name_from_file(self, fileName):
+        """Parse the array name from config file.
+
+        If it is not there then there should only be one array configured to
+        the ecom. If there is more than one then erroneous results can occur.
+
+        :param fileName: the path and name of the file
+        :returns: arrayName - the array name
+        """
+        arrayName = None
+        myFile = open(fileName, 'r')
+        data = myFile.read()
+        myFile.close()
+        dom = parseString(data)
+        array = dom.getElementsByTagName('Array')
+        if array is not None and len(array) > 0:
+            arrayXml = array[0].toxml()
+            arrayXml = arrayXml.replace('<Array>', '')
+            arrayName = arrayXml.replace('</Array>', '')
+            return arrayName
+        else:
+            LOG.debug("Array not found from config file.")
+            return None
+
+    def parse_pool_name_from_file(self, fileName):
+        """Parse the pool name from config file.
+
+        If it is not there then we will attempt to get it from extra specs.
+
+        :param fileName: the path and name of the file
+        :returns: poolName - the pool name
+        """
+        poolName = None
+        myFile = open(fileName, 'r')
+        data = myFile.read()
+        myFile.close()
+        dom = parseString(data)
+        pool = dom.getElementsByTagName('Pool')
+        if pool is not None and len(pool) > 0:
+            poolXml = pool[0].toxml()
+            poolXml = poolXml.replace('<Pool>', '')
+            poolName = poolXml.replace('</Pool>', '')
+            return poolName
+        else:
+            LOG.debug("Pool not found from config file.")
+
+        return poolName
+
+    def parse_pool_instance_id(self, poolInstanceId):
+        """Given the instance Id parse the pool name and system name from it.
+
+        Example of pool InstanceId: Symmetrix+0001233455555+U+Pool 0
+
+        :param poolInstanceId: the path and name of the file
+        :returns: poolName - the pool name
+        :returns: systemName - the system name
+        """
+        poolName = None
+        systemName = None
+        endp = poolInstanceId.rfind('+')
+        if endp > -1:
+            poolName = poolInstanceId[endp + 1:]
+
+        idarray = poolInstanceId.split('+')
+        if len(idarray) > 2:
+            systemName = idarray[0] + '+' + idarray[1]
+
+        LOG.debug("Pool name: %(poolName)s  System name: %(systemName)s."
+                  % {'poolName': poolName, 'systemName': systemName})
+        return poolName, systemName
+
    def convert_gb_to_bits(self, strGbSize):
        """Convert GB(string) to bits(string).

        NOTE(review): despite the name, this multiplies by 1024**3, which
        converts gigabytes to *bytes*, not bits. Callers appear to depend on
        this value (paired with convert_bits_to_gbs) -- confirm before
        renaming or changing the factor.

        :param strGbSize: string -- The size in GB
        :returns: strBitsSize string -- The size in bits
        """
        strBitsSize = six.text_type(int(strGbSize) * 1024 * 1024 * 1024)

        LOG.debug("Converted %(strGbSize)s GBs to %(strBitsSize)s Bits"
                  % {'strGbSize': strGbSize, 'strBitsSize': strBitsSize})

        return strBitsSize
+
+    def check_if_volume_is_composite(self, conn, volumeInstance):
+        """Check if the volume is composite.
+
+        :param conn: the connection information to the ecom server
+        :param volumeInstance: the volume Instance
+        :returns: 'True', 'False' or 'Undetermined'
+        """
+        propertiesList = volumeInstance.properties.items()
+        for properties in propertiesList:
+            if properties[0] == 'IsComposite':
+                cimProperties = properties[1]
+
+                if 'True' in six.text_type(cimProperties.value):
+                    return 'True'
+                elif 'False' in six.text_type(cimProperties.value):
+                    return 'False'
+                else:
+                    return 'Undetermined'
+        return 'Undetermined'
+
+    def get_assoc_pool_from_volume(self, conn, volumeInstanceName):
+        """Give the volume instance get the associated pool instance
+
+        :param conn: connection to the ecom server
+        :param volumeInstanceName: the volume instance name
+        :returns: foundPoolInstanceName
+        """
+        foundPoolInstanceName = None
+        foundPoolInstanceNames = (
+            conn.AssociatorNames(volumeInstanceName,
+                                 ResultClass='EMC_VirtualProvisioningPool'))
+        if len(foundPoolInstanceNames) > 0:
+            foundPoolInstanceName = foundPoolInstanceNames[0]
+        return foundPoolInstanceName
+
+    def check_if_volume_is_concatenated(self, conn, volumeInstance):
+        """Checks if a volume is concatenated or not.
+
+        Check underlying CIM_StorageExtent to see if the volume is
+        concatenated or not.
+        If isConcatenated is true then it is a composite
+        If isConcatenated is False and isVolumeComposite is True then
+            it is a striped
+        If isConcatenated is False and isVolumeComposite is False then
+            it has no composite type and we can proceed.
+
+        :param conn: the connection information to the ecom server
+        :param volumeInstance: the volume instance
+        :returns: 'True', 'False' or 'Undetermined'
+        """
+        isConcatenated = None
+
+        isVolumeComposite = self.check_if_volume_is_composite(
+            conn, volumeInstance)
+
+        storageExtentInstanceNames = conn.AssociatorNames(
+            volumeInstance.path,
+            ResultClass='CIM_StorageExtent')
+
+        if len(storageExtentInstanceNames) > 0:
+            storageExtentInstanceName = storageExtentInstanceNames[0]
+            storageExtentInstance = conn.GetInstance(storageExtentInstanceName)
+
+            propertiesList = storageExtentInstance.properties.items()
+            for properties in propertiesList:
+                if properties[0] == 'IsConcatenated':
+                    cimProperties = properties[1]
+                    isConcatenated = six.text_type(cimProperties.value)
+
+                if isConcatenated is not None:
+                    break
+
+        if 'True' in isConcatenated:
+            return 'True'
+        elif 'False' in isConcatenated and 'True' in isVolumeComposite:
+            return 'False'
+        elif 'False' in isConcatenated and 'False' in isVolumeComposite:
+            return 'True'
+        else:
+            return 'Undetermined'
+
+    def get_composite_type(self, compositeTypeStr):
+        """Get the int value of composite type.
+
+        The default is '2' concatenated.
+
+        :param compositeTypeStr: 'concatenated' or 'striped'. Cannot be None
+        :returns: compositeType = 2 or 3
+        """
+        compositeType = 2
+        stripedStr = 'striped'
+        try:
+            if compositeTypeStr.lower() == stripedStr.lower():
+                compositeType = 3
+        except KeyError:
+            # Default to concatenated if not defined
+            pass
+
+        return compositeType
+
    def is_volume_bound_to_pool(self, conn, volumeInstance):
        '''Check if volume is bound to a pool.

        Reads the 'EMCIsBound' CIM property of the volume instance.
        (The original :param/:returns lines described a different method.)

        :param conn: the connection information to the ecom server
        :param volumeInstance: the volume instance
        :returns: 'True', 'False' or 'Undetermined'
        '''
        propertiesList = volumeInstance.properties.items()
        for properties in propertiesList:
            if properties[0] == 'EMCIsBound':
                cimProperties = properties[1]

                if 'True' in six.text_type(cimProperties.value):
                    return 'True'
                elif 'False' in six.text_type(cimProperties.value):
                    return 'False'
                else:
                    return 'Undetermined'
        # Property not present at all.
        return 'Undetermined'
+
+    def get_space_consumed(self, conn, volumeInstance):
+        '''Check the space consumed of a volume.
+
+        :param conn: the connection information to the ecom server
+        :param volumeInstance: the volume Instance
+        :returns: spaceConsumed
+        '''
+        foundSpaceConsumed = None
+        unitnames = conn.References(
+            volumeInstance, ResultClass='CIM_AllocatedFromStoragePool',
+            Role='Dependent')
+
+        for unitname in unitnames:
+            propertiesList = unitname.properties.items()
+            for properties in propertiesList:
+                if properties[0] == 'SpaceConsumed':
+                    cimProperties = properties[1]
+                    foundSpaceConsumed = cimProperties.value
+                    break
+            if foundSpaceConsumed is not None:
+                break
+
+        return foundSpaceConsumed
+
+    def get_volume_size(self, conn, volumeInstance):
+        '''Get the volume size.
+
+        ConsumableBlocks * BlockSize
+
+        :param conn: the connection information to the ecom server
+        :param volumeInstance: the volume Instance
+        :returns: volumeSizeOut
+        '''
+        volumeSizeOut = 'Undetermined'
+        numBlocks = 0
+        blockSize = 0
+
+        propertiesList = volumeInstance.properties.items()
+        for properties in propertiesList:
+            if properties[0] == 'ConsumableBlocks':
+                cimProperties = properties[1]
+                numBlocks = int(cimProperties.value)
+            if properties[0] == 'BlockSize':
+                cimProperties = properties[1]
+                blockSize = int(cimProperties.value)
+            if blockSize > 0 and numBlocks > 0:
+                break
+        if blockSize > 0 and numBlocks > 0:
+            volumeSizeOut = six.text_type(numBlocks * blockSize)
+
+        return volumeSizeOut
+
+    def determine_member_count(self, sizeStr, memberCount, compositeType):
+        '''Determines how many members a volume should contain.
+
+        Based on the size of the proposed volume, the compositeType and the
+        memberCount, determine (or validate) how many meta members there
+        should be in a volume.
+
+        :param sizeStr: the size in GBs of the proposed volume
+        :param memberCount: the initial member count
+        :param compositeType: the composite type
+        :returns: memberCount - string
+        :returns: errorDesc - the error description
+        '''
+        errorDesc = None
+        if compositeType in 'concatenated' and int(sizeStr) > 240:
+            newMemberCount = int(sizeStr) / 240
+            modular = int(sizeStr) % 240
+            if modular > 0:
+                newMemberCount += 1
+            memberCount = six.text_type(newMemberCount)
+
+        if compositeType in 'striped':
+            metaSize = int(sizeStr) / int(memberCount)
+            modular = int(sizeStr) % int(memberCount)
+            metaSize = metaSize + modular
+            if metaSize > 240:
+                errorDesc = ('Meta Size is greater than maximum allowed meta '
+                             'size')
+
+        return memberCount, errorDesc
+
+    def get_extra_specs_by_volume_type_name(self, volumeTypeName):
+        """Gets the extra specs associated with a volume type.
+
+        Given the string value of the volume type name, get the extra specs
+        object associated with the volume type
+
+        :param volumeTypeName: string value of the volume type name
+        :returns: extra_specs - extra specs object
+        """
+        ctxt = context.get_admin_context()
+        volume_type = volume_types.get_volume_type_by_name(
+            ctxt, volumeTypeName)
+        extra_specs = volume_type['extra_specs']
+        return extra_specs
+
+    def get_pool_capacities(self, conn, poolName, storageSystemName):
+        """Get the total and remaining capacity in GB for a storage pool.
+
+        Given the storage pool name, get the total capacity and remaining
+        capacity in GB
+
+        :param conn: connection the the ecom server
+        :param storagePoolName: string value of the storage pool name
+        :returns: total_capacity_gb - total capacity of the storage pool in GB
+        :returns: free_capacity_gb - remaining capacity of the
+                                     storage pool in GB
+        """
+        LOG.debug("Retrieving capacity for pool %(poolName)s on array "
+                  "%(array)s"
+                  % {'poolName': poolName,
+                     'array': storageSystemName})
+
+        poolInstanceName = self.get_pool_by_name(
+            conn, poolName, storageSystemName)
+        if poolInstanceName is None:
+            LOG.error("Unable to retrieve pool instance of %(poolName)s on "
+                      "array %(array)s"
+                      % {'poolName': poolName,
+                         'array': storageSystemName})
+            return (0, 0)
+        storagePoolInstance = conn.GetInstance(
+            poolInstanceName, LocalOnly=False)
+        total_capacity_gb = self.convert_bits_to_gbs(
+            storagePoolInstance['TotalManagedSpace'])
+        allocated_capacity_gb = self.convert_bits_to_gbs(
+            storagePoolInstance['EMCSubscribedCapacity'])
+        free_capacity_gb = total_capacity_gb - allocated_capacity_gb
+        return (total_capacity_gb, free_capacity_gb)
+
+    def get_pool_by_name(self, conn, storagePoolName, storageSystemName):
+        """Returns the instance name associated with a storage pool name.
+
+        :param conn: connection the the ecom server
+        :param storagePoolName: string value of the storage pool name
+        :param storageSystemName: string value of array
+        :returns: poolInstanceName - instance name of storage pool
+        """
+        poolInstanceName = None
+        LOG.debug("storagePoolName: %(poolName)s, storageSystemName: %(array)s"
+                  % {'poolName': storagePoolName,
+                     'array': storageSystemName})
+        poolInstanceNames = conn.EnumerateInstanceNames(
+            'EMC_VirtualProvisioningPool')
+        for pool in poolInstanceNames:
+            poolName, systemName = (
+                self.parse_pool_instance_id(pool['InstanceID']))
+            if (poolName == storagePoolName and
+                    storageSystemName in systemName):
+                poolInstanceName = pool
+
+        return poolInstanceName
+
+    def convert_bits_to_gbs(self, strBitSize):
+        """Convert Bits(string) to GB(string).
+
+        :param strBitSize: string -- The size in bits
+        :returns: gbSize string -- The size in GB
+        """
+        gbSize = int(strBitSize) / 1024 / 1024 / 1024
+        return gbSize
+
+    def compare_size(self, size1Str, size2Str):
+        """Compare the bit sizes to an approximate.
+
+        :param size1Str: the first bit size (String)
+        :param size2Str: the second bit size (String)
+        :returns: size1GBs - size2GBs (int)
+        """
+        size1GBs = self.convert_bits_to_gbs(size1Str)
+        size2GBs = self.convert_bits_to_gbs(size2Str)
+
+        return size1GBs - size2GBs
+
    def get_volumetype_extraspecs(self, volume):
        """Get the extra specs of the volume's volume type.

        (The original docstring was a copy/paste from compare_size.)

        :param volume: the volume dictionary
        :returns: extraSpecs - the extra specs dict (empty on any failure)
        """
        extraSpecs = {}

        try:
            type_id = volume['volume_type_id']
            if type_id is not None:
                extraSpecs = volume_types.get_volume_type_extra_specs(type_id)

        except Exception:
            # Deliberate best-effort: a missing/invalid volume type must not
            # fail the calling operation, so fall through with empty specs.
            pass

        return extraSpecs
+
+    def get_volume_type_name(self, volume):
+        """Get the volume type name.
+
+        :param volume: the volume dictionary
+        :returns: volumeTypeName - the volume type name
+        """
+        volumeTypeName = None
+
+        ctxt = context.get_admin_context()
+        typeId = volume['volume_type_id']
+        if typeId is not None:
+            volumeType = volume_types.get_volume_type(ctxt, typeId)
+            volumeTypeName = volumeType['name']
+
+        return volumeTypeName
+
+    def parse_volume_type_from_filename(self, emcConfigFile):
+        """Parse the volume type from the file (if it exists).
+
+        :param emcConfigFile: the EMC configuration file
+        :returns: volumeTypeName - the volume type name
+        """
+        volumeTypeName = None
+
+        m = re.search('/etc/cinder/cinder_emc_config_(.+?).xml', emcConfigFile)
+        if m:
+            volumeTypeName = m.group(1)
+
+        return volumeTypeName
+
    def get_volumes_from_pool(self, conn, poolInstanceName):
        '''Get the storage volumes allocated from a storage pool.

        (The original docstring was a copy/paste from get_space_consumed.)

        :param conn: the connection information to the ecom server
        :param poolInstanceName: the pool instance name
        :returns: list of CIM_StorageVolume instance names allocated from
                  the pool
        '''
        return conn.AssociatorNames(
            poolInstanceName, AssocClass='CIM_AllocatedFromStoragePool',
            ResultClass='CIM_StorageVolume')
+
+    def check_is_volume_bound_to_pool(self, conn, volumeInstance):
+        '''Check the space consumed of a volume.
+
+        :param conn: the connection information to the ecom server
+        :param volumeInstance: the volume Instance
+        :returns: spaceConsumed
+        '''
+        foundSpaceConsumed = None
+        unitnames = conn.References(
+            volumeInstance, ResultClass='CIM_AllocatedFromStoragePool',
+            Role='Dependent')
+
+        for unitname in unitnames:
+            propertiesList = unitname.properties.items()
+            for properties in propertiesList:
+                if properties[0] == 'EMCBoundToThinStoragePool':
+                    cimProperties = properties[1]
+                    foundSpaceConsumed = cimProperties.value
+                    break
+            if foundSpaceConsumed is not None:
+                break
+        if 'True' in six.text_type(cimProperties.value):
+            return 'True'
+        elif 'False' in six.text_type(cimProperties.value):
+            return 'False'
+        else:
+            return 'Undetermined'
+
+    def get_short_protocol_type(self, protocol):
+        '''Given the protocol type, return I for iscsi and F for fc
+
+        :param protocol: iscsi or fc
+        :returns: 'I' or 'F'
+        '''
+        if protocol.lower() == ISCSI.lower():
+            return 'I'
+        elif protocol.lower() == FC.lower():
+            return 'F'
+        else:
+            return protocol
+
+    def get_hardware_id_instance_names_from_array(
+            self, conn, hardwareIdManagementService):
+        """Get all the hardware ids from an array.
+
+        :param conn: connection to the ecom
+        :param: hardwareIdManagementService - hardware id management service
+        :returns: hardwareIdInstanceNames - the list of hardware
+                                            id instance names
+        """
+        hardwareIdInstanceNames = (
+            conn.AssociatorNames(hardwareIdManagementService,
+                                 ResultClass='SE_StorageHardwareID'))
+
+        return hardwareIdInstanceNames
+
+    def find_ip_protocol_endpoint(self, conn, storageSystemName):
+        '''Find the IP protocol endpoint for ISCSI.
+
+        :param conn: the connection to the ecom server
+        :param storageSystemName: the storage system name
+        :returns: foundIpAddress
+        '''
+        foundIpAddress = None
+        ipProtocolEndpointInstances = conn.EnumerateInstances(
+            'CIM_IPProtocolEndpoint')
+
+        for ipProtocolEndpointInstance in ipProtocolEndpointInstances:
+            ipStorageSystemName = (
+                ipProtocolEndpointInstance.path['SystemName'])
+            if storageSystemName in ipStorageSystemName:
+                propertiesList = (
+                    ipProtocolEndpointInstance.properties.items())
+                for properties in propertiesList:
+                    if properties[0] == 'ElementName':
+                        cimProperties = properties[1]
+                        foundIpAddress = cimProperties.value
+                        break
+                if foundIpAddress is not None:
+                    break
+
+        return foundIpAddress
index 7cc9b416843cc346e070fab74ad9bc5daddc844c..55a75492bf73695717712c8ad176b76eabbf8cee 100644 (file)
 
 
 #
-# Options defined in cinder.volume.drivers.emc.emc_smis_common
+# Options defined in cinder.volume.drivers.emc.emc_vmax_common
 #
 
-# The configuration file for the Cinder EMC driver (string
+# use this file for cinder emc plugin config data (string
 # value)
 #cinder_emc_config_file=/etc/cinder/cinder_emc_config.xml