review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Add volume drivers for Infortrend Storage
author    Lee <jessy1092@gmail.com>
Fri, 24 Apr 2015 07:50:20 +0000 (15:50 +0800)
committer Lee <jessy1092@gmail.com>
Wed, 17 Jun 2015 12:43:49 +0000 (20:43 +0800)
Infortrend implements iSCSI and FC volume drivers for the
EonStor DS product line.
The drivers manage storage through the Infortrend CLI tool.

common_cli.py implements the basic Cinder driver API.
infortrend_fc_cli.py and infortrend_iscsi_cli.py build on it to
provide FC- and iSCSI-specific support.
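
A minimal sketch of the layering described above (class and method names
here are assumptions for illustration; only the module split is taken
from this change):

    # Hypothetical sketch of the common/protocol split; names are
    # illustrative, not copied from the driver modules listed below.
    class InfortrendCommon(object):
        """Protocol-neutral driver logic (the role of common_cli.py)."""

        def __init__(self, protocol, configuration=None):
            self.protocol = protocol        # 'iSCSI' or 'FC'
            self.configuration = configuration

        def create_volume(self, volume):
            # Would invoke the Infortrend CLI to create a partition.
            return {'provider_location': 'system_id^0@partition_id^0'}

    class ISCSIDriverSketch(object):
        """Thin wrapper playing the role of infortrend_iscsi_cli.py."""

        def __init__(self, configuration=None):
            self.common = InfortrendCommon('iSCSI',
                                           configuration=configuration)

        def create_volume(self, volume):
            return self.common.create_volume(volume)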

Supported features:
- Volume Create/Delete
- Volume Attach/Detach
- Snapshot Create/Delete
- Create Volume from Snapshot
- Get Volume Stats
- Copy Image to Volume
- Copy Volume to Image
- Clone Volume
- Extend Volume

Change-Id: I830c5a48a5fb85707f02396b4634825e27455e8a
Implements: blueprint infortrend-iscsi-fc-volume-driver

cinder/exception.py
cinder/tests/unit/test_infortrend_cli.py [new file with mode: 0644]
cinder/tests/unit/test_infortrend_common.py [new file with mode: 0644]
cinder/volume/drivers/infortrend/__init__.py [new file with mode: 0644]
cinder/volume/drivers/infortrend/eonstor_ds_cli/__init__.py [new file with mode: 0644]
cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py [new file with mode: 0644]
cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py [new file with mode: 0644]
cinder/volume/drivers/infortrend/infortrend_fc_cli.py [new file with mode: 0644]
cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py [new file with mode: 0644]

index 19eaacfb1d917b430db3f83e86984aa12a5e83c3..389ccb63d35ec9ade36eb7306329b33df61bcaa2 100644 (file)
@@ -914,6 +914,12 @@ class StorPoolConfigurationInvalid(CinderException):
                 "of the /etc/storpool.conf file: %(error)s")
 
 
+# Infortrend EonStor DS Driver
+class InfortrendCliException(CinderException):
+    message = _("Infortrend CLI exception: %(err)s Param: %(param)s "
+                "(Return Code: %(rc)s) (Output: %(out)s)")
+
+
 # DOTHILL drivers
 class DotHillInvalidBackend(CinderException):
     message = _("Backend doesn't exist (%(backend)s)")
diff --git a/cinder/tests/unit/test_infortrend_cli.py b/cinder/tests/unit/test_infortrend_cli.py
new file mode 100644 (file)
index 0000000..9573c63
--- /dev/null
@@ -0,0 +1,2228 @@
+# Copyright (c) 2015 Infortrend Technology, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from cinder import test
+from cinder.volume.drivers.infortrend.eonstor_ds_cli import cli_factory as cli
+
+
+class InfortrendCLITestData(object):
+
+    """CLI Test Data."""
+
+    # Infortrend entry
+    fake_lv_id = ['5DE94FF775D81C30', '1234567890']
+
+    fake_partition_id = ['6A41315B0EDC8EB7', '51B4283E4E159173',
+                         '987654321', '123456789',
+                         '2667FE351FC505AE', '53F3E98141A2E871']
+
+    fake_pair_id = ['55D790F8350B036B', '095A184B0ED2DB10']
+
+    fake_snapshot_id = ['2C7A8D211F3B1E36', '60135EE53C14D5EB']
+
+    fake_data_port_ip = ['172.27.0.1', '172.27.0.2',
+                         '172.27.0.3', '172.27.0.4',
+                         '172.27.0.5', '172.27.0.6']
+
+    fake_model = ['DS S12F-G2852-6']
+
+    fake_manage_port_ip = ['172.27.0.10']
+
+    fake_system_id = ['DEEC']
+
+    fake_host_ip = ['172.27.0.2']
+
+    fake_target_wwnns = ['100123D02300DEEC', '100123D02310DEEC']
+
+    fake_target_wwpns = ['110123D02300DEEC', '120123D02300DEEC',
+                         '110123D02310DEEC', '120123D02310DEEC']
+
+    fake_initiator_wwnns = ['2234567890123456', '2234567890543216']
+
+    fake_initiator_wwpns = ['1234567890123456', '1234567890543216']
+
+    fake_initiator_iqn = ['iqn.1991-05.com.infortrend:pc123',
+                          'iqn.1991-05.com.infortrend:pc456']
+
+    fake_lun_map = [0, 1, 2]
+
+    # cinder entry
+    test_provider_location = [(
+        'system_id^%s@partition_id^%s') % (
+            int(fake_system_id[0], 16), fake_partition_id[0]),
+    ]
+
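
test_provider_location above encodes the backing system (as a decimal
rendering of the hex system ID; int('DEEC', 16) == 57068) and the
partition ID. A tiny parsing sketch (the helper name is hypothetical,
not taken from the driver):

    def parse_provider_location(provider_location):
        # 'system_id^57068@partition_id^6A41315B0EDC8EB7' ->
        # {'system_id': '57068', 'partition_id': '6A41315B0EDC8EB7'}
        return dict(part.split('^', 1)
                    for part in provider_location.split('@'))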
+    test_volume = {
+        'id': '5aa119a8-d25b-45a7-8d1b-88e127885635',
+        'size': 1,
+        'name': 'Part-1',
+        'host': 'infortrend-server1@backend_1#LV-1',
+        'name_id': '5aa119a8-d25b-45a7-8d1b-88e127885635',
+        'provider_auth': None,
+        'project_id': 'project',
+        'display_name': None,
+        'display_description': 'Part-1',
+        'volume_type_id': None,
+        'provider_location': test_provider_location[0],
+        'volume_attachment': [],
+    }
+
+    test_dst_volume = {
+        'id': '6bb119a8-d25b-45a7-8d1b-88e127885666',
+        'size': 1,
+        'name': 'Part-1-Copy',
+        'host': 'infortrend-server1@backend_1',
+        'name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666',
+        'provider_auth': None,
+        'project_id': 'project',
+        'display_name': None,
+        'display_description': 'Part-1-Copy',
+        'volume_type_id': None,
+        'provider_location': '',
+        'volume_attachment': [],
+    }
+
+    test_ref_volume = {
+        'source-id': '6bb119a8-d25b-45a7-8d1b-88e127885666',
+        'size': 1,
+    }
+
+    test_ref_volume_with_import = {
+        'source-name': 'import_into_openstack',
+        'size': 1,
+    }
+
+    test_snapshot = {
+        'id': 'ffa9bc5e-1172-4021-acaf-cdcd78a9584d',
+        'volume_id': test_volume['id'],
+        'size': 2,
+        'volume_name': test_volume['name'],
+        'volume_size': 2,
+        'project_id': 'project',
+        'display_name': None,
+        'display_description': 'SI-1',
+        'volume_type_id': None,
+        'provider_location': fake_snapshot_id[0],
+    }
+
+    test_iqn = [(
+        'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % (
+            int(fake_system_id[0], 16), 1, 0, 1), (
+        'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % (
+            int(fake_system_id[0], 16), 1, 0, 1),
+    ]
+
+    test_iscsi_properties = {
+        'driver_volume_type': 'iscsi',
+        'data': {
+            'target_discovered': True,
+            'target_portal': '%s:3260' % fake_data_port_ip[2],
+            'target_iqn': test_iqn[0],
+            'target_lun': fake_lun_map[0],
+            'volume_id': test_volume['id'],
+        },
+    }
+
+    test_iscsi_properties_with_mcs = {
+        'driver_volume_type': 'iscsi',
+        'data': {
+            'target_discovered': True,
+            'target_portal': '%s:3260' % fake_data_port_ip[0],
+            'target_iqn': test_iqn[1],
+            'target_lun': fake_lun_map[2],
+            'volume_id': test_volume['id'],
+        },
+    }
+
+    test_iqn_empty_map = [(
+        'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % (
+            int(fake_system_id[0], 16), 0, 0, 1),
+    ]
+
+    test_iscsi_properties_empty_map = {
+        'driver_volume_type': 'iscsi',
+        'data': {
+            'target_discovered': True,
+            'target_portal': '%s:3260' % fake_data_port_ip[0],
+            'target_iqn': test_iqn_empty_map[0],
+            'target_lun': fake_lun_map[0],
+            'volume_id': test_volume['id'],
+        },
+    }
+
+    test_initiator_target_map = {
+        fake_initiator_wwpns[0]: fake_target_wwpns[0:2],
+        fake_initiator_wwpns[1]: fake_target_wwpns[0:2],
+    }
+
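
test_initiator_target_map above maps each initiator WWPN to the target
WWPNs it can reach; such maps are typically built uniformly across
initiators (a sketch of the general FC-zoning pattern, not code from
this driver):

    def build_initiator_target_map(initiator_wwpns, target_wwpns):
        # Every initiator sees the same set of target ports, as in
        # test_initiator_target_map above.
        return {initiator: list(target_wwpns)
                for initiator in initiator_wwpns}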
+    test_fc_properties = {
+        'driver_volume_type': 'fibre_channel',
+        'data': {
+            'target_discovered': True,
+            'target_lun': fake_lun_map[0],
+            'target_wwn': fake_target_wwpns[0:2],
+            'access_mode': 'rw',
+            'initiator_target_map': test_initiator_target_map,
+        },
+    }
+
+    test_initiator_target_map_specific_channel = {
+        fake_initiator_wwpns[0]: [fake_target_wwpns[1]],
+        fake_initiator_wwpns[1]: [fake_target_wwpns[1]],
+    }
+
+    test_fc_properties_with_specific_channel = {
+        'driver_volume_type': 'fibre_channel',
+        'data': {
+            'target_discovered': True,
+            'target_lun': fake_lun_map[0],
+            'target_wwn': [fake_target_wwpns[1]],
+            'access_mode': 'rw',
+            'initiator_target_map': test_initiator_target_map_specific_channel,
+        },
+    }
+
+    test_target_wwpns_map_multipath_r_model = [
+        fake_target_wwpns[0],
+        fake_target_wwpns[2],
+        fake_target_wwpns[1],
+        fake_target_wwpns[3],
+    ]
+
+    test_initiator_target_map_multipath_r_model = {
+        fake_initiator_wwpns[0]: test_target_wwpns_map_multipath_r_model[:],
+        fake_initiator_wwpns[1]: test_target_wwpns_map_multipath_r_model[:],
+    }
+
+    test_fc_properties_multipath_r_model = {
+        'driver_volume_type': 'fibre_channel',
+        'data': {
+            'target_discovered': True,
+            'target_lun': fake_lun_map[0],
+            'target_wwn': test_target_wwpns_map_multipath_r_model[:],
+            'access_mode': 'rw',
+            'initiator_target_map':
+                test_initiator_target_map_multipath_r_model,
+        },
+    }
+
+    test_initiator_target_map_zoning = {
+        fake_initiator_wwpns[0].lower():
+            [x.lower() for x in fake_target_wwpns[0:2]],
+        fake_initiator_wwpns[1].lower():
+            [x.lower() for x in fake_target_wwpns[0:2]],
+    }
+
+    test_fc_properties_zoning = {
+        'driver_volume_type': 'fibre_channel',
+        'data': {
+            'target_discovered': True,
+            'target_lun': fake_lun_map[0],
+            'target_wwn': [x.lower() for x in fake_target_wwpns[0:2]],
+            'access_mode': 'rw',
+            'initiator_target_map': test_initiator_target_map_zoning,
+        },
+    }
+
+    test_initiator_target_map_zoning_r_model = {
+        fake_initiator_wwpns[0].lower():
+            [x.lower() for x in fake_target_wwpns[1:3]],
+        fake_initiator_wwpns[1].lower():
+            [x.lower() for x in fake_target_wwpns[1:3]],
+    }
+
+    test_fc_properties_zoning_r_model = {
+        'driver_volume_type': 'fibre_channel',
+        'data': {
+            'target_discovered': True,
+            'target_lun': fake_lun_map[0],
+            'target_wwn': [x.lower() for x in fake_target_wwpns[1:3]],
+            'access_mode': 'rw',
+            'initiator_target_map': test_initiator_target_map_zoning_r_model,
+        },
+    }
+
+    test_fc_terminate_conn_info = {
+        'driver_volume_type': 'fibre_channel',
+        'data': {
+            'initiator_target_map': test_initiator_target_map_zoning,
+        },
+    }
+
+    test_connector_iscsi = {
+        'ip': fake_host_ip[0],
+        'initiator': fake_initiator_iqn[0],
+        'host': 'infortrend-server1@backend_1',
+    }
+
+    test_connector_fc = {
+        'wwpns': fake_initiator_wwpns,
+        'wwnns': fake_initiator_wwnns,
+        'host': 'infortrend-server1@backend_1',
+    }
+
+    fake_pool = {
+        'pool_name': 'LV-2',
+        'pool_id': fake_lv_id[1],
+        'total_capacity_gb': 1000,
+        'free_capacity_gb': 1000,
+        'reserved_percentage': 0,
+        'QoS_support': False,
+        'thin_provisioning_support': False,
+    }
+
+    test_pools = [{
+        'pool_name': 'LV-1',
+        'pool_id': fake_lv_id[0],
+        'total_capacity_gb': round(857982.0 / 1024, 2),
+        'free_capacity_gb': round(841978.0 / 1024, 2),
+        'reserved_percentage': 0,
+        'QoS_support': False,
+        'max_over_subscription_ratio': 20.0,
+        'thin_provisioning_support': False,
+        'thick_provisioning_support': True,
+        'provisioned_capacity_gb':
+            round((400) / 1024, 2),
+        'infortrend_provisioning': 'full',
+    }]
+
+    test_volume_states = {
+        'volume_backend_name': 'infortrend_backend_1',
+        'vendor_name': 'Infortrend',
+        'driver_version': '99.99',
+        'storage_protocol': 'iSCSI',
+        'pools': test_pools,
+    }
+
+    test_host = {
+        'host': 'infortrend-server1@backend_1',
+        'capabilities': test_volume_states,
+    }
+
+    test_migrate_volume_states = {
+        'volume_backend_name': 'infortrend_backend_1',
+        'vendor_name': 'Infortrend',
+        'driver_version': '99.99',
+        'storage_protocol': 'iSCSI',
+        'pool_name': 'LV-1',
+        'pool_id': fake_lv_id[1],
+        'total_capacity_gb': round(857982.0 / 1024, 2),
+        'free_capacity_gb': round(841978.0 / 1024, 2),
+        'reserved_percentage': 0,
+        'QoS_support': False,
+        'infortrend_provisioning': 'full',
+    }
+
+    test_migrate_host = {
+        'host': 'infortrend-server1@backend_1#LV-2',
+        'capabilities': test_migrate_volume_states,
+    }
+
+    test_migrate_volume_states_2 = {
+        'volume_backend_name': 'infortrend_backend_1',
+        'vendor_name': 'Infortrend',
+        'driver_version': '99.99',
+        'storage_protocol': 'iSCSI',
+        'pool_name': 'LV-1',
+        'pool_id': fake_lv_id[1],
+        'total_capacity_gb': round(857982.0 / 1024, 2),
+        'free_capacity_gb': round(841978.0 / 1024, 2),
+        'reserved_percentage': 0,
+        'QoS_support': False,
+        'infortrend_provisioning': 'full',
+    }
+
+    test_migrate_host_2 = {
+        'host': 'infortrend-server1@backend_1#LV-1',
+        'capabilities': test_migrate_volume_states_2,
+    }
+
+    fake_host = {
+        'host': 'infortrend-server1@backend_1',
+        'capabilities': {},
+    }
+
+    fake_volume_id = [test_volume['id'], test_dst_volume['id']]
+
+    fake_lookup_map = {
+        '12345678': {
+            'initiator_port_wwn_list':
+                [x.lower() for x in fake_initiator_wwpns],
+            'target_port_wwn_list':
+                [x.lower() for x in fake_target_wwpns[0:2]],
+        },
+    }
+
+    fake_lookup_map_r_model = {
+        '12345678': {
+            'initiator_port_wwn_list':
+                [x.lower() for x in fake_initiator_wwpns[:]],
+            'target_port_wwn_list':
+                [x.lower() for x in fake_target_wwpns[1:3]],
+        },
+    }
+
+    test_new_type = {
+        'name': 'type0',
+        'qos_specs_id': None,
+        'deleted': False,
+        'extra_specs': {'infortrend_provisioning': 'thin'},
+        'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0',
+    }
+
+    test_diff = {'extra_specs': {'infortrend_provisioning': ('full', 'thin')}}
+
+    def get_fake_cli_failed(self):
+        return """
+CLI: Failed
+Return: 0x0001
+
+CLI: No selected device
+Return: 0x000c
+"""
+
+    def get_fake_cli_failed_with_network(self):
+        return """
+CLI: Failed
+Return: 0x0001
+
+CLI: No network
+Return: 0x000b
+"""
+
+    def get_fake_cli_succeed(self):
+        return """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+CLI: Successful: 0 mapping(s) shown
+Return: 0x0000
+"""
+
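
The fake CLI transcripts above end each command with a 'Return: 0x....'
status line; a minimal sketch of pulling out the final return code
(illustrative only; the commit's real parsing lives in cli_factory.py):

    import re

    def last_return_code(raw):
        # Collect every 'Return: 0x....' line; report the last as an int.
        codes = re.findall(r'^Return: (0x[0-9a-fA-F]+)$', raw, re.MULTILINE)
        return int(codes[-1], 16) if codes else None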
+    def get_test_show_empty_list(self):
+        return (0, [])
+
+    def get_test_show_snapshot(self, partition_id=None, snapshot_id=None):
+        if partition_id and snapshot_id:
+            return (0, [{
+                'Map': 'No',
+                'Partition-ID': partition_id,
+                'SI-ID': snapshot_id,
+                'Name': '---',
+                'Activated-time': 'Thu, Jan 09 01:33:11 2020',
+                'Index': '1',
+            }])
+        else:
+            return (0, [{
+                'Map': 'No',
+                'Partition-ID': self.fake_partition_id[0],
+                'SI-ID': self.fake_snapshot_id[0],
+                'Name': '---',
+                'Activated-time': 'Thu, Jan 09 01:33:11 2020',
+                'Index': '1',
+            }, {
+                'Map': 'No',
+                'Partition-ID': self.fake_partition_id[0],
+                'SI-ID': self.fake_snapshot_id[1],
+                'Name': '---',
+                'Activated-time': 'Thu, Jan 09 01:35:50 2020',
+                'Index': '2',
+            }])
+
+    def get_fake_show_snapshot(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+\/\/\/-
+\
+/
+-
+
+\
+/
+-
+\/-\/- Index  SI-ID  Name  Partition-ID  Map  Activated-time
+---------------------------------------------------------------------------------
+ 1      %s     ---   %s            No   Thu, Jan 09 01:33:11 2020
+ 2      %s     ---   %s            No   Thu, Jan 09 01:35:50 2020
+
+CLI: Successful: 2 snapshot image(s) shown
+Return: 0x0000
+"""
+        return msg % (self.fake_snapshot_id[0],
+                      self.fake_partition_id[0],
+                      self.fake_snapshot_id[1],
+                      self.fake_partition_id[0])
+
+    def get_test_show_snapshot_detail_filled_block(self):
+        return (0, [{
+            'Mapped': 'Yes',
+            'Created-time': 'Wed, Jun 10 10:57:16 2015',
+            'ID': self.fake_snapshot_id[0],
+            'Last-modification-time': 'Wed, Jun 10 10:57:16 2015',
+            'Description': '---',
+            'Total-filled-block': '1',
+            'LV-ID': self.fake_lv_id[0],
+            'Activation-schedule-time': 'Not Actived',
+            'Mapping': 'CH:0/ID:0/LUN:1',
+            'Index': '1',
+            'Used': '0',
+            'Name': '---',
+            'Valid-filled-block': '0',
+            'Partition-ID': self.fake_partition_id[0],
+        }])
+
+    def get_test_show_snapshot_detail(self):
+        return (0, [{
+            'Mapped': 'Yes',
+            'Created-time': 'Wed, Jun 10 10:57:16 2015',
+            'ID': self.fake_snapshot_id[0],
+            'Last-modification-time': 'Wed, Jun 10 10:57:16 2015',
+            'Description': '---',
+            'Total-filled-block': '0',
+            'LV-ID': self.fake_lv_id[0],
+            'Activation-schedule-time': 'Not Actived',
+            'Mapping': 'CH:0/ID:0/LUN:1',
+            'Index': '1',
+            'Used': '0',
+            'Name': '---',
+            'Valid-filled-block': '0',
+            'Partition-ID': self.fake_partition_id[0],
+        }])
+
+    def get_fake_show_snapshot_detail(self):
+        msg = """
+CLI: Successful: Device(UID:25090, Name:, Model:DS 1016RE) selected.
+Return: 0x0000
+
+ ID: %s
+ Index: 1
+ Name: ---
+ Partition-ID: %s
+ LV-ID: %s
+ Created-time: Wed, Jun 10 10:57:16 2015
+ Last-modification-time: Wed, Jun 10 10:57:16 2015
+ Activation-schedule-time: Not Actived
+ Used: 0
+ Valid-filled-block: 0
+ Total-filled-block: 0
+ Description: ---
+ Mapped: Yes
+ Mapping: CH:0/ID:0/LUN:1
+
+CLI: Successful: 1 snapshot image(s) shown
+Return: 0x0000
+"""
+        return msg % (self.fake_snapshot_id[0],
+                      self.fake_partition_id[0],
+                      self.fake_lv_id[0])
+
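
get_fake_show_snapshot_detail above returns "key: value" blocks separated
by blank lines, while get_test_show_snapshot_detail holds the parsed
expectation. A minimal parser of that shape (an assumption about the
general approach; the commit's actual parser is in cli_factory.py):

    def parse_detail_blocks(raw):
        entries, current = [], {}
        for line in raw.splitlines():
            line = line.strip()
            if not line or line.startswith(('CLI:', 'Return:')):
                # Blank lines close a block; status lines are skipped.
                if not line and current:
                    entries.append(current)
                    current = {}
                continue
            key, sep, value = line.partition(': ')
            if sep:
                current[key] = value
        if current:
            entries.append(current)
        return entries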
+    def get_test_show_net(self):
+        return (0, [{
+            'Slot': 'slotA',
+            'MAC': '10D02380DEEC',
+            'ID': '1',
+            'IPv4': self.fake_data_port_ip[0],
+            'Mode': 'Disabled',
+            'IPv6': '---',
+        }, {
+            'Slot': 'slotB',
+            'MAC': '10D02390DEEC',
+            'ID': '1',
+            'IPv4': self.fake_data_port_ip[1],
+            'Mode': 'Disabled',
+            'IPv6': '---',
+        }, {
+            'Slot': 'slotA',
+            'MAC': '10D02340DEEC',
+            'ID': '2',
+            'IPv4': self.fake_data_port_ip[2],
+            'Mode': 'Disabled',
+            'IPv6': '---',
+        }, {
+            'Slot': 'slotB',
+            'MAC': '10D02350DEEC',
+            'ID': '2',
+            'IPv4': self.fake_data_port_ip[3],
+            'Mode': 'Disabled',
+            'IPv6': '---',
+        }, {
+            'Slot': 'slotA',
+            'MAC': '10D02310DEEC',
+            'ID': '4',
+            'IPv4': self.fake_data_port_ip[4],
+            'Mode': 'Disabled',
+            'IPv6': '---',
+        }, {
+            'Slot': 'slotB',
+            'MAC': '10D02320DEEC',
+            'ID': '4',
+            'IPv4': self.fake_data_port_ip[5],
+            'Mode': 'Disabled',
+            'IPv6': '---',
+        }, {
+            'Slot': '---',
+            'MAC': '10D023077124',
+            'ID': '32',
+            'IPv4': '172.27.1.1',
+            'Mode': 'Disabled',
+            'IPv6': '---',
+        }])
+
+    def get_fake_show_net(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ ID  MAC           Mode  IPv4            Mode      IPv6  Slot
+---------------------------------------------------------------
+ 1   10D02380DEEC  DHCP  %s              Disabled  ---   slotA
+ 1   10D02390DEEC  DHCP  %s              Disabled  ---   slotB
+ 2   10D02340DEEC  DHCP  %s              Disabled  ---   slotA
+ 2   10D02350DEEC  DHCP  %s              Disabled  ---   slotB
+ 4   10D02310DEEC  DHCP  %s              Disabled  ---   slotA
+ 4   10D02320DEEC  DHCP  %s              Disabled  ---   slotB
+ 32  10D023077124  DHCP  172.27.1.1      Disabled  ---   ---
+
+CLI: Successful: 2 record(s) found
+Return: 0x0000
+"""
+        return msg % (self.fake_data_port_ip[0], self.fake_data_port_ip[1],
+                      self.fake_data_port_ip[2], self.fake_data_port_ip[3],
+                      self.fake_data_port_ip[4], self.fake_data_port_ip[5])
+
+    def get_test_show_net_detail(self):
+        return (0, [{
+            'Slot': 'slotA',
+            'IPv4-mode': 'DHCP',
+            'ID': '1',
+            'IPv6-address': '---',
+            'Net-mask': '---',
+            'IPv4-address': '---',
+            'Route': '---',
+            'Gateway': '---',
+            'IPv6-mode': 'Disabled',
+            'MAC': '00D023877124',
+            'Prefix-length': '---',
+        }, {
+            'Slot': '---',
+            'IPv4-mode': 'DHCP',
+            'ID': '32',
+            'IPv6-address': '---',
+            'Net-mask': '255.255.240.0',
+            'IPv4-address': '172.27.112.245',
+            'Route': '---',
+            'Gateway': '172.27.127.254',
+            'IPv6-mode': 'Disabled',
+            'MAC': '00D023077124',
+            'Prefix-length': '---',
+        }])
+
+    def get_fake_show_net_detail(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ ID: 1
+ MAC: 00D023877124
+ IPv4-mode: DHCP
+ IPv4-address: ---
+ Net-mask: ---
+ Gateway: ---
+ IPv6-mode: Disabled
+ IPv6-address: ---
+ Prefix-length: ---
+ Route: ---
+ Slot: slotA
+
+ ID: 32
+ MAC: 00D023077124
+ IPv4-mode: DHCP
+ IPv4-address: 172.27.112.245
+ Net-mask: 255.255.240.0
+ Gateway: 172.27.127.254
+ IPv6-mode: Disabled
+ IPv6-address: ---
+ Prefix-length: ---
+ Route: ---
+ Slot: ---
+
+CLI: Successful: 3 record(s) found
+Return: 0x0000
+"""
+        return msg
+
+    def get_test_show_partition(self, volume_id=None, pool_id=None):
+        result = [{
+            'ID': self.fake_partition_id[0],
+            'Used': '200',
+            'Name': self.fake_volume_id[0].replace('-', ''),
+            'Size': '200',
+            'Min-reserve': '200',
+            'LV-ID': self.fake_lv_id[0],
+        }, {
+            'ID': self.fake_partition_id[1],
+            'Used': '200',
+            'Name': self.fake_volume_id[1].replace('-', ''),
+            'Size': '200',
+            'Min-reserve': '200',
+            'LV-ID': self.fake_lv_id[0],
+        }]
+        if volume_id and pool_id:
+            result.append({
+                'ID': self.fake_partition_id[2],
+                'Used': '200',
+                'Name': volume_id,
+                'Size': '200',
+                'Min-reserve': '200',
+                'LV-ID': pool_id,
+            })
+        return (0, result)
+
+    def get_fake_show_partition(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ ID  Name         LV-ID  Size   Used   Min-reserve
+---------------------------------------------------
+ %s  %s           %s     200    200    200
+ %s  %s           %s     200    200    200
+
+CLI: Successful: 3 partition(s) shown
+Return: 0x0000
+"""
+        return msg % (self.fake_partition_id[0],
+                      self.fake_volume_id[0].replace('-', ''),
+                      self.fake_lv_id[0],
+                      self.fake_partition_id[1],
+                      self.fake_volume_id[1].replace('-', ''),
+                      self.fake_lv_id[0])
+
+    def get_test_show_partition_detail_for_map(
+            self, partition_id, mapped='true'):
+        result = [{
+            'LV-ID': self.fake_lv_id[0],
+            'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1',
+            'Used': '200',
+            'Size': '200',
+            'ID': partition_id,
+            'Progress': '---',
+            'Min-reserve': '200',
+            'Last-modification-time': 'Wed, Jan 08 20:23:23 2020',
+            'Valid-filled-block': '100',
+            'Name': self.fake_volume_id[0].replace('-', ''),
+            'Mapped': mapped,
+            'Total-filled-block': '100',
+            'Creation-time': 'Wed, Jan 08 20:23:23 2020',
+        }]
+        return (0, result)
+
+    def get_test_show_partition_detail(self, volume_id=None, pool_id=None):
+        result = [{
+            'LV-ID': self.fake_lv_id[0],
+            'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0',
+            'Used': '200',
+            'Size': '200',
+            'ID': self.fake_partition_id[0],
+            'Progress': '---',
+            'Min-reserve': '200',
+            'Last-modification-time': 'Wed, Jan 08 20:23:23 2020',
+            'Valid-filled-block': '100',
+            'Name': self.fake_volume_id[0].replace('-', ''),
+            'Mapped': 'true',
+            'Total-filled-block': '100',
+            'Creation-time': 'Wed, Jan 08 20:23:23 2020',
+        }, {
+            'LV-ID': self.fake_lv_id[0],
+            'Mapping': '---',
+            'Used': '200',
+            'Size': '200',
+            'ID': self.fake_partition_id[1],
+            'Progress': '---',
+            'Min-reserve': '200',
+            'Last-modification-time': 'Sat, Jan 11 22:18:40 2020',
+            'Valid-filled-block': '100',
+            'Name': self.fake_volume_id[1].replace('-', ''),
+            'Mapped': 'false',
+            'Total-filled-block': '100',
+            'Creation-time': 'Sat, Jan 11 22:18:40 2020',
+        }]
+        if volume_id and pool_id:
+            result.extend([{
+                'LV-ID': pool_id,
+                'Mapping': '---',
+                'Used': '200',
+                'Size': '200',
+                'ID': self.fake_partition_id[2],
+                'Progress': '---',
+                'Min-reserve': '200',
+                'Last-modification-time': 'Sat, Jan 15 22:18:40 2020',
+                'Valid-filled-block': '100',
+                'Name': volume_id,
+                'Mapped': 'false',
+                'Total-filled-block': '100',
+                'Creation-time': 'Sat, Jan 15 22:18:40 2020',
+            }, {
+                'LV-ID': '987654321',
+                'Mapping': '---',
+                'Used': '200',
+                'Size': '200',
+                'ID': '123123123123',
+                'Progress': '---',
+                'Min-reserve': '200',
+                'Last-modification-time': 'Sat, Jan 12 22:18:40 2020',
+                'Valid-filled-block': '100',
+                'Name': volume_id,
+                'Mapped': 'false',
+                'Total-filled-block': '100',
+                'Creation-time': 'Sat, Jan 15 22:18:40 2020',
+            }])
+        return (0, result)
+
+    def get_fake_show_partition_detail(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ ID: %s
+ Name: %s
+ LV-ID: %s
+ Size: 200
+ Used: 200
+ Min-reserve: 200
+ Creation-time: Wed, Jan 08 20:23:23 2020
+ Last-modification-time: Wed, Jan 08 20:23:23 2020
+ Valid-filled-block: 100
+ Total-filled-block: 100
+ Progress: ---
+ Mapped: true
+ Mapping: CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0
+
+ ID: %s
+ Name: %s
+ LV-ID: %s
+ Size: 200
+ Used: 200
+ Min-reserve: 200
+ Creation-time: Sat, Jan 11 22:18:40 2020
+ Last-modification-time: Sat, Jan 11 22:18:40 2020
+ Valid-filled-block: 100
+ Total-filled-block: 100
+ Progress: ---
+ Mapped: false
+ Mapping: ---
+
+CLI: Successful: 3 partition(s) shown
+Return: 0x0000
+"""
+        return msg % (self.fake_partition_id[0],
+                      self.fake_volume_id[0].replace('-', ''),
+                      self.fake_lv_id[0],
+                      self.fake_partition_id[1],
+                      self.fake_volume_id[1].replace('-', ''),
+                      self.fake_lv_id[0])
+
+    def get_test_show_replica_detail_for_migrate(
+            self, src_part_id, dst_part_id, volume_id, status='Completed'):
+        result = [{
+            'Pair-ID': self.fake_pair_id[0],
+            'Name': 'Cinder-Snapshot',
+            'Source-Device': 'DEEC',
+            'Source': src_part_id,
+            'Source-Type': 'LV-Partition',
+            'Source-Name': volume_id,
+            'Source-LV': '5DE94FF775D81C30',
+            'Source-VS': '2C482316298F7A4E',
+            'Source-Mapped': 'Yes',
+            'Target-Device': 'DEEC',
+            'Target': dst_part_id,
+            'Target-Type': 'LV-Partition',
+            'Target-Name': volume_id,
+            'Target-LV': '5DE94FF775D81C30',
+            'Target-VS': '033EA1FA4EA193EB',
+            'Target-Mapped': 'No',
+            'Type': 'Copy',
+            'Priority': 'Normal',
+            'Timeout': '---',
+            'Incremental': '---',
+            'Compression': '---',
+            'Status': status,
+            'Progress': '---',
+            'Created-time': '01/11/2020 22:20 PM',
+            'Sync-commence-time': '01/11/2020 22:20 PM',
+            'Split-time': '01/11/2020 22:20 PM',
+            'Completed-time': '01/11/2020 22:21 PM',
+            'Description': '---',
+        }]
+        return (0, result)
+
+    def get_test_show_replica_detail_for_si_sync_pair(self):
+        result = [{
+            'Pair-ID': self.fake_pair_id[0],
+            'Name': 'Cinder-Snapshot',
+            'Source-Device': 'DEEC',
+            'Source': self.fake_snapshot_id[0],
+            'Source-Type': 'LV-Partition',
+            'Source-Name': '',
+            'Source-LV': '5DE94FF775D81C30',
+            'Source-VS': '2C482316298F7A4E',
+            'Source-Mapped': 'Yes',
+            'Target-Device': 'DEEC',
+            'Target': self.fake_partition_id[1],
+            'Target-Type': 'LV-Partition',
+            'Target-Name': '',
+            'Target-LV': '5DE94FF775D81C30',
+            'Target-VS': '033EA1FA4EA193EB',
+            'Target-Mapped': 'No',
+            'Type': 'Copy',
+            'Priority': 'Normal',
+            'Timeout': '---',
+            'Incremental': '---',
+            'Compression': '---',
+            'Status': 'Copy',
+            'Progress': '---',
+            'Created-time': '01/11/2020 22:20 PM',
+            'Sync-commence-time': '01/11/2020 22:20 PM',
+            'Split-time': '01/11/2020 22:20 PM',
+            'Completed-time': '01/11/2020 22:21 PM',
+            'Description': '---',
+        }]
+        return (0, result)
+
+    def get_test_show_replica_detail_for_sync_pair(self):
+        result = [{
+            'Pair-ID': self.fake_pair_id[0],
+            'Name': 'Cinder-Snapshot',
+            'Source-Device': 'DEEC',
+            'Source': self.fake_partition_id[0],
+            'Source-Type': 'LV-Partition',
+            'Source-Name': self.fake_volume_id[0].replace('-', ''),
+            'Source-LV': '5DE94FF775D81C30',
+            'Source-VS': '2C482316298F7A4E',
+            'Source-Mapped': 'Yes',
+            'Target-Device': 'DEEC',
+            'Target': self.fake_partition_id[1],
+            'Target-Type': 'LV-Partition',
+            'Target-Name': self.fake_volume_id[1].replace('-', ''),
+            'Target-LV': '5DE94FF775D81C30',
+            'Target-VS': '033EA1FA4EA193EB',
+            'Target-Mapped': 'No',
+            'Type': 'Copy',
+            'Priority': 'Normal',
+            'Timeout': '---',
+            'Incremental': '---',
+            'Compression': '---',
+            'Status': 'Copy',
+            'Progress': '---',
+            'Created-time': '01/11/2020 22:20 PM',
+            'Sync-commence-time': '01/11/2020 22:20 PM',
+            'Split-time': '01/11/2020 22:20 PM',
+            'Completed-time': '01/11/2020 22:21 PM',
+            'Description': '---',
+        }]
+        return (0, result)
+
+    def get_test_show_replica_detail(self):
+        result = [{
+            'Pair-ID': '4BF246E26966F015',
+            'Name': 'Cinder-Snapshot',
+            'Source-Device': 'DEEC',
+            'Source': self.fake_partition_id[2],
+            'Source-Type': 'LV-Partition',
+            'Source-Name': 'Part-2',
+            'Source-LV': '5DE94FF775D81C30',
+            'Source-VS': '2C482316298F7A4E',
+            'Source-Mapped': 'No',
+            'Target-Device': 'DEEC',
+            'Target': self.fake_partition_id[3],
+            'Target-Type': 'LV-Partition',
+            'Target-Name': 'Part-1-Copy',
+            'Target-LV': '5DE94FF775D81C30',
+            'Target-VS': '714B80F0335F6E52',
+            'Target-Mapped': 'No',
+            'Type': 'Copy',
+            'Priority': 'Normal',
+            'Timeout': '---',
+            'Incremental': '---',
+            'Compression': '---',
+            'Status': 'Completed',
+            'Progress': '---',
+            'Created-time': '01/11/2020 22:20 PM',
+            'Sync-commence-time': '01/11/2020 22:20 PM',
+            'Split-time': '01/11/2020 22:20 PM',
+            'Completed-time': '01/11/2020 22:21 PM',
+            'Description': '---',
+        }, {
+            'Pair-ID': self.fake_pair_id[0],
+            'Name': 'Cinder-Migrate',
+            'Source-Device': 'DEEC',
+            'Source': self.fake_partition_id[0],
+            'Source-Type': 'LV-Partition',
+            'Source-Name': self.fake_volume_id[0].replace('-', ''),
+            'Source-LV': '5DE94FF775D81C30',
+            'Source-VS': '2C482316298F7A4E',
+            'Source-Mapped': 'Yes',
+            'Target-Device': 'DEEC',
+            'Target': self.fake_partition_id[1],
+            'Target-Type': 'LV-Partition',
+            'Target-Name': self.fake_volume_id[1].replace('-', ''),
+            'Target-LV': '5DE94FF775D81C30',
+            'Target-VS': '033EA1FA4EA193EB',
+            'Target-Mapped': 'No',
+            'Type': 'Mirror',
+            'Priority': 'Normal',
+            'Timeout': '---',
+            'Incremental': '---',
+            'Compression': '---',
+            'Status': 'Mirror',
+            'Progress': '---',
+            'Created-time': '01/11/2020 22:20 PM',
+            'Sync-commence-time': '01/11/2020 22:20 PM',
+            'Split-time': '01/11/2020 22:20 PM',
+            'Completed-time': '01/11/2020 22:21 PM',
+            'Description': '---',
+        }, {
+            'Pair-ID': self.fake_pair_id[1],
+            'Name': 'Cinder-Migrate',
+            'Source-Device': 'DEEC',
+            'Source': self.fake_partition_id[4],
+            'Source-Type': 'LV-Partition',
+            'Source-Name': self.fake_volume_id[0].replace('-', ''),
+            'Source-LV': '5DE94FF775D81C30',
+            'Source-VS': '2C482316298F7A4E',
+            'Source-Mapped': 'No',
+            'Target-Device': 'DEEC',
+            'Target': self.fake_partition_id[5],
+            'Target-Type': 'LV-Partition',
+            'Target-Name': self.fake_volume_id[1].replace('-', ''),
+            'Target-LV': '5DE94FF775D81C30',
+            'Target-VS': '714B80F0335F6E52',
+            'Target-Mapped': 'Yes',
+            'Type': 'Mirror',
+            'Priority': 'Normal',
+            'Timeout': '---',
+            'Incremental': '---',
+            'Compression': '---',
+            'Status': 'Mirror',
+            'Progress': '---',
+            'Created-time': '01/11/2020 22:20 PM',
+            'Sync-commence-time': '01/11/2020 22:20 PM',
+            'Split-time': '01/11/2020 22:20 PM',
+            'Completed-time': '01/11/2020 22:21 PM',
+            'Description': '---',
+        }]
+        return (0, result)
+
+    def get_fake_show_replica_detail(self):
+        msg = """
+ CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected.
+Return: 0x0000
+
+ Pair-ID: 4BF246E26966F015
+ Name: Cinder-Snapshot
+ Source-Device: DEEC
+ Source: %s
+ Source-Type: LV-Partition
+ Source-Name: Part-2
+ Source-LV: 5DE94FF775D81C30
+ Source-VS: 2C482316298F7A4E
+ Source-Mapped: No
+ Target-Device: DEEC
+ Target: %s
+ Target-Type: LV-Partition
+ Target-Name: Part-1-Copy
+ Target-LV: 5DE94FF775D81C30
+ Target-VS: 714B80F0335F6E52
+ Target-Mapped: No
+ Type: Copy
+ Priority: Normal
+ Timeout: ---
+ Incremental: ---
+ Compression: ---
+ Status: Completed
+ Progress: ---
+ Created-time: 01/11/2020 22:20 PM
+ Sync-commence-time: 01/11/2020 22:20 PM
+ Split-time: 01/11/2020 22:20 PM
+ Completed-time: 01/11/2020 22:21 PM
+ Description: ---
+
+ Pair-ID: %s
+ Name: Cinder-Migrate
+ Source-Device: DEEC
+ Source: %s
+ Source-Type: LV-Partition
+ Source-Name: %s
+ Source-LV: 5DE94FF775D81C30
+ Source-VS: 2C482316298F7A4E
+ Source-Mapped: Yes
+ Target-Device: DEEC
+ Target: %s
+ Target-Type: LV-Partition
+ Target-Name: %s
+ Target-LV: 5DE94FF775D81C30
+ Target-VS: 033EA1FA4EA193EB
+ Target-Mapped: No
+ Type: Mirror
+ Priority: Normal
+ Timeout: ---
+ Incremental: ---
+ Compression: ---
+ Status: Mirror
+ Progress: ---
+ Created-time: 01/11/2020 22:20 PM
+ Sync-commence-time: 01/11/2020 22:20 PM
+ Split-time: 01/11/2020 22:20 PM
+ Completed-time: 01/11/2020 22:21 PM
+ Description: ---
+
+ Pair-ID: %s
+ Name: Cinder-Migrate
+ Source-Device: DEEC
+ Source: %s
+ Source-Type: LV-Partition
+ Source-Name: %s
+ Source-LV: 5DE94FF775D81C30
+ Source-VS: 2C482316298F7A4E
+ Source-Mapped: No
+ Target-Device: DEEC
+ Target: %s
+ Target-Type: LV-Partition
+ Target-Name: %s
+ Target-LV: 5DE94FF775D81C30
+ Target-VS: 714B80F0335F6E52
+ Target-Mapped: Yes
+ Type: Mirror
+ Priority: Normal
+ Timeout: ---
+ Incremental: ---
+ Compression: ---
+ Status: Mirror
+ Progress: ---
+ Created-time: 01/11/2020 22:20 PM
+ Sync-commence-time: 01/11/2020 22:20 PM
+ Split-time: 01/11/2020 22:20 PM
+ Completed-time: 01/11/2020 22:21 PM
+ Description: ---
+
+CLI: Successful: 3 replication job(s) shown
+Return: 0x0000
+"""
+        return msg % (self.fake_partition_id[2],
+                      self.fake_partition_id[3],
+                      self.fake_pair_id[0],
+                      self.fake_partition_id[0],
+                      self.fake_volume_id[0].replace('-', ''),
+                      self.fake_partition_id[1],
+                      self.fake_volume_id[1].replace('-', ''),
+                      self.fake_pair_id[1],
+                      self.fake_partition_id[4],
+                      self.fake_volume_id[0].replace('-', ''),
+                      self.fake_partition_id[5],
+                      self.fake_volume_id[1].replace('-', ''))
+
+    def get_test_show_lv(self):
+        return (0, [{
+            'Name': 'LV-1',
+            'LD-amount': '1',
+            'Available': '841978 MB',
+            'ID': self.fake_lv_id[0],
+            'Progress': '---',
+            'Size': '857982 MB',
+            'Status': 'On-line',
+        }])
+
+    def get_fake_show_lv(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ ID  Name  LD-amount  Size       Available  Progress  Status
+--------------------------------------------------------------
+ %s  LV-1  1          857982 MB  841978 MB  ---       On-line
+
+CLI: Successful: 1 Logical Volumes(s) shown
+Return: 0x0000
+"""
+        return msg % self.fake_lv_id[0]
+
+    def get_test_show_lv_detail(self):
+        return (0, [{
+            'Policy': 'Default',
+            'Status': 'On-line',
+            'ID': self.fake_lv_id[0],
+            'Available': '841978 MB',
+            'Expandable-size': '0 MB',
+            'Name': 'LV-1',
+            'Size': '857982 MB',
+            'LD-amount': '1',
+            'Progress': '---',
+        }])
+
+    def get_fake_show_lv_detail(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ ID: %s
+ Name: LV-1
+ LD-amount: 1
+ Size: 857982 MB
+ Available: 841978 MB
+ Expandable-size: 0 MB
+ Policy: Default
+ Progress: ---
+ Status: On-line
+
+CLI: Successful: 1 Logical Volumes(s) shown
+Return: 0x0000
+"""
+        return msg % self.fake_lv_id[0]
+
+    def get_test_show_lv_tier_for_migration(self):
+        return (0, [{
+            'LV-Name': 'TierLV',
+            'LV-ID': self.fake_lv_id[1],
+            'Tier': '0',
+            'Size': '418.93 GB',
+            'Used': '10 GB(2.4%)',
+            'Data Service': '0 MB(0.0%)',
+            'Reserved Ratio': '10.0%',
+        }, {
+            'LV-Name': 'TierLV',
+            'LV-ID': self.fake_lv_id[1],
+            'Tier': '3',
+            'Size': '931.02 GB',
+            'Used': '0 MB(0.0%)',
+            'Data Service': '0 MB(0.0%)',
+            'Reserved Ratio': '0.0%',
+        }])
+
+    def get_test_show_lv_tier(self):
+        return (0, [{
+            'LV-Name': 'TierLV',
+            'LV-ID': self.fake_lv_id[0],
+            'Tier': '0',
+            'Size': '418.93 GB',
+            'Used': '10 GB(2.4%)',
+            'Data Service': '0 MB(0.0%)',
+            'Reserved Ratio': '10.0%',
+        }, {
+            'LV-Name': 'TierLV',
+            'LV-ID': self.fake_lv_id[0],
+            'Tier': '3',
+            'Size': '931.02 GB',
+            'Used': '0 MB(0.0%)',
+            'Data Service': '0 MB(0.0%)',
+            'Reserved Ratio': '0.0%',
+        }])
+
+    def get_fake_show_lv_tier(self):
+        msg = """
+CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected.
+Return: 0x0000
+
+ LV-Name  LV-ID  Tier  Size       Used          Data Service   Reserved Ratio
+------------------------------------------------------------------------------
+ TierLV   %s     0     418.93 GB  10 GB(2.4%%)  0 MB(0.0%%)    10.0%%
+ TierLV   %s     3     931.02 GB  0 MB(0.0%%)   0 MB(0.0%%)    0.0%%
+
+CLI: Successful: 2 lv tiering(s) shown
+Return: 0x0000
+"""
+        return msg % (self.fake_lv_id[0],
+                      self.fake_lv_id[0])
+
+    def get_test_show_device(self):
+        return (0, [{
+            'ID': self.fake_system_id[0],
+            'Connected-IP': self.fake_manage_port_ip[0],
+            'Name': '---',
+            'Index': '0*',
+            'JBOD-ID': 'N/A',
+            'Capacity': '1.22 TB',
+            'Model': self.fake_model[0],
+            'Service-ID': '8445676',
+        }])
+
+    def get_fake_show_device(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ Index  ID     Model  Name  Connected-IP  JBOD-ID  Capacity  Service-ID
+------------------------------------------------------------------------
+ 0*     %s     %s     ---   %s            N/A      1.22 TB   8445676
+
+CLI: Successful: 1 device(s) found
+Return: 0x0000
+"""
+        return msg % (self.fake_system_id[0],
+                      self.fake_model[0],
+                      self.fake_manage_port_ip[0])
+
+    def get_test_show_channel_single(self):
+        return (0, [{
+            'ID': '112',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '0',
+            'MCS': 'N/A',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '1',
+            'MCS': '0',
+            'curClock': '---',
+        }])
+
+    def get_test_show_channel_with_mcs(self):
+        return (0, [{
+            'ID': '112',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '0',
+            'MCS': 'N/A',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '1',
+            'MCS': '1',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '2',
+            'MCS': '1',
+            'curClock': '---',
+        }, {
+            'ID': '---',
+            'defClock': '6.0 Gbps',
+            'Type': 'SAS',
+            'Mode': 'Drive',
+            'Width': 'SAS',
+            'Ch': '3',
+            'MCS': 'N/A',
+            'curClock': '6.0 Gbps',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '4',
+            'MCS': '2',
+            'curClock': '---',
+        }, {
+            'ID': '112',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '5',
+            'MCS': 'N/A',
+            'curClock': '---',
+        }])
+
+    def get_test_show_channel_without_mcs(self):
+        return (0, [{
+            'ID': '112',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '0',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '1',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '2',
+            'curClock': '---',
+        }, {
+            'ID': '---',
+            'defClock': '6.0 Gbps',
+            'Type': 'SAS',
+            'Mode': 'Drive',
+            'Width': 'SAS',
+            'Ch': '3',
+            'curClock': '6.0 Gbps',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '4',
+            'curClock': '---',
+        }, {
+            'ID': '112',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '5',
+            'curClock': '---',
+        }])
+
+    def get_test_show_channel_with_diff_target_id(self):
+        return (0, [{
+            'ID': '32',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '0',
+            'MCS': 'N/A',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '1',
+            'MCS': '0',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '2',
+            'MCS': '1',
+            'curClock': '---',
+        }, {
+            'ID': '---',
+            'defClock': '6.0 Gbps',
+            'Type': 'SAS',
+            'Mode': 'Drive',
+            'Width': 'SAS',
+            'Ch': '3',
+            'MCS': 'N/A',
+            'curClock': '6.0 Gbps',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '4',
+            'MCS': '2',
+            'curClock': '---',
+        }, {
+            'ID': '48',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '5',
+            'MCS': 'N/A',
+            'curClock': '---',
+        }])
+
+    def get_test_show_channel(self):
+        return (0, [{
+            'ID': '112',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '0',
+            'MCS': 'N/A',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '1',
+            'MCS': '0',
+            'curClock': '---',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '2',
+            'MCS': '1',
+            'curClock': '---',
+        }, {
+            'ID': '---',
+            'defClock': '6.0 Gbps',
+            'Type': 'SAS',
+            'Mode': 'Drive',
+            'Width': 'SAS',
+            'Ch': '3',
+            'MCS': 'N/A',
+            'curClock': '6.0 Gbps',
+        }, {
+            'ID': '0',
+            'defClock': 'Auto',
+            'Type': 'NETWORK',
+            'Mode': 'Host',
+            'Width': 'iSCSI',
+            'Ch': '4',
+            'MCS': '2',
+            'curClock': '---',
+        }, {
+            'ID': '112',
+            'defClock': 'Auto',
+            'Type': 'FIBRE',
+            'Mode': 'Host',
+            'Width': '---',
+            'Ch': '5',
+            'MCS': 'N/A',
+            'curClock': '---',
+        }])
+
+    def get_fake_show_channel(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ Ch  Mode   Type     defClock  curClock  Width  ID   MCS
+---------------------------------------------------------
+ 0   Host   FIBRE    Auto      ---       ---    112  N/A
+ 1   Host   NETWORK  Auto      ---       iSCSI  0    0
+ 2   Host   NETWORK  Auto      ---       iSCSI  0    1
+ 3   Drive  SAS      6.0 Gbps  6.0 Gbps  SAS    ---  N/A
+ 4   Host   NETWORK  Auto      ---       iSCSI  0    2
+ 5   Host   FIBRE    Auto      ---       ---    112  N/A
+
+CLI: Successful: : 6 channel(s) shown
+Return: 0x0000
+"""
+        return msg
+
+    def get_test_show_channel_r_model(self):
+        return (0, [{
+            'Mode': 'Host',
+            'AID': '112',
+            'defClock': 'Auto',
+            'MCS': 'N/A',
+            'Ch': '0',
+            'BID': '113',
+            'curClock': '---',
+            'Width': '---',
+            'Type': 'FIBRE',
+        }, {
+            'Mode': 'Host',
+            'AID': '0',
+            'defClock': 'Auto',
+            'MCS': '0',
+            'Ch': '1',
+            'BID': '1',
+            'curClock': '---',
+            'Width': 'iSCSI',
+            'Type': 'NETWORK',
+        }, {
+            'Mode': 'Host',
+            'AID': '0',
+            'defClock': 'Auto',
+            'MCS': '1',
+            'Ch': '2',
+            'BID': '1',
+            'curClock': '---',
+            'Width': 'iSCSI',
+            'Type': 'NETWORK',
+        }, {
+            'Mode': 'Drive',
+            'AID': '---',
+            'defClock': '6.0 Gbps',
+            'MCS': 'N/A',
+            'Ch': '3',
+            'BID': '---',
+            'curClock': '6.0 Gbps',
+            'Width': 'SAS',
+            'Type': 'SAS',
+        }, {
+            'Mode': 'Host',
+            'AID': '0',
+            'defClock': 'Auto',
+            'MCS': '2',
+            'Ch': '4',
+            'BID': '1',
+            'curClock': '---',
+            'Width': 'iSCSI',
+            'Type': 'NETWORK',
+        }, {
+            'Mode': 'Host',
+            'AID': '112',
+            'defClock': 'Auto',
+            'MCS': 'N/A',
+            'Ch': '5',
+            'BID': '113',
+            'curClock': '---',
+            'Width': '---',
+            'Type': 'FIBRE',
+        }])
+
+    def get_fake_show_channel_r_model(self):
+        msg = """
+CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected.
+Return: 0x0000
+
+ Ch    Mode   Type     defClock  curClock  Width  AID  BID  MCS
+----------------------------------------------------------------
+ 0     Host   FIBRE    Auto      ---       ---    112  113  N/A
+ 1     Host   NETWORK  Auto      ---       iSCSI  0    1    0
+ 2     Host   NETWORK  Auto      ---       iSCSI  0    1    1
+ 3     Drive  SAS      6.0 Gbps  6.0 Gbps  SAS    ---  ---  N/A
+ 4     Host   NETWORK  Auto      ---       iSCSI  0    1    2
+ 5     Host   FIBRE    Auto      ---       ---    112  113  N/A
+
+CLI: Successful: : 9 channel(s) shown
+Return: 0x0000
+"""
+        return msg
+
+    def get_show_map_with_lun_map_on_zoning(self):
+        return (0, [{
+            'Ch': '0',
+            'LUN': '0',
+            'Media': 'PART',
+            'Host-ID': self.fake_initiator_wwpns[0],
+            'Target': '112',
+            'Name': 'Part-1',
+            'ID': self.fake_partition_id[0],
+        }])
+
+    def get_test_show_map(self, partition_id=None, channel_id=None):
+        if partition_id and channel_id:
+            return (0, [{
+                'Ch': channel_id,
+                'LUN': '0',
+                'Media': 'PART',
+                'Host-ID': '---',
+                'Target': '0',
+                'Name': 'Part-1',
+                'ID': partition_id,
+            }, {
+                'Ch': channel_id,
+                'LUN': '1',
+                'Media': 'PART',
+                'Host-ID': '---',
+                'Target': '0',
+                'Name': 'Part-1',
+                'ID': partition_id,
+            }])
+        else:
+            return (0, [{
+                'Ch': '1',
+                'LUN': '0',
+                'Media': 'PART',
+                'Host-ID': '---',
+                'Target': '0',
+                'Name': 'Part-1',
+                'ID': self.fake_partition_id[0],
+            }, {
+                'Ch': '1',
+                'LUN': '1',
+                'Media': 'PART',
+                'Host-ID': '---',
+                'Target': '0',
+                'Name': 'Part-1',
+                'ID': self.fake_partition_id[0],
+            }, {
+                'Ch': '4',
+                'LUN': '0',
+                'Media': 'PART',
+                'Host-ID': '---',
+                'Target': '0',
+                'Name': 'Part-1',
+                'ID': self.fake_partition_id[0],
+            }])
+
+    def get_test_show_map_multimap(self):
+        return (0, [{
+            'Ch': '1',
+            'LUN': '0',
+            'Media': 'PART',
+            'Host-ID': '---',
+            'Target': '0',
+            'Name': 'Part-1',
+            'ID': self.fake_partition_id[0],
+        }, {
+            'Ch': '1',
+            'LUN': '1',
+            'Media': 'PART',
+            'Host-ID': '---',
+            'Target': '0',
+            'Name': 'Part-1',
+            'ID': self.fake_partition_id[0],
+        }, {
+            'Ch': '4',
+            'LUN': '0',
+            'Media': 'PART',
+            'Host-ID': '210000E08B0AADE1',
+            'Target': '0',
+            'Name': 'Part-1',
+            'ID': self.fake_partition_id[0],
+        }, {
+            'Ch': '4',
+            'LUN': '0',
+            'Media': 'PART',
+            'Host-ID': '210000E08B0AADE2',
+            'Target': '0',
+            'Name': 'Part-1',
+            'ID': self.fake_partition_id[0],
+        }])
+
+    def get_fake_show_map(self):
+        msg = """
+CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
+Return: 0x0000
+
+ Ch  Target  LUN  Media  Name    ID  Host-ID
+-----------------------------------------------------------
+ 1   0       0    PART   Part-1  %s  ---
+ 1   0       1    PART   Part-1  %s  ---
+ 4   0       0    PART   Part-1  %s  ---
+
+CLI: Successful: 3 mapping(s) shown
+Return: 0x0000
+"""
+        return msg % (self.fake_partition_id[0],
+                      self.fake_partition_id[0],
+                      self.fake_partition_id[0])
+
+    def get_test_show_license(self):
+        return (0, {
+            'Local Volume Copy': {
+                'Support': False,
+                'Amount': '8/256',
+            },
+            'Synchronous Remote Mirror': {
+                'Support': False,
+                'Amount': '8/256',
+            },
+            'Snapshot': {
+                'Support': False,
+                'Amount': '1024/16384',
+            },
+            'Self-Encryption Drives': {
+                'Support': False,
+                'Amount': '---',
+            },
+            'Compression': {
+                'Support': False,
+                'Amount': '---',
+            },
+            'Local volume Mirror': {
+                'Support': False,
+                'Amount': '8/256',
+            },
+            'Storage Tiering': {
+                'Support': False,
+                'Amount': '---',
+            },
+            'Asynchronous Remote Mirror': {
+                'Support': False,
+                'Amount': '8/256',
+            },
+            'Scale-out': {
+                'Support': False,
+                'Amount': 'Not Support',
+            },
+            'Thin Provisioning': {
+                'Support': False,
+                'Amount': '---',
+            },
+            'Max JBOD': {
+                'Support': False,
+                'Amount': '15',
+            },
+            'EonPath': {
+                'Support': False,
+                'Amount': '---',
+            }
+        })
+
+    def get_fake_show_license(self):
+        msg = """
+CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected.
+Return: 0x0000
+
+ License                     Amount(Partition/Subsystem)  Expired
+------------------------------------------------------------------
+ EonPath                     ---                          Expired
+ Scale-out                   Not Support                  ---
+ Snapshot                    1024/16384                   Expired
+ Local Volume Copy           8/256                        Expired
+ Local volume Mirror         8/256                        Expired
+ Synchronous Remote Mirror   8/256                        Expired
+ Asynchronous Remote Mirror  8/256                        Expired
+ Compression                 ---                          Expired
+ Thin Provisioning           ---                          Expired
+ Storage Tiering             ---                          Expired
+ Max JBOD                    15                           Expired
+ Self-Encryption Drives      ---                          Expired
+
+CLI: Successful
+Return: 0x0000
+"""
+        return msg
+
+    def get_test_show_wwn_with_g_model(self):
+        return (0, [{
+            'ID': 'ID:112',
+            'WWPN': self.fake_target_wwpns[0],
+            'CH': '0',
+            'WWNN': self.fake_target_wwnns[0],
+        }, {
+            'ID': 'ID:112',
+            'WWPN': self.fake_target_wwpns[1],
+            'CH': '5',
+            'WWNN': self.fake_target_wwnns[0],
+        }])
+
+    def get_test_show_wwn(self):
+        return (0, [{
+            'ID': 'AID:112',
+            'WWPN': self.fake_target_wwpns[0],
+            'CH': '0',
+            'WWNN': self.fake_target_wwnns[0],
+        }, {
+            'ID': 'BID:113',
+            'WWPN': self.fake_target_wwpns[2],
+            'CH': '0',
+            'WWNN': self.fake_target_wwnns[1],
+        }, {
+            'ID': 'AID:112',
+            'WWPN': self.fake_target_wwpns[1],
+            'CH': '5',
+            'WWNN': self.fake_target_wwnns[0],
+        }, {
+            'ID': 'BID:113',
+            'WWPN': self.fake_target_wwpns[3],
+            'CH': '5',
+            'WWNN': self.fake_target_wwnns[1],
+        }])
+
+    def get_fake_show_wwn(self):
+        msg = """
+CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected.
+Return: 0x0000
+
+WWN entries in controller for host channels:
+ CH  ID       WWNN  WWPN
+-------------------------------------------------
+ 0   AID:112  %s    %s
+ 0   BID:113  %s    %s
+ 5   AID:112  %s    %s
+ 5   BID:113  %s    %s
+
+CLI: Successful
+Return: 0x0000
+"""
+        return msg % (self.fake_target_wwnns[0], self.fake_target_wwpns[0],
+                      self.fake_target_wwnns[1], self.fake_target_wwpns[2],
+                      self.fake_target_wwnns[0], self.fake_target_wwpns[1],
+                      self.fake_target_wwnns[1], self.fake_target_wwpns[3])
+
+    def get_test_show_iqn(self):
+        return (0, [{
+            'Name': self.fake_initiator_iqn[0][-16:],
+            'IQN': self.fake_initiator_iqn[0],
+            'User': '---',
+            'Password': '---',
+            'Target': '---',
+            'Target-Password': '---',
+            'IP': '0.0.0.0',
+            'Mask': '0.0.0.0',
+        }])
+
+    def get_fake_show_iqn(self):
+        msg = """
+CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected.
+Return: 0x0000
+
+Detected host IQN:
+ IQN
+----------------------------------------
+ %s
+
+
+List of initiator IQN(s):
+--------------------------
+ Name: %s
+ IQN: %s
+ User: ---
+ Password: ---
+ Target: ---
+ Target-Password: ---
+ IP: 0.0.0.0
+ Mask: 0.0.0.0
+
+CLI: Successful: 1 initiator iqn(s) shown
+Return: 0x0000
+"""
+        return msg % (self.fake_initiator_iqn[0],
+                      self.fake_initiator_iqn[0][-16:],
+                      self.fake_initiator_iqn[0])
+
+    def get_fake_discovery(self, target_iqns, target_portals):
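+        # Emulate iSCSI send-targets discovery output: one
+        # '<portal>,1 <iqn>' line per target portal.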
+        template = '%s,1 %s'
+
+        if len(target_iqns) == 1:
+            result = template % (target_portals[0], target_iqns[0])
+            return (0, result)
+
+        result = []
+        for i in range(len(target_iqns)):
+            result.append(template % (
+                target_portals[i], target_iqns[i]))
+        return (0, '\n'.join(result))
+
+
+class InfortrendCLITestCase(test.TestCase):
+
+    CommandList = ['CreateLD', 'CreateLV',
+                   'CreatePartition', 'DeletePartition', 'SetPartition',
+                   'CreateMap', 'DeleteMap',
+                   'CreateSnapshot', 'DeleteSnapshot',
+                   'CreateReplica', 'DeleteReplica',
+                   'CreateIQN', 'DeleteIQN',
+                   'ShowLD', 'ShowLV',
+                   'ShowPartition', 'ShowSnapshot',
+                   'ShowDevice', 'ShowChannel',
+                   'ShowDisk', 'ShowMap',
+                   'ShowNet', 'ShowLicense',
+                   'ShowWWN', 'ShowReplica',
+                   'ShowIQN']
+
+    def __init__(self, *args, **kwargs):
+        super(InfortrendCLITestCase, self).__init__(*args, **kwargs)
+        self.cli_data = InfortrendCLITestData()
+
+    def setUp(self):
+        super(InfortrendCLITestCase, self).setUp()
+
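+    # Helper: build the given CLI command class with a minimal config and
+    # stub its _execute to always return one canned (rc, out) result;
+    # cli_retry_time=1 keeps the retry loop to a single attempt.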
+    def _cli_set(self, command_cls, fake_result):
+        cli_conf = {
+            'path': '',
+            'password': '',
+            'ip': '',
+            'cli_retry_time': 1,
+        }
+        command = command_cls(cli_conf)
+
+        command._execute = mock.Mock(return_value=fake_result)
+
+        return command
+
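+    # Helper: queue several canned results; mock's side_effect hands them
+    # out one per _execute call, and cli_retry_time=5 lets the command
+    # retry up to five times.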
+    def _cli_multi_set(self, command_cls, fake_result_list):
+        cli_conf = {
+            'path': '',
+            'password': '',
+            'ip': '',
+            'cli_retry_time': 5,
+        }
+        command = command_cls(cli_conf)
+
+        command._execute = mock.Mock(side_effect=fake_result_list)
+
+        return command
+
+    def _test_command_succeed(self, command):
+
+        fake_cli_succeed = self.cli_data.get_fake_cli_succeed()
+        test_command = self._cli_set(command, fake_cli_succeed)
+
+        rc, out = test_command.execute()
+        self.assertEqual(0, rc)
+
+    def _test_command_failed(self, command):
+
+        fake_cli_failed = self.cli_data.get_fake_cli_failed()
+        test_command = self._cli_set(command, fake_cli_failed)
+
+        rc, out = test_command.execute()
+        self.assertEqual(int('0x000c', 16), rc)
+
+    def _test_command_failed_retry_succeed(self, log_error, command):
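+        # Two failed attempts ('No selected device', then 'No network')
+        # followed by a success: execute() should end with rc 0 and one
+        # logged error per failed retry.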
+
+        log_error.reset_mock()
+
+        LOG_ERROR_STR = (
+            'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s'
+        )
+
+        fake_result_list = [
+            self.cli_data.get_fake_cli_failed(),
+            self.cli_data.get_fake_cli_failed_with_network(),
+            self.cli_data.get_fake_cli_succeed(),
+        ]
+        test_command = self._cli_multi_set(command, fake_result_list)
+
+        rc, out = test_command.execute()
+        self.assertEqual(0, rc)
+
+        expect_log_error = [
+            mock.call(LOG_ERROR_STR, {
+                'retry': 1,
+                'method': test_command.__class__.__name__,
+                'rc': int('0x000c', 16),
+                'reason': 'No selected device',
+            }),
+            mock.call(LOG_ERROR_STR, {
+                'retry': 2,
+                'method': test_command.__class__.__name__,
+                'rc': int('0x000b', 16),
+                'reason': 'No network',
+            })
+        ]
+        log_error.assert_has_calls(expect_log_error)
+
+    def _test_command_failed_retry_timeout(self, log_error, command):
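+        # Five straight failures exhaust cli_retry_time=5, so execute()
+        # returns the rc/reason of the last attempt after logging an error
+        # for every retry.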
+
+        log_error.reset_mock()
+
+        LOG_ERROR_STR = (
+            'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s'
+        )
+
+        fake_result_list = [
+            self.cli_data.get_fake_cli_failed(),
+            self.cli_data.get_fake_cli_failed_with_network(),
+            self.cli_data.get_fake_cli_failed_with_network(),
+            self.cli_data.get_fake_cli_failed(),
+            self.cli_data.get_fake_cli_failed_with_network(),
+        ]
+        test_command = self._cli_multi_set(command, fake_result_list)
+
+        rc, out = test_command.execute()
+        self.assertEqual(int('0x000b', 16), rc)
+        self.assertEqual('No network', out)
+
+        expect_log_error = [
+            mock.call(LOG_ERROR_STR, {
+                'retry': 1,
+                'method': test_command.__class__.__name__,
+                'rc': int('0x000c', 16),
+                'reason': 'No selected device',
+            }),
+            mock.call(LOG_ERROR_STR, {
+                'retry': 2,
+                'method': test_command.__class__.__name__,
+                'rc': int('0x000b', 16),
+                'reason': 'No network',
+            }),
+            mock.call(LOG_ERROR_STR, {
+                'retry': 3,
+                'method': test_command.__class__.__name__,
+                'rc': int('0x000b', 16),
+                'reason': 'No network',
+            }),
+            mock.call(LOG_ERROR_STR, {
+                'retry': 4,
+                'method': test_command.__class__.__name__,
+                'rc': int('0x000c', 16),
+                'reason': 'No selected device',
+            }),
+            mock.call(LOG_ERROR_STR, {
+                'retry': 5,
+                'method': test_command.__class__.__name__,
+                'rc': int('0x000b', 16),
+                'reason': 'No network',
+            })
+        ]
+        log_error.assert_has_calls(expect_log_error)
+
+    def _test_show_command(self, fake_data, test_data, command, *params):
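+        # Feed the raw CLI console output to the command's parser and
+        # compare the parsed (rc, data) result with the expected values.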
+
+        test_command = self._cli_set(command, fake_data)
+
+        rc, out = test_command.execute(*params)
+
+        self.assertEqual(test_data[0], rc)
+
+        if isinstance(out, list):
+            for i in range(len(test_data[1])):
+                self.assertDictMatch(out[i], test_data[1][i])
+        else:
+            self.assertDictMatch(out, test_data[1])
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_cli_all_command_execute(self):
+
+        for command in self.CommandList:
+            self._test_command_succeed(getattr(cli, command))
+            self._test_command_failed(getattr(cli, command))
+
+    @mock.patch.object(cli.LOG, 'error')
+    def test_cli_all_command_execute_retry_succeed(self, log_error):
+
+        for command in self.CommandList:
+            self._test_command_failed_retry_succeed(
+                log_error, getattr(cli, command))
+
+    @mock.patch.object(cli.LOG, 'error')
+    def test_cli_all_command_execute_retry_timeout(self, log_error):
+
+        for command in self.CommandList:
+            self._test_command_failed_retry_timeout(
+                log_error, getattr(cli, command))
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_snapshot(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_snapshot(),
+            self.cli_data.get_test_show_snapshot(),
+            cli.ShowSnapshot)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_snapshot_detail(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_snapshot_detail(),
+            self.cli_data.get_test_show_snapshot_detail(),
+            cli.ShowSnapshot, '-l')
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_net(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_net(),
+            self.cli_data.get_test_show_net(),
+            cli.ShowNet)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_net_detail(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_net_detail(),
+            self.cli_data.get_test_show_net_detail(),
+            cli.ShowNet, '-l')
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_partition(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_partition(),
+            self.cli_data.get_test_show_partition(),
+            cli.ShowPartition)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_partition_detail(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_partition_detail(),
+            self.cli_data.get_test_show_partition_detail(),
+            cli.ShowPartition, '-l')
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_lv(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_lv(),
+            self.cli_data.get_test_show_lv(),
+            cli.ShowLV)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_lv_detail(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_lv_detail(),
+            self.cli_data.get_test_show_lv_detail(),
+            cli.ShowLV, '-l')
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_lv_tier(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_lv_tier(),
+            self.cli_data.get_test_show_lv_tier(),
+            cli.ShowLV, 'tier')
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_device(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_device(),
+            self.cli_data.get_test_show_device(),
+            cli.ShowDevice)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_channel(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_channel(),
+            self.cli_data.get_test_show_channel(),
+            cli.ShowChannel)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_channel_with_r_model(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_channel_r_model(),
+            self.cli_data.get_test_show_channel_r_model(),
+            cli.ShowChannel)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_map(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_map(),
+            self.cli_data.get_test_show_map(),
+            cli.ShowMap)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_license(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_license(),
+            self.cli_data.get_test_show_license(),
+            cli.ShowLicense)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_replica_detail(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_replica_detail(),
+            self.cli_data.get_test_show_replica_detail(),
+            cli.ShowReplica, '-l')
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_wwn(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_wwn(),
+            self.cli_data.get_test_show_wwn(),
+            cli.ShowWWN)
+
+    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
+    def test_show_iqn(self):
+        self._test_show_command(
+            self.cli_data.get_fake_show_iqn(),
+            self.cli_data.get_test_show_iqn(),
+            cli.ShowIQN)
diff --git a/cinder/tests/unit/test_infortrend_common.py b/cinder/tests/unit/test_infortrend_common.py
new file mode 100644 (file)
index 0000000..4ec5e1f
--- /dev/null
@@ -0,0 +1,1988 @@
+# Copyright (c) 2015 Infortrend Technology, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit import test_infortrend_cli
+from cinder.tests.unit import utils
+from cinder.volume import configuration
+from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli
+
+SUCCEED = (0, '')
+FAKE_ERROR_RETURN = (-1, '')
+
+
+class InfortrendTestCase(test.TestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(InfortrendTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(InfortrendTestCase, self).setUp()
+        self.cli_data = test_infortrend_cli.InfortrendCLITestData()
+
+        self.configuration = configuration.Configuration(None)
+        self.configuration.append_config_values = mock.Mock(return_value=0)
+        self.configuration.safe_get = self._fake_safe_get
+
+    def _fake_safe_get(self, key):
+        return getattr(self.configuration, key)
+
+    def _driver_setup(self, mock_commands, configuration=None):
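+        # Build the driver under test and swap _execute_command for a
+        # dispatcher over the per-command mock results.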
+        if configuration is None:
+            configuration = self.configuration
+        self.driver = self._get_driver(configuration)
+
+        mock_commands_execute = self._mock_command_execute(mock_commands)
+        mock_cli = mock.Mock(side_effect=mock_commands_execute)
+
+        self.driver._execute_command = mock_cli
+
+    def _get_driver(self, conf):
+        raise NotImplementedError
+
+    def _mock_command_execute(self, mock_commands):
+        def fake_execute_command(cli_type, *args, **kwargs):
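+            # Dispatch on the mocked value: consume lists one result per
+            # call, return tuples as-is, and invoke callables with the
+            # CLI arguments.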
+            if cli_type in mock_commands:
+                if isinstance(mock_commands[cli_type], list):
+                    return mock_commands[cli_type].pop(0)
+                elif isinstance(mock_commands[cli_type], tuple):
+                    return mock_commands[cli_type]
+                else:
+                    return mock_commands[cli_type](*args, **kwargs)
+            return FAKE_ERROR_RETURN
+        return fake_execute_command
+
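+    # ShowLV is issued both plainly and with a 'tier' argument; these
+    # side-effect helpers return the canned output matching each form.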
+    def _mock_show_lv_for_migrate(self, *args, **kwargs):
+        if 'tier' in args:
+            return self.cli_data.get_test_show_lv_tier_for_migration()
+        return self.cli_data.get_test_show_lv()
+
+    def _mock_show_lv(self, *args, **kwargs):
+        if 'tier' in args:
+            return self.cli_data.get_test_show_lv_tier()
+        return self.cli_data.get_test_show_lv()
+
+    def _assert_cli_has_calls(self, expect_cli_cmd):
+        self.driver._execute_command.assert_has_calls(expect_cli_cmd)
+
+
+class InfortrendFCCommonTestCase(InfortrendTestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(InfortrendFCCommonTestCase, self).setUp()
+
+        self.configuration.volume_backend_name = 'infortrend_backend_1'
+        self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
+        self.configuration.san_password = '111111'
+        self.configuration.infortrend_provisioning = 'full'
+        self.configuration.infortrend_tiering = '0'
+        self.configuration.infortrend_pools_name = 'LV-1, LV-2'
+        self.configuration.infortrend_slots_a_channels_id = '0,5'
+        self.configuration.infortrend_slots_b_channels_id = '0,5'
+        self.configuration.infortrend_cli_timeout = 30
+
+    def _get_driver(self, conf):
+        return common_cli.InfortrendCommon('FC', configuration=conf)
+
+    def test_normal_channel(self):
+
+        test_map_dict = {
+            'slot_a': {'0': [], '5': []},
+            'slot_b': {},
+        }
+        test_target_dict = {
+            'slot_a': {'0': '112', '5': '112'},
+            'slot_b': {},
+        }
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._init_map_info(True)
+
+        self.assertDictMatch(self.driver.map_dict, test_map_dict)
+        self.assertDictMatch(self.driver.target_dict, test_target_dict)
+
+    def test_normal_channel_with_r_model(self):
+
+        test_map_dict = {
+            'slot_a': {'0': [], '5': []},
+            'slot_b': {'0': [], '5': []},
+        }
+        test_target_dict = {
+            'slot_a': {'0': '112', '5': '112'},
+            'slot_b': {'0': '113', '5': '113'},
+        }
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._init_map_info(True)
+
+        self.assertDictMatch(self.driver.map_dict, test_map_dict)
+        self.assertDictMatch(self.driver.target_dict, test_target_dict)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_fc
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'CreateMap': SUCCEED,
+            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
+        }
+        self._driver_setup(mock_commands)
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        self.assertDictMatch(properties, self.cli_data.test_fc_properties)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection_specific_channel(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_fc
+        configuration = copy.copy(self.configuration)
+        configuration.infortrend_slots_a_channels_id = '5'
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'CreateMap': SUCCEED,
+            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
+        }
+        self._driver_setup(mock_commands, configuration)
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        self.assertDictMatch(
+            properties, self.cli_data.test_fc_properties_with_specific_channel)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection_with_diff_target_id(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_fc
+        test_initiator_wwpns = test_connector['wwpns']
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        configuration = copy.copy(self.configuration)
+        configuration.infortrend_slots_a_channels_id = '5'
+
+        mock_commands = {
+            'ShowChannel':
+                self.cli_data.get_test_show_channel_with_diff_target_id(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'CreateMap': SUCCEED,
+            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
+        }
+        self._driver_setup(mock_commands, configuration)
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        expect_cli_cmd = [
+            mock.call('ShowChannel'),
+            mock.call('ShowMap'),
+            mock.call('ShowWWN'),
+            mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
+                      'wwn=%s' % test_initiator_wwpns[0]),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+        self.assertDictMatch(
+            properties, self.cli_data.test_fc_properties_with_specific_channel)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection_multipath_with_r_model(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = copy.deepcopy(self.cli_data.test_connector_fc)
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'CreateMap': SUCCEED,
+            'ShowWWN': self.cli_data.get_test_show_wwn(),
+        }
+        self._driver_setup(mock_commands)
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        self.assertDictMatch(
+            properties, self.cli_data.test_fc_properties_multipath_r_model)
+
+    def test_initialize_connection_with_get_wwn_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_fc
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'CreateMap': SUCCEED,
+            'ShowWWN': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.initialize_connection,
+            test_volume,
+            test_connector)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection_with_zoning(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_fc
+        test_initiator_wwpns = test_connector['wwpns']
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
+        test_lookup_map = self.cli_data.fake_lookup_map
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'CreateMap': SUCCEED,
+            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
+        }
+        self._driver_setup(mock_commands)
+        self.driver.fc_lookup_service = mock.Mock()
+        get_device_mapping_from_network = (
+            self.driver.fc_lookup_service.get_device_mapping_from_network
+        )
+        get_device_mapping_from_network.return_value = test_lookup_map
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        get_device_mapping_from_network.assert_has_calls(
+            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])
+
+        expect_cli_cmd = [
+            mock.call('ShowChannel'),
+            mock.call('ShowMap'),
+            mock.call('ShowWWN'),
+            mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
+                      'wwn=%s' % test_initiator_wwpns[0]),
+            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
+                      'wwn=%s' % test_initiator_wwpns[0]),
+            mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
+                      'wwn=%s' % test_initiator_wwpns[1]),
+            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
+                      'wwn=%s' % test_initiator_wwpns[1]),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+        self.assertDictMatch(
+            properties, self.cli_data.test_fc_properties_zoning)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection_with_zoning_r_model(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_fc
+        test_initiator_wwpns = test_connector['wwpns']
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
+        test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
+        test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
+        test_lookup_map = self.cli_data.fake_lookup_map_r_model
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'CreateMap': SUCCEED,
+            'ShowWWN': self.cli_data.get_test_show_wwn(),
+        }
+        self._driver_setup(mock_commands)
+        self.driver.fc_lookup_service = mock.Mock()
+        get_device_mapping_from_network = (
+            self.driver.fc_lookup_service.get_device_mapping_from_network
+        )
+        get_device_mapping_from_network.return_value = test_lookup_map
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        get_device_mapping_from_network.assert_has_calls(
+            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])
+
+        expect_cli_cmd = [
+            mock.call('ShowChannel'),
+            mock.call('ShowMap'),
+            mock.call('ShowWWN'),
+            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
+                      'wwn=%s' % test_initiator_wwpns[0]),
+            mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
+                      'wwn=%s' % test_initiator_wwpns[0]),
+            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
+                      'wwn=%s' % test_initiator_wwpns[1]),
+            mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
+                      'wwn=%s' % test_initiator_wwpns[1]),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+        self.assertDictMatch(
+            properties, self.cli_data.test_fc_properties_zoning_r_model)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_terminate_connection(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_connector = self.cli_data.test_connector_fc
+
+        mock_commands = {
+            'DeleteMap': SUCCEED,
+            'ShowMap': self.cli_data.get_test_show_map(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.terminate_connection(test_volume, test_connector)
+
+        expect_cli_cmd = [
+            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
+            mock.call('ShowMap'),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_terminate_connection_with_zoning(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_connector = self.cli_data.test_connector_fc
+        test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
+        test_lookup_map = self.cli_data.fake_lookup_map
+
+        mock_commands = {
+            'DeleteMap': SUCCEED,
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
+        }
+        self._driver_setup(mock_commands)
+        self.driver.map_dict = {
+            'slot_a': {'0': [], '5': []},
+            'slot_b': {},
+        }
+        self.driver.fc_lookup_service = mock.Mock()
+        get_device_mapping_from_network = (
+            self.driver.fc_lookup_service.get_device_mapping_from_network
+        )
+        get_device_mapping_from_network.return_value = test_lookup_map
+
+        conn_info = self.driver.terminate_connection(
+            test_volume, test_connector)
+
+        get_device_mapping_from_network.assert_has_calls(
+            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])
+        expect_cli_cmd = [
+            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
+            mock.call('ShowMap'),
+            mock.call('ShowWWN'),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+        self.assertDictMatch(
+            conn_info, self.cli_data.test_fc_terminate_conn_info)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_terminate_connection_with_zoning_and_lun_map_exist(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_connector = self.cli_data.test_connector_fc
+
+        mock_commands = {
+            'DeleteMap': SUCCEED,
+            'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(),
+        }
+        self._driver_setup(mock_commands)
+        self.driver.map_dict = {
+            'slot_a': {'0': [], '5': []},
+            'slot_b': {},
+        }
+        self.driver.target_dict = {
+            'slot_a': {'0': '112', '5': '112'},
+            'slot_b': {},
+        }
+        self.driver.fc_lookup_service = mock.Mock()
+
+        conn_info = self.driver.terminate_connection(
+            test_volume, test_connector)
+
+        expect_cli_cmd = [
+            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
+            mock.call('ShowMap'),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+        self.assertIsNone(conn_info)
+
+
+class InfortrendiSCSICommonTestCase(InfortrendTestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(InfortrendiSCSICommonTestCase, self).setUp()
+
+        self.configuration.volume_backend_name = 'infortrend_backend_1'
+        self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
+        self.configuration.san_password = '111111'
+        self.configuration.infortrend_provisioning = 'full'
+        self.configuration.infortrend_tiering = '0'
+        self.configuration.infortrend_pools_name = 'LV-1, LV-2'
+        self.configuration.infortrend_slots_a_channels_id = '1,2,4'
+        self.configuration.infortrend_slots_b_channels_id = '1,2,4'
+
+    def _get_driver(self, conf):
+        return common_cli.InfortrendCommon('iSCSI', configuration=conf)
+
+    @mock.patch.object(common_cli.LOG, 'warning')
+    def test_create_map_warning_return_code(self, log_warning):
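+        # rc 20 is one of the return codes the driver downgrades to a
+        # warning (logged once) rather than treating the command as failed.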
+
+        FAKE_RETURN_CODE = (20, '')
+        mock_commands = {
+            'CreateMap': FAKE_RETURN_CODE,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._execute('CreateMap')
+        self.assertEqual(1, log_warning.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'warning')
+    def test_delete_map_warning_return_code(self, log_warning):
+
+        FAKE_RETURN_CODE = (11, '')
+        mock_commands = {
+            'DeleteMap': FAKE_RETURN_CODE,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._execute('DeleteMap')
+        self.assertEqual(1, log_warning.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'warning')
+    def test_create_iqn_warning_return_code(self, log_warning):
+
+        FAKE_RETURN_CODE = (20, '')
+        mock_commands = {
+            'CreateIQN': FAKE_RETURN_CODE,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._execute('CreateIQN')
+        self.assertEqual(1, log_warning.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'warning')
+    def test_delete_iqn_warning_return_code_has_map(self, log_warning):
+
+        FAKE_RETURN_CODE = (20, '')
+        mock_commands = {
+            'DeleteIQN': FAKE_RETURN_CODE,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._execute('DeleteIQN')
+        self.assertEqual(1, log_warning.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'warning')
+    def test_delete_iqn_warning_return_code_no_such_name(self, log_warning):
+
+        FAKE_RETURN_CODE = (11, '')
+        mock_commands = {
+            'DeleteIQN': FAKE_RETURN_CODE,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._execute('DeleteIQN')
+        self.assertEqual(1, log_warning.call_count)
+
+    def test_normal_channel(self):
+
+        test_map_dict = {
+            'slot_a': {'1': [], '2': [], '4': []},
+            'slot_b': {},
+        }
+        test_target_dict = {
+            'slot_a': {'1': '0', '2': '0', '4': '0'},
+            'slot_b': {},
+        }
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._init_map_info()
+
+        self.assertDictMatch(self.driver.map_dict, test_map_dict)
+        self.assertDictMatch(self.driver.target_dict, test_target_dict)
+
+    def test_normal_channel_with_multipath(self):
+
+        test_map_dict = {
+            'slot_a': {'1': [], '2': [], '4': []},
+            'slot_b': {'1': [], '2': [], '4': []},
+        }
+        test_target_dict = {
+            'slot_a': {'1': '0', '2': '0', '4': '0'},
+            'slot_b': {'1': '1', '2': '1', '4': '1'},
+        }
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver._init_map_info(multipath=True)
+
+        self.assertDictMatch(self.driver.map_dict, test_map_dict)
+        self.assertDictMatch(self.driver.target_dict, test_target_dict)
+
+    def test_specific_channel(self):
+
+        configuration = copy.copy(self.configuration)
+        configuration.infortrend_slots_a_channels_id = '2, 4'
+
+        test_map_dict = {
+            'slot_a': {'2': [], '4': []},
+            'slot_b': {},
+        }
+        test_target_dict = {
+            'slot_a': {'2': '0', '4': '0'},
+            'slot_b': {},
+        }
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+        }
+        self._driver_setup(mock_commands, configuration)
+
+        self.driver._init_map_info()
+
+        self.assertDictMatch(self.driver.map_dict, test_map_dict)
+        self.assertDictMatch(self.driver.target_dict, test_target_dict)
+
+    def test_update_mcs_dict(self):
+
+        configuration = copy.copy(self.configuration)
+        configuration.use_multipath_for_image_xfer = True
+
+        test_mcs_dict = {
+            'slot_a': {'1': ['1', '2'], '2': ['4']},
+            'slot_b': {},
+        }
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
+        }
+        self._driver_setup(mock_commands, configuration)
+
+        self.driver._init_map_info()
+
+        self.assertDictMatch(self.driver.mcs_dict, test_mcs_dict)
+
+    def test_mapping_info_with_mcs(self):
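+        # With MCS groups {'0': channels 1+2, '2': channel 4}, the driver
+        # is expected to map through the larger group '0' at LUN 2, the
+        # lowest LUN free on both of its channels.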
+
+        configuration = copy.copy(self.configuration)
+        configuration.use_multipath_for_image_xfer = True
+
+        fake_mcs_dict = {
+            'slot_a': {'0': ['1', '2'], '2': ['4']},
+            'slot_b': {},
+        }
+        lun_list = list(range(0, 127))
+        fake_map_dict = {
+            'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]},
+            'slot_b': {},
+        }
+
+        test_map_chl = {
+            'slot_a': ['1', '2'],
+        }
+        test_map_lun = ['2']
+        test_mcs_id = '0'
+        self.driver = self._get_driver(configuration)
+        self.driver.mcs_dict = fake_mcs_dict
+        self.driver.map_dict = fake_map_dict
+
+        map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
+
+        self.assertDictMatch(map_chl, test_map_chl)
+        self.assertEqual(test_map_lun, map_lun)
+        self.assertEqual(test_mcs_id, mcs_id)
+
+    def test_mapping_info_with_mcs_multi_group(self):
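+        # Of the two-channel groups '0' and '1', group '1' is expected to
+        # win: its channels share free LUN 1, lower than group '0', whose
+        # common free LUN is 2.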
+
+        configuration = copy.copy(self.configuration)
+        configuration.use_multipath_for_image_xfer = True
+
+        fake_mcs_dict = {
+            'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']},
+            'slot_b': {},
+        }
+        lun_list = list(range(0, 127))
+        fake_map_dict = {
+            'slot_a': {
+                '1': lun_list[2:],
+                '2': lun_list[:],
+                '3': lun_list[:],
+                '4': lun_list[1:],
+                '5': lun_list[:],
+            },
+            'slot_b': {},
+        }
+
+        test_map_chl = {
+            'slot_a': ['3', '4'],
+        }
+        test_map_lun = ['1']
+        test_mcs_id = '1'
+        self.driver = self._get_driver(configuration)
+        self.driver.mcs_dict = fake_mcs_dict
+        self.driver.map_dict = fake_map_dict
+
+        map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
+
+        self.assertDictMatch(map_chl, test_map_chl)
+        self.assertEqual(test_map_lun, map_lun)
+        self.assertEqual(test_mcs_id, mcs_id)
+
+    def test_specific_channel_with_multipath(self):
+
+        configuration = copy.copy(self.configuration)
+        configuration.infortrend_slots_a_channels_id = '1,2'
+
+        test_map_dict = {
+            'slot_a': {'1': [], '2': []},
+            'slot_b': {},
+        }
+        test_target_dict = {
+            'slot_a': {'1': '0', '2': '0'},
+            'slot_b': {},
+        }
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+        }
+        self._driver_setup(mock_commands, configuration)
+
+        self.driver._init_map_info(multipath=True)
+
+        self.assertDictMatch(self.driver.map_dict, test_map_dict)
+        self.assertDictMatch(self.driver.target_dict, test_target_dict)
+
+    def test_specific_channel_with_multipath_r_model(self):
+
+        configuration = copy.copy(self.configuration)
+        configuration.infortrend_slots_a_channels_id = '1,2'
+        configuration.infortrend_slots_b_channels_id = '1'
+
+        test_map_dict = {
+            'slot_a': {'1': [], '2': []},
+            'slot_b': {'1': []},
+        }
+        test_target_dict = {
+            'slot_a': {'1': '0', '2': '0'},
+            'slot_b': {'1': '1'},
+        }
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
+        }
+        self._driver_setup(mock_commands, configuration)
+
+        self.driver._init_map_info(multipath=True)
+
+        self.assertDictMatch(self.driver.map_dict, test_map_dict)
+        self.assertDictMatch(self.driver.target_dict, test_target_dict)
+
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_create_volume(self, log_info):
+
+        test_volume = self.cli_data.test_volume
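+        # provider_location encodes the array system id (as decimal) plus
+        # the new partition id: 'system_id^<id>@partition_id^<part_id>'.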
+        test_model_update = {
+            'provider_location': 'system_id^%s@partition_id^%s' % (
+                int(self.cli_data.fake_system_id[0], 16),
+                self.cli_data.fake_partition_id[0]),
+        }
+
+        mock_commands = {
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+            'ShowDevice': self.cli_data.get_test_show_device(),
+            'ShowLV': self._mock_show_lv,
+        }
+        self._driver_setup(mock_commands)
+
+        model_update = self.driver.create_volume(test_volume)
+
+        self.assertDictMatch(model_update, test_model_update)
+        self.assertEqual(1, log_info.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_create_volume_with_create_fail(self):
+        test_volume = self.cli_data.test_volume
+
+        mock_commands = {
+            'CreatePartition': FAKE_ERROR_RETURN,
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+            'ShowDevice': self.cli_data.get_test_show_device(),
+            'ShowLV': self._mock_show_lv,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.create_volume,
+            test_volume)
+
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_delete_volume(self, log_info):
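+        # Volume deletion should tear down, in order: the replica pair,
+        # snapshots, LUN maps, and finally the partition itself.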
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_snapshot_id = self.cli_data.fake_snapshot_id
+        test_pair_id = self.cli_data.fake_pair_id
+
+        mock_commands = {
+            'ShowPartition':
+                self.cli_data.get_test_show_partition_detail_for_map(
+                    test_partition_id),
+            'ShowReplica': self.cli_data.get_test_show_replica_detail(),
+            'DeleteReplica': SUCCEED,
+            'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
+            'DeleteSnapshot': SUCCEED,
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'DeleteMap': SUCCEED,
+            'DeletePartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.delete_volume(test_volume)
+
+        expect_cli_cmd = [
+            mock.call('ShowPartition', '-l'),
+            mock.call('ShowReplica', '-l'),
+            mock.call('DeleteReplica', test_pair_id[0], '-y'),
+            mock.call('ShowSnapshot', 'part=%s' % test_partition_id),
+            mock.call('DeleteSnapshot', test_snapshot_id[0], '-y'),
+            mock.call('DeleteSnapshot', test_snapshot_id[1], '-y'),
+            mock.call('ShowMap', 'part=%s' % test_partition_id),
+            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
+            mock.call('DeletePartition', test_partition_id, '-y'),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertEqual(1, log_info.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
+    def test_delete_volume_with_sync_pair(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+
+        mock_commands = {
+            'ShowPartition':
+                self.cli_data.get_test_show_partition_detail_for_map(
+                    test_partition_id),
+            'ShowReplica':
+                self.cli_data.get_test_show_replica_detail_for_sync_pair(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.VolumeDriverException,
+            self.driver.delete_volume,
+            test_volume)
+
+    def test_delete_volume_with_delete_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+
+        mock_commands = {
+            'ShowPartition':
+                self.cli_data.get_test_show_partition_detail_for_map(
+                    test_partition_id),
+            'ShowReplica': self.cli_data.get_test_show_replica_detail(),
+            'DeleteReplica': SUCCEED,
+            'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
+            'DeleteSnapshot': SUCCEED,
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'DeleteMap': SUCCEED,
+            'DeletePartition': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.delete_volume,
+            test_volume)
+
+    @mock.patch.object(common_cli.LOG, 'warning')
+    def test_delete_volume_with_partition_not_found(self, log_warning):
+
+        test_volume = self.cli_data.test_volume
+
+        mock_commands = {
+            'ShowPartition': self.cli_data.get_test_show_empty_list(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.delete_volume(test_volume)
+
+        self.assertEqual(1, log_warning.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_delete_volume_without_provider(self, log_info):
+
+        test_system_id = self.cli_data.fake_system_id[0]
+        test_volume = copy.deepcopy(self.cli_data.test_volume)
+        test_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
+            int(test_system_id, 16), 'None')
+        test_partition_id = self.cli_data.fake_partition_id[0]
+
+        mock_commands = {
+            'ShowPartition':
+                self.cli_data.get_test_show_partition_detail_for_map(
+                    test_partition_id),
+            'ShowReplica': self.cli_data.get_test_show_replica_detail(),
+            'DeleteReplica': SUCCEED,
+            'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
+            'DeleteSnapshot': SUCCEED,
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'DeleteMap': SUCCEED,
+            'DeletePartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.delete_volume(test_volume)
+
+        self.assertEqual(1, log_info.call_count)
+
+    @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
+                new=utils.ZeroIntervalLoopingCall)
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_create_cloned_volume(self, log_info):
+
+        fake_partition_id = self.cli_data.fake_partition_id[0]
+        test_dst_volume = self.cli_data.test_dst_volume
+        test_dst_volume_id = test_dst_volume['id'].replace('-', '')
+        test_src_volume = self.cli_data.test_volume
+        test_dst_part_id = self.cli_data.fake_partition_id[1]
+        test_model_update = {
+            'provider_location': 'system_id^%s@partition_id^%s' % (
+                int(self.cli_data.fake_system_id[0], 16),
+                self.cli_data.fake_partition_id[1]),
+        }
+
+        mock_commands = {
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+            'ShowDevice': self.cli_data.get_test_show_device(),
+            'CreateReplica': SUCCEED,
+            'ShowLV': self._mock_show_lv,
+            'ShowReplica':
+                self.cli_data.get_test_show_replica_detail_for_migrate(
+                    fake_partition_id, test_dst_part_id, test_dst_volume_id),
+            'DeleteReplica': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        model_update = self.driver.create_cloned_volume(
+            test_dst_volume, test_src_volume)
+
+        self.assertDictMatch(model_update, test_model_update)
+        self.assertEqual(1, log_info.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_create_cloned_volume_with_create_replica_fail(self):
+
+        test_dst_volume = self.cli_data.test_dst_volume
+        test_src_volume = self.cli_data.test_volume
+
+        mock_commands = {
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+            'ShowDevice': self.cli_data.get_test_show_device(),
+            'CreateReplica': FAKE_ERROR_RETURN,
+            'ShowLV': self._mock_show_lv,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.create_cloned_volume,
+            test_dst_volume,
+            test_src_volume)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_create_export(self):
+
+        test_volume = self.cli_data.test_volume
+        test_model_update = {
+            'provider_location': test_volume['provider_location'],
+        }
+        self.driver = self._get_driver(self.configuration)
+
+        model_update = self.driver.create_export(None, test_volume)
+
+        self.assertDictMatch(model_update, test_model_update)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_get_volume_stats(self):
+
+        test_volume_states = self.cli_data.test_volume_states
+
+        mock_commands = {
+            'ShowLicense': self.cli_data.get_test_show_license(),
+            'ShowLV': self.cli_data.get_test_show_lv(),
+            'ShowPartition': self.cli_data.get_test_show_partition_detail(),
+        }
+        self._driver_setup(mock_commands)
+        self.driver.VERSION = '99.99'
+
+        volume_states = self.driver.get_volume_stats(True)
+
+        self.assertDictMatch(volume_states, test_volume_states)
+
+    def test_get_volume_stats_fail(self):
+
+        mock_commands = {
+            'ShowLicense': self.cli_data.get_test_show_license(),
+            'ShowLV': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.get_volume_stats)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_create_snapshot(self):
+
+        fake_partition_id = self.cli_data.fake_partition_id[0]
+        fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
+
+        mock_commands = {
+            'CreateSnapshot': SUCCEED,
+            'ShowSnapshot': self.cli_data.get_test_show_snapshot(
+                partition_id=fake_partition_id,
+                snapshot_id=fake_snapshot_id),
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+        }
+        self._driver_setup(mock_commands)
+
+        model_update = self.driver.create_snapshot(self.cli_data.test_snapshot)
+
+        self.assertEqual(fake_snapshot_id, model_update['provider_location'])
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_create_snapshot_without_partition_id(self):
+
+        fake_partition_id = self.cli_data.fake_partition_id[0]
+        fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
+        test_snapshot = self.cli_data.test_snapshot
+
+        mock_commands = {
+            'CreateSnapshot': SUCCEED,
+            'ShowSnapshot': self.cli_data.get_test_show_snapshot(
+                partition_id=fake_partition_id,
+                snapshot_id=fake_snapshot_id),
+            'ShowPartition': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.create_snapshot,
+            test_snapshot)
+
+    def test_create_snapshot_with_create_fail(self):
+
+        fake_partition_id = self.cli_data.fake_partition_id[0]
+        fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
+        test_snapshot = self.cli_data.test_snapshot
+
+        mock_commands = {
+            'CreateSnapshot': FAKE_ERROR_RETURN,
+            'ShowSnapshot': self.cli_data.get_test_show_snapshot(
+                partition_id=fake_partition_id,
+                snapshot_id=fake_snapshot_id),
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.create_snapshot,
+            test_snapshot)
+
+    def test_create_snapshot_with_show_fail(self):
+
+        test_snapshot = self.cli_data.test_snapshot
+
+        mock_commands = {
+            'CreateSnapshot': SUCCEED,
+            'ShowSnapshot': FAKE_ERROR_RETURN,
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.create_snapshot,
+            test_snapshot)
+
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_delete_snapshot(self, log_info):
+
+        test_snapshot = self.cli_data.test_snapshot
+
+        mock_commands = {
+            'ShowReplica': self.cli_data.get_test_show_replica_detail(),
+            'DeleteSnapshot': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.delete_snapshot(test_snapshot)
+
+        self.assertEqual(1, log_info.call_count)
+
+    def test_delete_snapshot_without_provider_location(self):
+
+        test_snapshot = self.cli_data.test_snapshot
+
+        self.driver = self._get_driver(self.configuration)
+        self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
+
+        self.assertRaises(
+            exception.VolumeBackendAPIException,
+            self.driver.delete_snapshot,
+            test_snapshot)
+
+    def test_delete_snapshot_with_fail(self):
+
+        test_snapshot = self.cli_data.test_snapshot
+
+        mock_commands = {
+            'ShowReplica': self.cli_data.get_test_show_replica_detail(),
+            'DeleteSnapshot': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.delete_snapshot,
+            test_snapshot)
+
+    @mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
+    def test_delete_snapshot_with_sync_pair(self):
+
+        test_snapshot = self.cli_data.test_snapshot
+
+        mock_commands = {
+            'ShowReplica':
+                self.cli_data.get_test_show_replica_detail_for_si_sync_pair(),
+            'DeleteSnapshot': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.VolumeDriverException,
+            self.driver.delete_snapshot,
+            test_snapshot)
+
+    @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
+                new=utils.ZeroIntervalLoopingCall)
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_create_volume_from_snapshot(self, log_info):
+
+        test_snapshot = self.cli_data.test_snapshot
+        test_snapshot_id = self.cli_data.fake_snapshot_id[0]
+        test_dst_volume = self.cli_data.test_dst_volume
+        test_dst_volume_id = test_dst_volume['id'].replace('-', '')
+        test_dst_part_id = self.cli_data.fake_partition_id[1]
+        test_model_update = {
+            'provider_location': 'system_id^%s@partition_id^%s' % (
+                int(self.cli_data.fake_system_id[0], 16),
+                self.cli_data.fake_partition_id[1]),
+        }
+        mock_commands = {
+            'ShowSnapshot':
+                self.cli_data.get_test_show_snapshot_detail_filled_block(),
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+            'ShowDevice': self.cli_data.get_test_show_device(),
+            'CreateReplica': SUCCEED,
+            'ShowLV': self._mock_show_lv,
+            'ShowReplica':
+                self.cli_data.get_test_show_replica_detail_for_migrate(
+                    test_snapshot_id, test_dst_part_id, test_dst_volume_id),
+            'DeleteReplica': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        model_update = self.driver.create_volume_from_snapshot(
+            test_dst_volume, test_snapshot)
+
+        self.assertDictMatch(model_update, test_model_update)
+        self.assertEqual(1, log_info.call_count)
+
+    @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
+                new=utils.ZeroIntervalLoopingCall)
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_create_volume_from_snapshot_without_filled_block(self, log_info):
+
+        test_snapshot = self.cli_data.test_snapshot
+        test_snapshot_id = self.cli_data.fake_snapshot_id[0]
+        test_dst_volume = self.cli_data.test_dst_volume
+        test_dst_volume_id = test_dst_volume['id'].replace('-', '')
+        test_dst_part_id = self.cli_data.fake_partition_id[1]
+        test_src_part_id = self.cli_data.fake_partition_id[0]
+        test_model_update = {
+            'provider_location': 'system_id^%s@partition_id^%s' % (
+                int(self.cli_data.fake_system_id[0], 16),
+                self.cli_data.fake_partition_id[1]),
+        }
+        mock_commands = {
+            'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail(),
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+            'ShowDevice': self.cli_data.get_test_show_device(),
+            'CreateReplica': SUCCEED,
+            'ShowLV': self._mock_show_lv,
+            'ShowReplica': [
+                self.cli_data.get_test_show_replica_detail_for_migrate(
+                    test_src_part_id, test_dst_part_id, test_dst_volume_id),
+                self.cli_data.get_test_show_replica_detail_for_migrate(
+                    test_snapshot_id, test_dst_part_id, test_dst_volume_id),
+            ],
+            'DeleteReplica': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        model_update = self.driver.create_volume_from_snapshot(
+            test_dst_volume, test_snapshot)
+
+        self.assertDictMatch(model_update, test_model_update)
+        self.assertEqual(1, log_info.call_count)
+
+    def test_create_volume_from_snapshot_without_provider_location(
+            self):
+
+        test_snapshot = self.cli_data.test_snapshot
+        test_dst_volume = self.cli_data.test_dst_volume
+
+        self.driver = self._get_driver(self.configuration)
+        self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
+
+        self.assertRaises(
+            exception.VolumeBackendAPIException,
+            self.driver.create_volume_from_snapshot,
+            test_dst_volume,
+            test_snapshot)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
+        test_iscsi_properties = self.cli_data.test_iscsi_properties
+        test_target_portal = [test_iscsi_properties['data']['target_portal']]
+        test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
+
+        test_connector['multipath'] = False
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'ShowIQN': self.cli_data.get_test_show_iqn(),
+            'CreateMap': SUCCEED,
+            'ShowNet': self.cli_data.get_test_show_net(),
+            'ExecuteCommand': self.cli_data.get_fake_discovery(
+                test_target_iqn, test_target_portal),
+        }
+        self._driver_setup(mock_commands)
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        self.assertDictMatch(properties, test_iscsi_properties)
+
+        expect_cli_cmd = [
+            mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
+                      'iqn=%s' % test_connector['initiator']),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection_with_iqn_not_exist(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1])
+        test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
+        test_iscsi_properties = self.cli_data.test_iscsi_properties
+        test_target_portal = [test_iscsi_properties['data']['target_portal']]
+        test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
+
+        test_connector['multipath'] = False
+        test_connector['initiator'] = test_initiator
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'ShowIQN': self.cli_data.get_test_show_iqn(),
+            'CreateIQN': SUCCEED,
+            'CreateMap': SUCCEED,
+            'ShowNet': self.cli_data.get_test_show_net(),
+            'ExecuteCommand': self.cli_data.get_fake_discovery(
+                test_target_iqn, test_target_portal),
+        }
+        self._driver_setup(mock_commands)
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        self.assertDictMatch(properties, test_iscsi_properties)
+
+        expect_cli_cmd = [
+            mock.call('CreateIQN', test_initiator, test_initiator[-16:]),
+            mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
+                      'iqn=%s' % test_connector['initiator']),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection_with_empty_map(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
+        test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map
+        test_target_portal = [test_iscsi_properties['data']['target_portal']]
+        test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
+
+        test_connector['multipath'] = False
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+            'ShowMap': self.cli_data.get_test_show_empty_list(),
+            'ShowIQN': self.cli_data.get_test_show_iqn(),
+            'CreateMap': SUCCEED,
+            'ShowNet': self.cli_data.get_test_show_net(),
+            'ExecuteCommand': self.cli_data.get_fake_discovery(
+                test_target_iqn, test_target_portal),
+        }
+        self._driver_setup(mock_commands)
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        self.assertDictMatch(properties, test_iscsi_properties)
+
+    def test_initialize_connection_with_create_map_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_iscsi
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'ShowIQN': self.cli_data.get_test_show_iqn(),
+            'CreateMap': FAKE_ERROR_RETURN,
+            'ShowNet': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.initialize_connection,
+            test_volume,
+            test_connector)
+
+    def test_initialize_connection_with_get_ip_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_iscsi
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'ShowIQN': self.cli_data.get_test_show_iqn(),
+            'CreateMap': SUCCEED,
+            'ShowNet': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.initialize_connection,
+            test_volume,
+            test_connector)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_initialize_connection_with_mcs(self):
+
+        configuration = copy.copy(self.configuration)
+        configuration.use_multipath_for_image_xfer = True
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
+        test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs
+        test_target_portal = [test_iscsi_properties['data']['target_portal']]
+        test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
+
+        test_connector['multipath'] = False
+
+        mock_commands = {
+            'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
+            'ShowMap': self.cli_data.get_test_show_map(),
+            'ShowIQN': self.cli_data.get_test_show_iqn(),
+            'CreateMap': SUCCEED,
+            'ShowNet': self.cli_data.get_test_show_net(),
+            'ExecuteCommand': self.cli_data.get_fake_discovery(
+                test_target_iqn, test_target_portal),
+        }
+        self._driver_setup(mock_commands, configuration)
+
+        properties = self.driver.initialize_connection(
+            test_volume, test_connector)
+
+        self.assertDictMatch(properties, test_iscsi_properties)
+
+        expect_cli_cmd = [
+            mock.call('CreateMap', 'part', test_partition_id, '1', '0', '2',
+                      'iqn=%s' % test_connector['initiator']),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_extend_volume(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_new_size = 10
+        test_expand_size = test_new_size - test_volume['size']
+
+        mock_commands = {
+            'SetPartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.extend_volume(test_volume, test_new_size)
+
+        expect_cli_cmd = [
+            mock.call('SetPartition', 'expand', test_partition_id,
+                      'size=%sGB' % test_expand_size),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_extend_volume_mb(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_new_size = 5.5
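+        # A non-integer GB size is expanded via an MB-denominated CLI call.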
+        test_expand_size = round((test_new_size - test_volume['size']) * 1024)
+
+        mock_commands = {
+            'SetPartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.extend_volume(test_volume, test_new_size)
+
+        expect_cli_cmd = [
+            mock.call('SetPartition', 'expand', test_partition_id,
+                      'size=%sMB' % test_expand_size),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+    def test_extend_volume_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_new_size = 10
+
+        mock_commands = {
+            'SetPartition': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.extend_volume,
+            test_volume,
+            test_new_size)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_terminate_connection(self):
+
+        test_volume = self.cli_data.test_volume
+        test_partition_id = self.cli_data.fake_partition_id[0]
+        test_connector = self.cli_data.test_connector_iscsi
+
+        mock_commands = {
+            'DeleteMap': SUCCEED,
+            'DeleteIQN': SUCCEED,
+            'ShowMap': self.cli_data.get_test_show_map(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.terminate_connection(test_volume, test_connector)
+
+        expect_cli_cmd = [
+            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
+            mock.call('DeleteIQN', test_connector['initiator'][-16:]),
+            mock.call('ShowMap'),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+
+    def test_terminate_connection_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_connector = self.cli_data.test_connector_iscsi
+
+        mock_commands = {
+            'DeleteMap': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.terminate_connection,
+            test_volume,
+            test_connector)
+
+    @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
+                new=utils.ZeroIntervalLoopingCall)
+    def test_migrate_volume(self):
+
+        test_host = copy.deepcopy(self.cli_data.test_migrate_host)
+        fake_pool = copy.deepcopy(self.cli_data.fake_pool)
+        test_volume = self.cli_data.test_volume
+        test_volume_id = test_volume['id'].replace('-', '')
+        test_src_part_id = self.cli_data.fake_partition_id[0]
+        test_dst_part_id = self.cli_data.fake_partition_id[2]
+        test_pair_id = self.cli_data.fake_pair_id[0]
+        test_model_update = {
+            'provider_location': 'system_id^%s@partition_id^%s' % (
+                int(self.cli_data.fake_system_id[0], 16),
+                test_dst_part_id),
+        }
+
+        mock_commands = {
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(
+                test_volume_id, fake_pool['pool_id']),
+            'CreateReplica': SUCCEED,
+            'ShowLV': self._mock_show_lv_for_migrate,
+            'ShowReplica':
+                self.cli_data.get_test_show_replica_detail_for_migrate(
+                    test_src_part_id, test_dst_part_id, test_volume_id),
+            'DeleteReplica': SUCCEED,
+            'DeleteMap': SUCCEED,
+            'DeletePartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        rc, model_update = self.driver.migrate_volume(test_volume, test_host)
+
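+        # The expected sequence mirrors the migration flow: create the
+        # destination partition, mirror data through a replica pair, then
+        # delete the pair, the source mapping and the source partition.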
+        expect_cli_cmd = [
+            mock.call('CreatePartition',
+                      fake_pool['pool_id'],
+                      test_volume['id'].replace('-', ''),
+                      'size=%s' % (test_volume['size'] * 1024),
+                      ''),
+            mock.call('ShowPartition'),
+            mock.call('CreateReplica',
+                      'Cinder-Migrate',
+                      'part', test_src_part_id,
+                      'part', test_dst_part_id,
+                      'type=mirror'),
+            mock.call('ShowReplica', '-l'),
+            mock.call('DeleteReplica', test_pair_id, '-y'),
+            mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
+            mock.call('DeletePartition', test_src_part_id, '-y'),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertTrue(rc)
+        self.assertDictMatch(model_update, test_model_update)
+
+    @mock.patch.object(common_cli.LOG, 'warning')
+    def test_migrate_volume_with_invalid_storage(self, log_warning):
+
+        fake_host = self.cli_data.fake_host
+        test_volume = self.cli_data.test_volume
+
+        mock_commands = {
+            'ShowLV': self._mock_show_lv_for_migrate,
+        }
+        self._driver_setup(mock_commands)
+
+        rc, model_update = self.driver.migrate_volume(test_volume, fake_host)
+
+        self.assertFalse(rc)
+        self.assertIsNone(model_update)
+        self.assertEqual(1, log_warning.call_count)
+
+    def test_migrate_volume_with_get_part_id_fail(self):
+
+        test_host = copy.deepcopy(self.cli_data.test_migrate_host)
+        test_volume = self.cli_data.test_volume
+
+        mock_commands = {
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(),
+            'DeleteMap': SUCCEED,
+            'CreateReplica': SUCCEED,
+            'CreateMap': SUCCEED,
+            'ShowLV': self._mock_show_lv_for_migrate,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.VolumeDriverException,
+            self.driver.migrate_volume,
+            test_volume,
+            test_host)
+
+    def test_migrate_volume_with_create_replica_fail(self):
+
+        test_host = copy.deepcopy(self.cli_data.test_migrate_host)
+        fake_pool = copy.deepcopy(self.cli_data.fake_pool)
+        test_volume = self.cli_data.test_volume
+
+        mock_commands = {
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(
+                test_volume['id'].replace('-', ''), fake_pool['pool_id']),
+            'DeleteMap': SUCCEED,
+            'CreateReplica': FAKE_ERROR_RETURN,
+            'CreateMap': SUCCEED,
+            'ShowLV': self._mock_show_lv_for_migrate,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.migrate_volume,
+            test_volume,
+            test_host)
+
+    @mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
+                new=utils.ZeroIntervalLoopingCall)
+    def test_migrate_volume_timeout(self):
+
+        test_host = copy.deepcopy(self.cli_data.test_migrate_host)
+        fake_pool = copy.deepcopy(self.cli_data.fake_pool)
+        test_volume = self.cli_data.test_volume
+        test_volume_id = test_volume['id'].replace('-', '')
+        test_src_part_id = self.cli_data.fake_partition_id[0]
+        test_dst_part_id = self.cli_data.fake_partition_id[2]
+
+        configuration = copy.copy(self.configuration)
+        configuration.infortrend_cli_timeout = 0
+
+        mock_commands = {
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(
+                test_volume_id, fake_pool['pool_id']),
+            'CreateReplica': SUCCEED,
+            'ShowLV': self._mock_show_lv_for_migrate,
+            'ShowReplica':
+                self.cli_data.get_test_show_replica_detail_for_migrate(
+                    test_src_part_id, test_dst_part_id, test_volume_id,
+                    'Copy'),
+        }
+        self._driver_setup(mock_commands, configuration)
+
+        self.assertRaises(
+            exception.VolumeDriverException,
+            self.driver.migrate_volume,
+            test_volume,
+            test_host)
+
+    def test_manage_existing_get_size(self):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_ref_volume
+        test_pool = self.cli_data.fake_lv_id[0]
+        test_partition_id = self.cli_data.fake_partition_id[2]
+        test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
+
+        mock_commands = {
+            'ShowPartition': self.cli_data.get_test_show_partition_detail(
+                'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
+            'ShowMap': SUCCEED,
+        }
+
+        self._driver_setup(mock_commands)
+
+        size = self.driver.manage_existing_get_size(
+            test_volume, test_ref_volume)
+
+        expect_cli_cmd = [
+            mock.call('ShowMap', 'part=%s' % test_partition_id),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertEqual(1, size)
+
+    def test_manage_existing_get_size_with_import(self):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_ref_volume_with_import
+        test_pool = self.cli_data.fake_lv_id[0]
+        test_partition_id = self.cli_data.fake_partition_id[2]
+
+        mock_commands = {
+            'ShowPartition': self.cli_data.get_test_show_partition_detail(
+                test_ref_volume['source-name'], test_pool),
+            'ShowMap': SUCCEED,
+        }
+
+        self._driver_setup(mock_commands)
+
+        size = self.driver.manage_existing_get_size(
+            test_volume, test_ref_volume)
+
+        expect_cli_cmd = [
+            mock.call('ShowMap', 'part=%s' % test_partition_id),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertEqual(1, size)
+
+    def test_manage_existing_get_size_in_use(self):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_ref_volume
+        test_pool = self.cli_data.fake_lv_id[0]
+        test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
+
+        mock_commands = {
+            'ShowPartition': self.cli_data.get_test_show_partition_detail(
+                'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
+            'ShowMap': self.cli_data.get_test_show_map(),
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.VolumeBackendAPIException,
+            self.driver.manage_existing_get_size,
+            test_volume,
+            test_ref_volume)
+
+    def test_manage_existing_get_size_no_source_id(self):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_dst_volume
+        self.driver = self._get_driver(self.configuration)
+
+        self.assertRaises(
+            exception.ManageExistingInvalidReference,
+            self.driver.manage_existing_get_size,
+            test_volume,
+            test_ref_volume)
+
+    def test_manage_existing_get_size_show_part_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_ref_volume
+
+        mock_commands = {
+            'ShowPartition': FAKE_ERROR_RETURN,
+            'ShowMap': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.manage_existing_get_size,
+            test_volume,
+            test_ref_volume)
+
+    def test_manage_existing_get_size_show_map_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_ref_volume
+        test_pool = self.cli_data.fake_lv_id[0]
+        test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
+
+        mock_commands = {
+            'ShowPartition': self.cli_data.get_test_show_partition_detail(
+                'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
+            'ShowMap': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.manage_existing_get_size,
+            test_volume,
+            test_ref_volume)
+
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_manage_existing(self, log_info):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_ref_volume
+        test_pool = self.cli_data.fake_lv_id[0]
+        test_partition_id = self.cli_data.fake_partition_id[2]
+        test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
+
+        mock_commands = {
+            'ShowPartition': self.cli_data.get_test_show_partition_detail(
+                'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
+            'SetPartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.manage_existing(test_volume, test_ref_volume)
+
+        expect_cli_cmd = [
+            mock.call('SetPartition', test_partition_id,
+                      'name=%s' % test_volume['id'].replace('-', '')),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertEqual(1, log_info.call_count)
+
+    def test_manage_existing_rename_fail(self):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_ref_volume
+        test_pool = self.cli_data.fake_lv_id[0]
+        test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
+
+        mock_commands = {
+            'ShowPartition': self.cli_data.get_test_show_partition_detail(
+                'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
+            'SetPartition': FAKE_ERROR_RETURN,
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.manage_existing,
+            test_volume,
+            test_ref_volume)
+
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_manage_existing_with_import(self, log_info):
+
+        test_volume = self.cli_data.test_volume
+        test_ref_volume = self.cli_data.test_ref_volume_with_import
+        test_pool = self.cli_data.fake_lv_id[0]
+        test_partition_id = self.cli_data.fake_partition_id[2]
+
+        mock_commands = {
+            'ShowPartition': self.cli_data.get_test_show_partition_detail(
+                test_ref_volume['source-name'], test_pool),
+            'SetPartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.manage_existing(test_volume, test_ref_volume)
+
+        expect_cli_cmd = [
+            mock.call('SetPartition', test_partition_id,
+                      'name=%s' % test_volume['id'].replace('-', '')),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertEqual(1, log_info.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_unmanage(self, log_info):
+
+        test_volume = self.cli_data.test_volume
+        test_volume_id = test_volume['id'].replace('-', '')
+        test_partition_id = self.cli_data.fake_partition_id[0]
+
+        mock_commands = {
+            'SetPartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        self.driver.unmanage(test_volume)
+
+        expect_cli_cmd = [
+            mock.call(
+                'SetPartition',
+                test_partition_id,
+                'name=cinder-unmanaged-%s' % test_volume_id[:-17]),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertEqual(1, log_info.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'info')
+    def test_retype_without_change(self, log_info):
+
+        test_volume = self.cli_data.test_volume
+        test_new_type = self.cli_data.test_new_type
+        test_diff = {'extra_specs': {}}
+        test_host = self.cli_data.test_migrate_host_2
+
+        self.driver = self._get_driver(self.configuration)
+
+        rc = self.driver.retype(
+            None, test_volume, test_new_type, test_diff, test_host)
+
+        self.assertTrue(rc)
+        self.assertEqual(1, log_info.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'warning')
+    def test_retype_with_change_provision(self, log_warning):
+
+        test_volume = self.cli_data.test_volume
+        test_new_type = self.cli_data.test_new_type
+        test_diff = self.cli_data.test_diff
+        test_host = self.cli_data.test_migrate_host_2
+
+        self.driver = self._get_driver(self.configuration)
+
+        rc = self.driver.retype(
+            None, test_volume, test_new_type, test_diff, test_host)
+
+        self.assertFalse(rc)
+        self.assertEqual(1, log_warning.call_count)
+
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_retype_with_migrate(self):
+
+        fake_pool = copy.deepcopy(self.cli_data.fake_pool)
+        test_host = copy.deepcopy(self.cli_data.test_migrate_host)
+        test_volume = self.cli_data.test_volume
+        test_volume_id = test_volume['id'].replace('-', '')
+        test_new_type = self.cli_data.test_new_type
+        test_diff = self.cli_data.test_diff
+        test_src_part_id = self.cli_data.fake_partition_id[0]
+        test_dst_part_id = self.cli_data.fake_partition_id[2]
+        test_pair_id = self.cli_data.fake_pair_id[0]
+        test_model_update = {
+            'provider_location': 'system_id^%s@partition_id^%s' % (
+                int(self.cli_data.fake_system_id[0], 16),
+                test_dst_part_id),
+        }
+
+        mock_commands = {
+            'ShowSnapshot': SUCCEED,
+            'CreatePartition': SUCCEED,
+            'ShowPartition': self.cli_data.get_test_show_partition(
+                test_volume_id, fake_pool['pool_id']),
+            'CreateReplica': SUCCEED,
+            'ShowLV': self._mock_show_lv_for_migrate,
+            'ShowReplica':
+                self.cli_data.get_test_show_replica_detail_for_migrate(
+                    test_src_part_id, test_dst_part_id, test_volume_id),
+            'DeleteReplica': SUCCEED,
+            'DeleteMap': SUCCEED,
+            'DeletePartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        rc, model_update = self.driver.retype(
+            None, test_volume, test_new_type, test_diff, test_host)
+
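+        # CreatePartition is expected with 'init=disable' and a 'min='
+        # reserve of 20% of the requested size, consistent with a retype
+        # to thin provisioning.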
+        expect_cli_cmd = [
+            mock.call('ShowSnapshot', 'part=%s' % test_src_part_id),
+            mock.call(
+                'CreatePartition',
+                fake_pool['pool_id'],
+                test_volume['id'].replace('-', ''),
+                'size=%s' % (test_volume['size'] * 1024),
+                'init=disable min=%sMB' % (
+                    int(test_volume['size'] * 1024 * 0.2))
+            ),
+            mock.call('ShowPartition'),
+            mock.call(
+                'CreateReplica',
+                'Cinder-Migrate',
+                'part', test_src_part_id,
+                'part', test_dst_part_id,
+                'type=mirror'
+            ),
+            mock.call('ShowReplica', '-l'),
+            mock.call('DeleteReplica', test_pair_id, '-y'),
+            mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
+            mock.call('DeletePartition', test_src_part_id, '-y'),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertTrue(rc)
+        self.assertDictMatch(model_update, test_model_update)
+
+    @mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
+    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
+    def test_update_migrated_volume(self):
+        src_volume = self.cli_data.test_volume
+        dst_volume = copy.deepcopy(self.cli_data.test_dst_volume)
+        test_dst_part_id = self.cli_data.fake_partition_id[1]
+        dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
+            int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
+        test_model_update = {
+            'provider_location': dst_volume['provider_location'],
+        }
+
+        mock_commands = {
+            'SetPartition': SUCCEED,
+        }
+        self._driver_setup(mock_commands)
+
+        model_update = self.driver.update_migrated_volume(
+            None, src_volume, dst_volume)
+
+        expect_cli_cmd = [
+            mock.call('SetPartition', test_dst_part_id,
+                      'name=%s' % src_volume['id'].replace('-', '')),
+        ]
+        self._assert_cli_has_calls(expect_cli_cmd)
+        self.assertDictMatch(model_update, test_model_update)
+
+    @mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
+    def test_update_migrated_volume_rename_fail(self):
+        src_volume = self.cli_data.test_volume
+        dst_volume = self.cli_data.test_dst_volume
+        test_dst_part_id = self.cli_data.fake_partition_id[1]
+        dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
+            int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
+
+        mock_commands = {
+            'SetPartition': FAKE_ERROR_RETURN
+        }
+        self._driver_setup(mock_commands)
+
+        self.assertRaises(
+            exception.InfortrendCliException,
+            self.driver.update_migrated_volume,
+            None,
+            src_volume,
+            dst_volume)
diff --git a/cinder/volume/drivers/infortrend/__init__.py b/cinder/volume/drivers/infortrend/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cinder/volume/drivers/infortrend/eonstor_ds_cli/__init__.py b/cinder/volume/drivers/infortrend/eonstor_ds_cli/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py b/cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py
new file mode 100644 (file)
index 0000000..d24bc6d
--- /dev/null
@@ -0,0 +1,735 @@
+# Copyright (c) 2015 Infortrend Technology, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Infortrend basic CLI factory.
+"""
+
+import abc
+
+from oslo_concurrency import processutils
+from oslo_log import log as logging
+import six
+
+from cinder.i18n import _LE
+from cinder import utils
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_RETRY_TIME = 5
+
+
+def retry_cli(func):
+    def inner(self, *args, **kwargs):
+        total_retry_time = self.cli_retry_time
+
+        if total_retry_time is None:
+            total_retry_time = DEFAULT_RETRY_TIME
+
+        retry_time = 0
+        while retry_time < total_retry_time:
+            rc, out = func(self, *args, **kwargs)
+            retry_time += 1
+
+            if rc == 0:
+                break
+
+            LOG.error(_LE(
+                'Retry %(retry)s times: %(method)s failed '
+                '%(rc)s: %(reason)s'), {
+                    'retry': retry_time,
+                    'method': self.__class__.__name__,
+                    'rc': rc,
+                    'reason': out})
+        LOG.debug(
+            'Method: %(method)s Return Code: %(rc)s '
+            'Output: %(out)s', {
+                'method': self.__class__.__name__, 'rc': rc, 'out': out})
+        return rc, out
+    return inner
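+
+# Illustrative use of retry_cli (a sketch; cli_conf is assumed to be a dict
+# carrying 'cli_retry_time', as consumed by the command classes below):
+#
+#     rc, out = ShowLV(cli_conf).execute('-l')
+#
+# execute() is re-invoked until it returns rc == 0 or cli_retry_time
+# attempts are exhausted; the last (rc, out) pair is returned.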
+
+
+def util_execute(command_line):
+    content, err = utils.execute(command_line, shell=True)
+    return content
+
+
+def strip_empty_in_list(raw_list):
+    result = []
+    for entry in raw_list:
+        entry = entry.strip()
+        if entry != "":
+            result.append(entry)
+
+    return result
+
+
+def table_to_dict(table):
+    table_header = table[0].split("  ")
+    table_header_list = strip_empty_in_list(table_header)
+
+    result = []
+
+    for i in range(len(table) - 2):
+        if table[i + 2].strip() == "":
+            break
+
+        result_entry = {}
+        table_entry = table[i + 2].split("  ")
+        table_entry_list = strip_empty_in_list(table_entry)
+
+        for key, value in zip(table_header_list, table_entry_list):
+            result_entry[key] = value
+
+        result.append(result_entry)
+    return result
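+
+# Illustrative input/output for table_to_dict (values are made up):
+#
+#     table = [' ID   Name  LD-amount',
+#              '----------------------',
+#              ' 123  LV-1  1',
+#              '']
+#     table_to_dict(table)
+#     # => [{'ID': '123', 'Name': 'LV-1', 'LD-amount': '1'}]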
+
+
+def content_lines_to_dict(content_lines):
+    result = []
+    result_entry = {}
+
+    for content_line in content_lines:
+        if content_line.strip() == "":
+            result.append(result_entry)
+            result_entry = {}
+            continue
+
+        split_entry = content_line.strip().split(": ", 1)
+        result_entry[split_entry[0]] = split_entry[1]
+
+    return result
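+
+# Illustrative input/output for content_lines_to_dict (values are made up);
+# a blank line closes each detail entry:
+#
+#     content_lines_to_dict([' ID: 123', ' Name: LV-1', ''])
+#     # => [{'ID': '123', 'Name': 'LV-1'}]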
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseCommand(object):
+
+    """The BaseCommand abstract class."""
+
+    def __init__(self):
+        super(BaseCommand, self).__init__()
+
+    @abc.abstractmethod
+    def execute(self, *args, **kwargs):
+        pass
+
+
+class ExecuteCommand(BaseCommand):
+
+    """The Common ExecuteCommand."""
+
+    def __init__(self, cli_conf):
+        super(ExecuteCommand, self).__init__()
+        self.cli_retry_time = cli_conf.get('cli_retry_time')
+
+    @retry_cli
+    def execute(self, *args, **kwargs):
+        result = None
+        rc = 0
+        try:
+            result, err = utils.execute(*args, **kwargs)
+        except processutils.ProcessExecutionError as pe:
+            rc = pe.exit_code
+            result = pe.stdout
+            result = result.replace('\n', '\\n')
+            LOG.error(_LE(
+                'Error on execute command. '
+                'Error code: %(exit_code)d Error msg: %(result)s'), {
+                    'exit_code': pe.exit_code, 'result': result})
+        return rc, result
+
+
+class CLIBaseCommand(BaseCommand):
+
+    """The CLIBaseCommand class."""
+
+    def __init__(self, cli_conf):
+        super(CLIBaseCommand, self).__init__()
+        self.java = "java -jar"
+        self.execute_file = cli_conf.get('path')
+        self.ip = cli_conf.get('ip')
+        self.password = cli_conf.get('password')
+        self.cli_retry_time = cli_conf.get('cli_retry_time')
+        self.command = ""
+        self.parameters = ()
+        self.command_line = ""
+
+    def _generate_command(self, parameters):
+        """Generate execute Command. use java, execute, command, parameters."""
+        self.parameters = parameters
+        parameters_line = ' '.join(parameters)
+
+        if self.password:
+            parameters_line = 'password=%s %s' % (
+                self.password, parameters_line)
+
+        self.command_line = "{0} {1} {2} {3} {4}".format(
+            self.java,
+            self.execute_file,
+            self.ip,
+            self.command,
+            parameters_line)
+
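+        # Illustrative result (values are made up): with execute_file
+        # '/opt/bin/Infortrend/raidcmd_ESDS10.jar', ip '172.27.0.1' and
+        # command 'show part', parameters ('-l',) builds roughly:
+        #     java -jar /opt/bin/Infortrend/raidcmd_ESDS10.jar 172.27.0.1 \
+        #         show part password=<secret> -l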
+        return self.command_line
+
+    def _parser(self, content=None):
+        """Parse the command result.
+
+        :param content: The content to parse
+        :returns: parse result
+        """
+        if content is None:
+            return -1, None
+
+        content = content.replace("\r", "")
+        content = content.replace("\\/-", "")
+        content = content.strip()
+        LOG.debug(content)
+
+        content_lines = content.split("\n")
+        rc, out = self._parse_return(content_lines)
+
+        if rc != 0:
+            return rc, out
+        return rc, content_lines
+
+    @retry_cli
+    def execute(self, *args, **kwargs):
+        command_line = self._generate_command(args)
+        LOG.debug('Executing: %(command)s', {'command': command_line})
+        rc = 0
+        result = None
+        try:
+            content = self._execute(command_line)
+            rc, result = self._parser(content)
+        except processutils.ProcessExecutionError as pe:
+            rc = -2  # avoid confusion with real CLI return codes
+            result = pe.stdout
+            result = result.replace('\n', '\\n')
+            LOG.error(_LE(
+                'Error on execute %(command)s. '
+                'Error code: %(exit_code)d Error msg: %(result)s'), {
+                    'command': command_line,
+                    'exit_code': pe.exit_code,
+                    'result': result})
+        return rc, result
+
+    def _execute(self, command_line):
+        return util_execute(command_line)
+
+    def set_ip(self, ip):
+        """Set the Raid's ip."""
+        self.ip = ip
+
+    def _parse_return(self, content_lines):
+        """Get the end of command line result."""
+        rc = 0
+        return_value = content_lines[-1].strip().split(' ', 1)[1]
+        return_cli_result = content_lines[-2].strip().split(' ', 1)[1]
+
+        rc = int(return_value, 16)
+
+        return rc, return_cli_result
+
+
+class CreateLD(CLIBaseCommand):
+
+    """The Create LD Command."""
+
+    def __init__(self, *args, **kwargs):
+        super(CreateLD, self).__init__(*args, **kwargs)
+        self.command = "create ld"
+
+
+class CreateLV(CLIBaseCommand):
+
+    """The Create LV Command."""
+
+    def __init__(self, *args, **kwargs):
+        super(CreateLV, self).__init__(*args, **kwargs)
+        self.command = "create lv"
+
+
+class CreatePartition(CLIBaseCommand):
+
+    """Create Partition.
+
+    create part [LV-ID] [name] [size={partition-size}]
+                [min={minimal-reserve-size}] [init={switch}]
+                [tier={tier-level-list}]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CreatePartition, self).__init__(*args, **kwargs)
+        self.command = "create part"
+
+
+class DeletePartition(CLIBaseCommand):
+
+    """Delete Partition.
+
+    delete part [partition-ID] [-y]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(DeletePartition, self).__init__(*args, **kwargs)
+        self.command = "delete part"
+
+
+class SetPartition(CLIBaseCommand):
+
+    """Set Partition.
+
+    set part [partition-ID] [name={partition-name}]
+             [min={minimal-reserve-size}]
+    set part expand [partition-ID] [size={expand-size}]
+    set part purge [partition-ID] [number] [rule-type]
+    set part reclaim [partition-ID]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(SetPartition, self).__init__(*args, **kwargs)
+        self.command = "set part"
+
+
+class CreateMap(CLIBaseCommand):
+
+    """Map the Partition on the channel.
+
+    create map [part] [partition-ID] [Channel-ID]
+               [Target-ID] [LUN-ID] [assign={assign-to}]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CreateMap, self).__init__(*args, **kwargs)
+        self.command = "create map"
+
+
+class DeleteMap(CLIBaseCommand):
+
+    """Unmap the Partition on the channel.
+
+    delete map [part] [partition-ID] [Channel-ID]
+               [Target-ID] [LUN-ID] [-y]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(DeleteMap, self).__init__(*args, **kwargs)
+        self.command = "delete map"
+
+
+class CreateSnapshot(CLIBaseCommand):
+
+    """Create partition's Snapshot.
+
+    create si [part] [partition-ID]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CreateSnapshot, self).__init__(*args, **kwargs)
+        self.command = "create si"
+
+
+class DeleteSnapshot(CLIBaseCommand):
+
+    """Delete partition's Snapshot.
+
+    delete si [snapshot-image-ID] [-y]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(DeleteSnapshot, self).__init__(*args, **kwargs)
+        self.command = "delete si"
+
+
+class CreateReplica(CLIBaseCommand):
+
+    """Create partition or snapshot's replica.
+
+    create replica [name] [part | si] [source-volume-ID]
+                   [part] [target-volume-ID] [type={replication-mode}]
+                   [priority={level}] [desc={description}]
+                   [incremental={switch}] [timeout={value}]
+                   [compression={switch}]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CreateReplica, self).__init__(*args, **kwargs)
+        self.command = "create replica"
+
+
+class DeleteReplica(CLIBaseCommand):
+
+    """Delete and terminate specific replication job.
+
+    delete replica [volume-pair-ID] [-y]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(DeleteReplica, self).__init__(*args, **kwargs)
+        self.command = "delete replica"
+
+
+class CreateIQN(CLIBaseCommand):
+
+    """Create host iqn for CHAP or lun filter.
+
+    create iqn [IQN] [IQN-alias-name] [user={username}] [password={secret}]
+               [target={name}] [target-password={secret}] [ip={ip-address}]
+               [mask={netmask-ip}]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CreateIQN, self).__init__(*args, **kwargs)
+        self.command = "create iqn"
+
+
+class DeleteIQN(CLIBaseCommand):
+
+    """Delete host iqn by name.
+
+    delete iqn [name]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(DeleteIQN, self).__init__(*args, **kwargs)
+        self.command = "delete iqn"
+
+
+class ShowCommand(CLIBaseCommand):
+
+    """Basic Show Command."""
+
+    def __init__(self, *args, **kwargs):
+        super(ShowCommand, self).__init__(*args, **kwargs)
+        self.param_detail = "-l"
+        self.default_type = "table"
+        self.start_key = ""
+
+    def _parser(self, content=None):
+        """Parse Table or Detail format into dict.
+
+        # Table format
+
+         ID   Name  LD-amount
+        ----------------------
+         123  LV-1  1
+
+        # Result
+
+        {
+            'ID': '123',
+            'Name': 'LV-1',
+            'LD-amount': '1'
+        }
+
+        # Detail format
+
+         ID: 5DE94FF775D81C30
+         Name: LV-1
+         LD-amount: 1
+
+        # Result
+
+        {
+            'ID': '123',
+            'Name': 'LV-1',
+            'LD-amount': '1'
+        }
+
+        :param content: The content to parse.
+        :returns: parse result
+        """
+        rc, out = super(ShowCommand, self)._parser(content)
+
+        # Error.
+        if rc != 0:
+            return rc, out
+
+        # No content.
+        if len(out) < 6:
+            return rc, []
+
+        detect_type = self.detect_type()
+
+        # Show detail content.
+        if detect_type == "list":
+
+            start_id = self.detect_detail_start_index(out)
+            if start_id < 0:
+                return rc, []
+
+            result = content_lines_to_dict(out[start_id:-2])
+        else:
+
+            start_id = self.detect_table_start_index(out)
+            if start_id < 0:
+                return rc, []
+
+            result = table_to_dict(out[start_id:-3])
+
+        return rc, result
+
+    def detect_type(self):
+        if self.param_detail in self.parameters:
+            detect_type = "list"
+        else:
+            detect_type = self.default_type
+        return detect_type
+
+    def detect_table_start_index(self, content):
+        for i in range(3, len(content)):
+            key = content[i].strip().split('  ')
+            if self.start_key in key[0].strip():
+                return i
+
+        return -1
+
+    def detect_detail_start_index(self, content):
+        for i in range(3, len(content)):
+            split_entry = content[i].strip().split(' ')
+            if len(split_entry) >= 2 and ':' in split_entry[0]:
+                return i
+
+        return -1
+
+
+class ShowLD(ShowCommand):
+
+    """Show LD.
+
+    show ld [index-list]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowLD, self).__init__(*args, **kwargs)
+        self.command = "show ld"
+
+
+class ShowLV(ShowCommand):
+
+    """Show LV.
+
+    show lv [lv={LV-IDs}] [-l]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowLV, self).__init__(*args, **kwargs)
+        self.command = "show lv"
+        self.start_key = "ID"
+
+    def detect_table_start_index(self, content):
+        # The tiering view uses a different leading column name.
+        if "tier" in self.parameters:
+            self.start_key = "LV-Name"
+
+        return super(ShowLV, self).detect_table_start_index(content)
+
+
+class ShowPartition(ShowCommand):
+
+    """Show Partition.
+
+    show part [part={partition-IDs} | lv={LV-IDs}] [-l]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowPartition, self).__init__(*args, **kwargs)
+        self.command = "show part"
+        self.start_key = "ID"
+
+
+class ShowSnapshot(ShowCommand):
+
+    """Show Snapshot.
+
+    show si [si={snapshot-image-IDs} | part={partition-IDs} | lv={LV-IDs}] [-l]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowSnapshot, self).__init__(*args, **kwargs)
+        self.command = "show si"
+        self.start_key = "Index"
+
+
+class ShowDevice(ShowCommand):
+
+    """Show Device.
+
+    show device
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowDevice, self).__init__(*args, **kwargs)
+        self.command = "show device"
+        self.start_key = "Index"
+
+
+class ShowChannel(ShowCommand):
+
+    """Show Channel.
+
+    show channel
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowChannel, self).__init__(*args, **kwargs)
+        self.command = "show channel"
+        self.start_key = "Ch"
+
+
+class ShowDisk(ShowCommand):
+
+    """The Show Disk Command.
+
+    show disk [disk-index-list | channel={ch}]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowDisk, self).__init__(*args, **kwargs)
+        self.command = "show disk"
+
+
+class ShowMap(ShowCommand):
+
+    """Show Map.
+
+    show map [part={partition-IDs} | channel={channel-IDs}] [-l]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowMap, self).__init__(*args, **kwargs)
+        self.command = "show map"
+        self.start_key = "Ch"
+
+
+class ShowNet(ShowCommand):
+
+    """Show IP network.
+
+    show net [id={channel-IDs}] [-l]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowNet, self).__init__(*args, **kwargs)
+        self.command = "show net"
+        self.start_key = "ID"
+
+
+class ShowLicense(ShowCommand):
+
+    """Show License.
+
+    show license
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowLicense, self).__init__(*args, **kwargs)
+        self.command = "show license"
+        self.start_key = "License"
+
+    def _parser(self, content=None):
+        """Parse License format.
+
+        # License format
+
+         License  Amount(Partition/Subsystem)  Expired
+        ------------------------------------------------
+         EonPath  ---                          True
+
+        # Result
+
+        {
+            'EonPath': {
+                'Amount': '---',
+                'Support': True
+             }
+        }
+
+        :param content: The content to parse.
+        :returns: parse result
+        """
+        rc, out = super(ShowLicense, self)._parser(content)
+
+        if rc != 0:
+            return rc, out
+
+        if len(out) > 0:
+            result = {}
+            for entry in out:
+                if entry['Expired'] == '---' or entry['Expired'] == 'Expired':
+                    support = False
+                else:
+                    support = True
+                result[entry['License']] = {
+                    'Amount':
+                        entry['Amount(Partition/Subsystem)'],
+                    'Support': support
+                }
+            return rc, result
+
+        return rc, []
+
+
+class ShowReplica(ShowCommand):
+
+    """Show information of all replication jobs or specific job.
+
+    show replica [id={volume-pair-IDs}] [-l]
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowReplica, self).__init__(*args, **kwargs)
+        self.command = 'show replica'
+
+
+class ShowWWN(ShowCommand):
+
+    """Show Fibre network.
+
+    show wwn
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ShowWWN, self).__init__(*args, **kwargs)
+        self.command = "show wwn"
+        self.start_key = "CH"
+
+
+class ShowIQN(ShowCommand):
+
+    """Show iSCSI initiator IQN which is set by create iqn.
+
+    show iqn
+    """
+
+    LIST_START_LINE = "List of initiator IQN(s):"
+
+    def __init__(self, *args, **kwargs):
+        super(ShowIQN, self).__init__(*args, **kwargs)
+        self.command = "show iqn"
+        self.default_type = "list"
+
+    def detect_detail_start_index(self, content):
+        for i in range(3, len(content)):
+            if content[i].strip() == self.LIST_START_LINE:
+                return i + 2
+
+        return -1
diff --git a/cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py b/cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py
new file mode 100644 (file)
index 0000000..6f3f7e6
--- /dev/null
@@ -0,0 +1,1909 @@
+# Copyright (c) 2015 Infortrend Technology, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Infortrend Common CLI.
+"""
+import math
+import time
+
+from oslo_concurrency import lockutils
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import timeutils
+from oslo_utils import units
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder.openstack.common import loopingcall
+from cinder.volume.drivers.infortrend.eonstor_ds_cli import cli_factory as cli
+from cinder.volume.drivers.san import san
+from cinder.volume import volume_types
+from cinder.zonemanager import utils as fczm_utils
+
+LOG = logging.getLogger(__name__)
+
+infortrend_esds_opts = [
+    cfg.StrOpt('infortrend_pools_name',
+               default='',
+               help='Comma-separated list of Infortrend RAID pool '
+               'names.'),
+    cfg.StrOpt('infortrend_cli_path',
+               default='/opt/bin/Infortrend/raidcmd_ESDS10.jar',
+               help='Absolute path of the Infortrend CLI. '
+               'Defaults to '
+               '/opt/bin/Infortrend/raidcmd_ESDS10.jar.'),
+    cfg.IntOpt('infortrend_cli_max_retries',
+               default=5,
+               help='Maximum number of CLI retries. Defaults to 5.'),
+    cfg.IntOpt('infortrend_cli_timeout',
+               default=30,
+               help='Timeout in minutes for CLI copy operations: '
+               'migrate volume, create cloned volume, and '
+               'create volume from snapshot. '
+               'Defaults to 30 minutes.'),
+    cfg.StrOpt('infortrend_slots_a_channels_id',
+               default='0,1,2,3,4,5,6,7',
+               help='Comma-separated list of Infortrend RAID channel IDs '
+               'on Slot A for OpenStack usage. '
+               'Defaults to channels 0-7.'),
+    cfg.StrOpt('infortrend_slots_b_channels_id',
+               default='0,1,2,3,4,5,6,7',
+               help='Comma-separated list of Infortrend RAID channel IDs '
+               'on Slot B for OpenStack usage. '
+               'Defaults to channels 0-7.'),
+]
+
+infortrend_esds_extra_opts = [
+    cfg.StrOpt('infortrend_provisioning',
+               default='full',
+               help='Provisioning type for volumes. '
+               'Supported options are full and thin. '
+               'Defaults to full provisioning.'),
+    cfg.StrOpt('infortrend_tiering',
+               default='0',
+               help='Tiering level for volumes. '
+               'Supported levels are 0, 2, 3 and 4. '
+               'Defaults to level 0.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(infortrend_esds_opts)
+CONF.register_opts(infortrend_esds_extra_opts)
+
+CLI_RC_FILTER = {
+    'CreatePartition': {'error': _('Failed to create partition.')},
+    'DeletePartition': {'error': _('Failed to delete partition.')},
+    'SetPartition': {'error': _('Failed to set partition.')},
+    'CreateMap': {
+        'warning': {20: _LW('The MCS Channel is grouped.')},
+        'error': _('Failed to create map.'),
+    },
+    'DeleteMap': {
+        'warning': {11: _LW('No mapping.')},
+        'error': _('Failed to delete map.'),
+    },
+    'CreateSnapshot': {'error': _('Failed to create snapshot.')},
+    'DeleteSnapshot': {'error': _('Failed to delete snapshot.')},
+    'CreateReplica': {'error': _('Failed to create replica.')},
+    'DeleteReplica': {'error': _('Failed to delete replica.')},
+    'CreateIQN': {
+        'warning': {20: _LW('IQN already exists.')},
+        'error': _('Failed to create iqn.'),
+    },
+    'DeleteIQN': {
+        'warning': {
+            20: _LW('IQN has been used to create a map.'),
+            11: _LW('No such host alias name.'),
+        },
+        'error': _('Failed to delete iqn.'),
+    },
+    'ShowLV': {'error': _('Failed to get lv info.')},
+    'ShowPartition': {'error': _('Failed to get partition info.')},
+    'ShowSnapshot': {'error': _('Failed to get snapshot info.')},
+    'ShowDevice': {'error': _('Failed to get device info.')},
+    'ShowChannel': {'error': _('Failed to get channel info.')},
+    'ShowMap': {'error': _('Failed to get map info.')},
+    'ShowNet': {'error': _('Failed to get network info.')},
+    'ShowLicense': {'error': _('Failed to get license info.')},
+    'ShowReplica': {'error': _('Failed to get replica info.')},
+    'ShowWWN': {'error': _('Failed to get wwn info.')},
+    'ShowIQN': {'error': _('Failed to get iqn info.')},
+    'ExecuteCommand': {'error': _('Failed to execute external command.')},
+}
+
+
+def log_func(func):
+    def inner(self, *args, **kwargs):
+        LOG.debug('Entering: %(method)s', {'method': func.__name__})
+        start = timeutils.utcnow()
+        ret = func(self, *args, **kwargs)
+        end = timeutils.utcnow()
+        LOG.debug(
+            'Leaving: %(method)s, '
+            'Spent: %(time)s sec, '
+            'Return: %(ret)s.', {
+                'method': func.__name__,
+                'time': timeutils.delta_seconds(start, end),
+                'ret': ret})
+        return ret
+    return inner
+
+
+def mi_to_gi(mi_size):
+    return mi_size * units.Mi / units.Gi
+
+
+def gi_to_mi(gi_size):
+    return gi_size * units.Gi / units.Mi
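+# For example, gi_to_mi(20) returns 20480.0 (MiB) and
+# mi_to_gi(20480) returns 20.0 (GiB), since units.Gi / units.Mi == 1024.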
+
+
+class InfortrendCommon(object):
+
+    """The Infortrend's Common Command using CLI.
+
+    Version history:
+        1.0.0 - Initial driver
+    """
+
+    VERSION = '1.0.0'
+
+    constants = {
+        'ISCSI_PORT': 3260,
+        'MAX_LUN_MAP_PER_CHL': 128
+    }
+
+    provisioning_values = ['thin', 'full']
+
+    tiering_values = ['0', '2', '3', '4']
+
+    def __init__(self, protocol, configuration=None):
+
+        self.protocol = protocol
+        self.configuration = configuration
+        self.configuration.append_config_values(san.san_opts)
+        self.configuration.append_config_values(infortrend_esds_opts)
+        self.configuration.append_config_values(infortrend_esds_extra_opts)
+
+        self.iscsi_multipath = self.configuration.use_multipath_for_image_xfer
+        self.path = self.configuration.infortrend_cli_path
+        self.password = self.configuration.san_password
+        self.ip = self.configuration.san_ip
+        self.cli_retry_time = self.configuration.infortrend_cli_max_retries
+        self.cli_timeout = self.configuration.infortrend_cli_timeout * 60
+        self.iqn = 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s'
+        self.unmanaged_prefix = 'cinder-unmanaged-%s'
+
+        if self.ip == '':
+            msg = _('san_ip is not set.')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        self.fc_lookup_service = fczm_utils.create_lookup_service()
+
+        self._volume_stats = None
+        self._model_type = 'R'
+        self._replica_timeout = self.cli_timeout
+
+        self.map_dict = {
+            'slot_a': {},
+            'slot_b': {},
+        }
+        self.map_dict_init = False
+
+        self.target_dict = {
+            'slot_a': {},
+            'slot_b': {},
+        }
+
+        if self.protocol == 'iSCSI':
+            self.mcs_dict = {
+                'slot_a': {},
+                'slot_b': {},
+            }
+
+        self._init_pool_list()
+        self._init_channel_list()
+
+        self.cli_conf = {
+            'path': self.path,
+            'password': self.password,
+            'ip': self.ip,
+            'cli_retry_time': int(self.cli_retry_time),
+        }
+
+    def _init_pool_list(self):
+        pools_name = self.configuration.infortrend_pools_name
+        if pools_name == '':
+            msg = _('Pool names are not set.')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        tmp_pool_list = pools_name.split(',')
+        self.pool_list = [pool.strip() for pool in tmp_pool_list]
+
+    def _init_channel_list(self):
+        self.channel_list = {
+            'slot_a': [],
+            'slot_b': [],
+        }
+        tmp_channel_list = (
+            self.configuration.infortrend_slots_a_channels_id.split(',')
+        )
+        self.channel_list['slot_a'] = (
+            [channel.strip() for channel in tmp_channel_list]
+        )
+        tmp_channel_list = (
+            self.configuration.infortrend_slots_b_channels_id.split(',')
+        )
+        self.channel_list['slot_b'] = (
+            [channel.strip() for channel in tmp_channel_list]
+        )
+
+    def _execute_command(self, cli_type, *args, **kwargs):
+        command = getattr(cli, cli_type)
+        return command(self.cli_conf).execute(*args, **kwargs)
+
+    def _execute(self, cli_type, *args, **kwargs):
+        LOG.debug('Executing command type: %(type)s.', {'type': cli_type})
+
+        rc, out = self._execute_command(cli_type, *args, **kwargs)
+
+        if rc != 0:
+            if ('warning' in CLI_RC_FILTER[cli_type] and
+                    rc in CLI_RC_FILTER[cli_type]['warning']):
+                LOG.warning(CLI_RC_FILTER[cli_type]['warning'][rc])
+            else:
+                msg = CLI_RC_FILTER[cli_type]['error']
+                LOG.error(msg)
+                raise exception.InfortrendCliException(
+                    err=msg, param=args, rc=rc, out=out)
+        return rc, out
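+        # Illustrative behavior: a 'CreateMap' call returning rc=20 only
+        # logs the 'MCS Channel is grouped' warning and still returns
+        # (20, out); any other nonzero rc raises InfortrendCliException.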
+
+    @log_func
+    def _init_map_info(self, multipath=False):
+        if not self.map_dict_init:
+
+            rc, channel_info = self._execute('ShowChannel')
+
+            if 'BID' in channel_info[0]:
+                self._model_type = 'R'
+            else:
+                self._model_type = 'G'
+
+            self._set_channel_id(channel_info, 'slot_a', multipath)
+
+            if multipath and self._model_type == 'R':
+                self._set_channel_id(channel_info, 'slot_b', multipath)
+
+            self.map_dict_init = True
+
+    @log_func
+    def _update_map_info(self, multipath=False):
+        """Record the driver mapping information.
+
+        map_dict = {
+            'slot_a': {
+                '0': [1, 2, 3, 4]  # Slot A channel 0 maps LUNs 1, 2, 3, 4
+            },
+            'slot_b': {
+                '1': [0, 1, 3]     # Slot B channel 1 maps LUNs 0, 1, 3
+            }
+        }
+        """
+        rc, map_info = self._execute('ShowMap')
+
+        self._update_map_info_by_slot(map_info, 'slot_a')
+
+        if multipath and self._model_type == 'R':
+            self._update_map_info_by_slot(map_info, 'slot_b')
+        return map_info
+
+    @log_func
+    def _update_map_info_by_slot(self, map_info, slot_key):
+        for key, value in self.map_dict[slot_key].items():
+            self.map_dict[slot_key][key] = list(
+                range(self.constants['MAX_LUN_MAP_PER_CHL']))
+
+        if len(map_info) > 0 and isinstance(map_info, list):
+            for entry in map_info:
+                ch = entry['Ch']
+                lun = entry['LUN']
+                if ch not in self.map_dict[slot_key].keys():
+                    continue
+
+                target_id = self.target_dict[slot_key][ch]
+                if (entry['Target'] == target_id and
+                        int(lun) in self.map_dict[slot_key][ch]):
+                    self.map_dict[slot_key][ch].remove(int(lun))
+
+    def _check_initiator_has_lun_map(self, initiator_wwns, map_info):
+        for initiator in initiator_wwns:
+            for entry in map_info:
+                if initiator.lower() == entry['Host-ID'].lower():
+                    return True
+        return False
+
+    @log_func
+    def _set_channel_id(
+            self, channel_info, controller='slot_a', multipath=False):
+
+        if self.protocol == 'iSCSI':
+            check_channel_type = 'NETWORK'
+        else:
+            check_channel_type = 'FIBRE'
+
+        for entry in channel_info:
+            if entry['Type'] == check_channel_type:
+                if entry['Ch'] in self.channel_list[controller]:
+                    self.map_dict[controller][entry['Ch']] = []
+
+                    if self.protocol == 'iSCSI':
+                        self._update_mcs_dict(
+                            entry['Ch'], entry['MCS'], controller)
+
+                    self._update_target_dict(entry, controller)
+
+    @log_func
+    def _update_target_dict(self, channel, controller):
+        """Record the target id for mapping.
+
+        # R model
+        target_dict = {
+            'slot_a': {
+                '0': '0',
+                '1': '0',
+            },
+            'slot_b': {
+                '0': '1',
+                '1': '1',
+            },
+        }
+
+        # G model
+        target_dict = {
+            'slot_a': {
+                '2': '32',
+                '3': '112',
+            }
+        }
+        """
+        if self._model_type == 'G':
+            self.target_dict[controller][channel['Ch']] = channel['ID']
+        else:
+            if controller == 'slot_a':
+                self.target_dict[controller][channel['Ch']] = channel['AID']
+            else:
+                self.target_dict[controller][channel['Ch']] = channel['BID']
+
+    def _update_mcs_dict(self, channel_id, mcs_id, controller):
+        """Record the iSCSI MCS topology.
+
+        # R model with MCS; note that it does not work with iSCSI multipath
+        mcs_dict = {
+            'slot_a': {
+                '0': ['0', '1'],
+                '1': ['2']
+            },
+            'slot_b': {
+                '0': ['0', '1'],
+                '1': ['2']
+            }
+        }
+
+        # G model with mcs
+        mcs_dict = {
+            'slot_a': {
+                '0': ['0', '1'],
+                '1': ['2']
+            },
+            'slot_b': {}
+        }
+        """
+        if mcs_id not in self.mcs_dict[controller]:
+            self.mcs_dict[controller][mcs_id] = []
+        self.mcs_dict[controller][mcs_id].append(channel_id)
+
+    def _check_tiers_setup(self):
+        tiering = self.configuration.infortrend_tiering
+        if tiering != '0':
+            self._check_extraspec_value(
+                tiering, self.tiering_values)
+            tier_levels_list = list(range(int(tiering)))
+            tier_levels_list = list(map(str, tier_levels_list))
+
+            rc, lv_info = self._execute('ShowLV', 'tier')
+
+            for pool in self.pool_list:
+                support_tier_levels = tier_levels_list[:]
+                for entry in lv_info:
+                    if (entry['LV-Name'] == pool and
+                            entry['Tier'] in support_tier_levels):
+                        support_tier_levels.remove(entry['Tier'])
+                    if len(support_tier_levels) == 0:
+                        break
+                if len(support_tier_levels) != 0:
+                    msg = _('Please create tier(s) %(tier_levels)s '
+                            'in pool %(pool)s in advance!') % {
+                                'tier_levels': support_tier_levels,
+                                'pool': pool}
+                    LOG.error(msg)
+                    raise exception.VolumeDriverException(message=msg)
+
+    def _check_pools_setup(self):
+        pool_list = self.pool_list[:]
+
+        rc, lv_info = self._execute('ShowLV')
+
+        for lv in lv_info:
+            if lv['Name'] in pool_list:
+                pool_list.remove(lv['Name'])
+            if len(pool_list) == 0:
+                break
+
+        if len(pool_list) != 0:
+            msg = _('Please create pool(s) %(pool_list)s in advance!') % {
+                'pool_list': pool_list}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+    def check_for_setup_error(self):
+        self._check_pools_setup()
+        self._check_tiers_setup()
+
+    def create_volume(self, volume):
+        """Create a Infortrend partition."""
+        volume_id = volume['id'].replace('-', '')
+
+        self._create_partition_by_default(volume)
+        part_id = self._get_part_id(volume_id)
+
+        system_id = self._get_system_id(self.ip)
+
+        model_dict = {
+            'system_id': system_id,
+            'partition_id': part_id,
+        }
+
+        model_update = {
+            "provider_location": self._concat_provider_location(model_dict),
+        }
+        LOG.info(_LI('Create Volume %(volume_id)s completed.'), {
+            'volume_id': volume_id})
+        return model_update
+
+    def _create_partition_by_default(self, volume):
+        pool_id = self._get_target_pool_id(volume)
+        self._create_partition_with_pool(volume, pool_id)
+
+    def _create_partition_with_pool(
+            self, volume, pool_id, extraspecs=None):
+
+        volume_id = volume['id'].replace('-', '')
+        volume_size = gi_to_mi(volume['size'])
+
+        if extraspecs is None:
+            extraspecs = self._get_extraspecs_dict(volume['volume_type_id'])
+
+        provisioning = self._get_extraspecs_value(extraspecs, 'provisioning')
+        tiering = self._get_extraspecs_value(extraspecs, 'tiering')
+
+        extraspecs_dict = {}
+        cmd = ''
+        if provisioning == 'thin':
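+            # Thin provisioning: pre-allocate only 20% of the volume size.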
+            provisioning = int(volume_size * 0.2)
+            extraspecs_dict['provisioning'] = provisioning
+            extraspecs_dict['init'] = 'disable'
+        else:
+            self._check_extraspec_value(
+                provisioning, self.provisioning_values)
+
+        if tiering != '0':
+            self._check_extraspec_value(
+                tiering, self.tiering_values)
+            tier_levels_list = list(range(int(tiering)))
+            tier_levels_list = list(map(str, tier_levels_list))
+            self._check_tiering_existing(tier_levels_list, pool_id)
+            extraspecs_dict['provisioning'] = 0
+            extraspecs_dict['init'] = 'disable'
+
+        if extraspecs_dict:
+            cmd = self._create_part_parameters_str(extraspecs_dict)
+
+        commands = (pool_id, volume_id, 'size=%s' % volume_size, cmd)
+        self._execute('CreatePartition', *commands)
+
+    def _create_part_parameters_str(self, extraspecs_dict):
+        parameters_list = []
+        parameters = {
+            'provisioning': 'min=%sMB',
+            'tiering': 'tier=%s',
+            'init': 'init=%s',
+        }
+        for extraspec in extraspecs_dict.keys():
+            value = parameters[extraspec] % (extraspecs_dict[extraspec])
+            parameters_list.append(value)
+
+        cmd = ' '.join(parameters_list)
+        return cmd
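+        # Illustrative example (hypothetical thin 10 GB volume):
+        # {'provisioning': 2048, 'init': 'disable'} produces a parameter
+        # string such as 'min=2048MB init=disable' (order follows dict
+        # iteration).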
+
+    def _check_tiering_existing(self, tier_levels, pool_id):
+        rc, lv_info = self._execute('ShowLV', 'tier')
+
+        for entry in lv_info:
+            if entry['LV-ID'] == pool_id and entry['Tier'] in tier_levels:
+                tier_levels.remove(entry['Tier'])
+                if len(tier_levels) == 0:
+                    break
+        if len(tier_levels) != 0:
+            msg = _('Tier(s) %(tier_levels)s have not been created.') % {
+                'tier_levels': tier_levels}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+    @log_func
+    def _create_map_with_lun_filter(
+            self, part_id, channel_id, lun_id, host, controller='slot_a'):
+
+        host_filter = self._create_target_id_and_host_filter(
+            controller, host)
+        target_id = self.target_dict[controller][channel_id]
+
+        commands = (
+            'part', part_id, channel_id, target_id, lun_id, host_filter
+        )
+        self._execute('CreateMap', *commands)
+
+    @log_func
+    def _create_map_with_mcs(
+            self, part_id, channel_list, lun_id, host, controller='slot_a'):
+
+        map_channel_id = None
+        for channel_id in channel_list:
+
+            host_filter = self._create_target_id_and_host_filter(
+                controller, host)
+            target_id = self.target_dict[controller][channel_id]
+
+            commands = (
+                'part', part_id, channel_id, target_id, lun_id,
+                host_filter
+            )
+            rc, out = self._execute('CreateMap', *commands)
+            if rc == 0:
+                map_channel_id = channel_id
+                break
+
+        if map_channel_id is None:
+            msg = _('Failed to create map on MCS: no channel can be mapped.')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        return map_channel_id
+
+    def _create_target_id_and_host_filter(self, controller, host):
+        if self.protocol == 'iSCSI':
+            host_filter = 'iqn=%s' % host
+        else:
+            host_filter = 'wwn=%s' % host
+
+        return host_filter
+
+    def _get_extraspecs_dict(self, volume_type_id):
+        extraspecs = {}
+        if volume_type_id:
+            extraspecs = volume_types.get_volume_type_extra_specs(
+                volume_type_id)
+
+        return extraspecs
+
+    def _get_extraspecs_value(self, extraspecs, key):
+        value = None
+        if key == 'provisioning':
+            if (extraspecs and
+                    'infortrend_provisioning' in extraspecs.keys()):
+                value = extraspecs['infortrend_provisioning'].lower()
+            else:
+                value = self.configuration.infortrend_provisioning.lower()
+        elif key == 'tiering':
+            value = self.configuration.infortrend_tiering
+        return value
+
+    def _select_most_free_capacity_pool_id(self, lv_info):
+        largest_free_capacity_gb = 0.0
+        dest_pool_id = None
+
+        for lv in lv_info:
+            if lv['Name'] in self.pool_list:
+                available_space = float(lv['Available'].split(' ', 1)[0])
+                free_capacity_gb = round(mi_to_gi(available_space))
+                if free_capacity_gb > largest_free_capacity_gb:
+                    largest_free_capacity_gb = free_capacity_gb
+                    dest_pool_id = lv['ID']
+        return dest_pool_id
+
+    def _get_target_pool_id(self, volume):
+        extraspecs = self._get_extraspecs_dict(volume['volume_type_id'])
+        pool_id = None
+        rc, lv_info = self._execute('ShowLV')
+
+        if 'pool_name' in extraspecs.keys():
+            poolname = extraspecs['pool_name']
+
+            for entry in lv_info:
+                if entry['Name'] == poolname:
+                    pool_id = entry['ID']
+        else:
+            pool_id = self._select_most_free_capacity_pool_id(lv_info)
+
+        if pool_id is None:
+            msg = _('Failed to get pool id with volume %(volume_id)s.') % {
+                'volume_id': volume['id']}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        return pool_id
+
+    def _get_system_id(self, system_ip):
+        rc, device_info = self._execute('ShowDevice')
+
+        for entry in device_info:
+            if system_ip == entry['Connected-IP']:
+                return str(int(entry['ID'], 16))
+        return
+
+    @log_func
+    def _get_lun_id(self, ch_id, controller='slot_a'):
+        lun_id = -1
+
+        if len(self.map_dict[controller][ch_id]) > 0:
+            lun_id = self.map_dict[controller][ch_id][0]
+            self.map_dict[controller][ch_id].remove(lun_id)
+
+        if lun_id == -1:
+            msg = _('LUN number is out of bounds '
+                    'on channel id: %(ch_id)s.') % {'ch_id': ch_id}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+        else:
+            return lun_id
+
+    @log_func
+    def _get_mapping_info(self, multipath):
+        if self.iscsi_multipath or multipath:
+            return self._get_mapping_info_with_mcs()
+        else:
+            return self._get_mapping_info_with_normal()
+
+    def _get_mapping_info_with_mcs(self):
+        """Get the minimun mapping channel id and multi lun id mapping info.
+
+        # R model with mcs
+        map_chl = {
+            'slot_a': ['0', '1']
+        }
+        map_lun = ['0']
+
+        # G model with mcs
+        map_chl = {
+            'slot_a': ['1', '2']
+        }
+        map_lun = ['0']
+
+        :returns: minimum mapping channel id per slot and multi-LUN id
+        """
+        map_chl = {
+            'slot_a': []
+        }
+
+        min_lun_num = 0
+        map_mcs_group = None
+        for mcs in self.mcs_dict['slot_a']:
+            if len(self.mcs_dict['slot_a'][mcs]) > 1:
+                if min_lun_num < self._get_mcs_channel_lun_map_num(mcs):
+                    min_lun_num = self._get_mcs_channel_lun_map_num(mcs)
+                    map_mcs_group = mcs
+
+        if map_mcs_group is None:
+            msg = _('RAID does not have an MCS channel.')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        map_chl['slot_a'] = self.mcs_dict['slot_a'][map_mcs_group]
+        map_lun = self._get_mcs_channel_lun_map(map_chl['slot_a'])
+        return map_chl, map_lun, map_mcs_group
+
+    def _get_mcs_channel_lun_map_num(self, mcs_id):
+        lun_num = 0
+        for channel in self.mcs_dict['slot_a'][mcs_id]:
+            lun_num += len(self.map_dict['slot_a'][channel])
+        return lun_num
+
+    def _get_mcs_channel_lun_map(self, channel_list):
+        """Find the common lun id in mcs channel."""
+
+        map_lun = []
+        for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']):
+            check_map = True
+            for channel_id in channel_list:
+                if lun_id not in self.map_dict['slot_a'][channel_id]:
+                    check_map = False
+            if check_map:
+                map_lun.append(str(lun_id))
+                break
+        return map_lun
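+        # Illustrative example (hypothetical state): if channels '0' and
+        # '1' both still have LUN 0 unmapped in map_dict['slot_a'], this
+        # returns ['0'].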
+
+    @log_func
+    def _get_mapping_info_with_normal(self):
+        """Get the minimun mapping channel id and lun id mapping info.
+
+        # G model and R model
+        map_chl = {
+            'slot_a': ['1']
+        }
+        map_lun = ['0']
+
+        :returns: minimum mapping channel id per slot and LUN id
+        """
+        map_chl = {
+            'slot_a': []
+        }
+        map_lun = []
+
+        ret_chl = self._get_minimun_mapping_channel_id('slot_a')
+        lun_id = self._get_lun_id(ret_chl, 'slot_a')
+        mcs_id = self._get_mcs_id_by_channel_id(ret_chl)
+
+        map_chl['slot_a'].append(ret_chl)
+        map_lun.append(str(lun_id))
+
+        return map_chl, map_lun, mcs_id
+
+    @log_func
+    def _get_minimun_mapping_channel_id(self, controller):
+        empty_lun_num = 0
+        min_map_chl = -1
+
+        for key, value in self.map_dict[controller].items():
+            if empty_lun_num < len(value):
+                min_map_chl = key
+                empty_lun_num = len(value)
+
+        if int(min_map_chl) < 0:
+            msg = _('LUN map overflow on every channel.')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+        else:
+            return min_map_chl
+
+    def _get_common_lun_map_id(self, wwpn_channel_info):
+        map_lun = None
+
+        for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']):
+            lun_id_exist = False
+            for slot_name in ['slot_a', 'slot_b']:
+                for wwpn in wwpn_channel_info:
+                    channel_id = wwpn_channel_info[wwpn]['channel']
+                    if channel_id not in self.map_dict[slot_name]:
+                        continue
+                    elif lun_id not in self.map_dict[slot_name][channel_id]:
+                        lun_id_exist = True
+            if not lun_id_exist:
+                map_lun = str(lun_id)
+                break
+        return map_lun
+
+    def _get_mcs_id_by_channel_id(self, channel_id):
+        mcs_id = None
+
+        for mcs in self.mcs_dict['slot_a']:
+            if channel_id in self.mcs_dict['slot_a'][mcs]:
+                mcs_id = mcs
+                break
+
+        if mcs_id is None:
+            msg = _('Cannot get mcs_id by channel id: %(channel_id)s.') % {
+                'channel_id': channel_id}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        return mcs_id
+
+    def _concat_provider_location(self, model_dict):
+        return '@'.join([i + '^' + str(model_dict[i]) for i in model_dict])
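+        # Illustrative example (hypothetical ids):
+        # {'system_id': '12345', 'partition_id': '6A41315B0EDC8EB7'} becomes
+        # 'system_id^12345@partition_id^6A41315B0EDC8EB7' (entry order
+        # follows dict iteration).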
+
+    def delete_volume(self, volume):
+        """Delete the specific volume."""
+
+        volume_id = volume['id'].replace('-', '')
+        has_pair = False
+        have_map = False
+
+        part_id = self._extract_specific_provider_location(
+            volume['provider_location'], 'partition_id')
+
+        (check_exist, have_map, part_id) = (
+            self._check_volume_exist(volume_id, part_id)
+        )
+
+        if not check_exist:
+            LOG.warning(_LW('Volume %(volume_id)s already deleted.'), {
+                'volume_id': volume_id})
+            return
+
+        rc, replica_list = self._execute('ShowReplica', '-l')
+
+        for entry in replica_list:
+            if (volume_id == entry['Source-Name'] and
+                    part_id == entry['Source']):
+                if not self._check_replica_completed(entry):
+                    has_pair = True
+                    LOG.warning(_LW('Volume still %(status)s. '
+                                    'Cannot delete volume.'), {
+                                        'status': entry['Status']})
+                else:
+                    have_map = entry['Source-Mapped'] == 'Yes'
+                    self._execute('DeleteReplica', entry['Pair-ID'], '-y')
+
+            elif (volume_id == entry['Target-Name'] and
+                    part_id == entry['Target']):
+                have_map = entry['Target-Mapped'] == 'Yes'
+                self._execute('DeleteReplica', entry['Pair-ID'], '-y')
+
+        if not has_pair:
+
+            rc, snapshot_list = self._execute(
+                'ShowSnapshot', 'part=%s' % part_id)
+
+            for snapshot in snapshot_list:
+                si_has_pair = self._delete_pair_with_snapshot(
+                    snapshot['SI-ID'], replica_list)
+
+                if si_has_pair:
+                    msg = _('Failed to delete SI '
+                            'for volume_id: %(volume_id)s '
+                            'because it has a pair.') % {
+                                'volume_id': volume_id}
+                    LOG.error(msg)
+                    raise exception.VolumeDriverException(message=msg)
+
+                self._execute('DeleteSnapshot', snapshot['SI-ID'], '-y')
+
+            rc, map_info = self._execute('ShowMap', 'part=%s' % part_id)
+
+            if have_map or len(map_info) > 0:
+                self._execute('DeleteMap', 'part', part_id, '-y')
+
+            self._execute('DeletePartition', part_id, '-y')
+
+            LOG.info(_LI('Delete Volume %(volume_id)s completed.'), {
+                'volume_id': volume_id})
+        else:
+            msg = _('Failed to delete volume '
+                    'for volume_id: %(volume_id)s '
+                    'because it has a pair.') % {
+                        'volume_id': volume_id}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+    def _check_replica_completed(self, replica):
+        if ((replica['Type'] == 'Copy' and replica['Status'] == 'Completed') or
+                (replica['Type'] == 'Mirror' and
+                    replica['Status'] == 'Mirror')):
+            return True
+
+        return False
+
+    def _check_volume_exist(self, volume_id, part_id):
+        check_exist = False
+        have_map = False
+        result_part_id = part_id
+
+        rc, part_list = self._execute('ShowPartition', '-l')
+
+        for entry in part_list:
+            if entry['Name'] == volume_id:
+                check_exist = True
+
+                if part_id is None:
+                    result_part_id = entry['ID']
+                if entry['Mapped'] == 'true':
+                    have_map = True
+
+        if check_exist:
+            return (check_exist, have_map, result_part_id)
+        else:
+            return (False, False, None)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Create a clone of the volume by volume copy."""
+
+        src_volume_id = src_vref['id'].replace('-', '')
+        # Find the source partition of the volume being cloned
+        src_part_id = self._extract_specific_provider_location(
+            src_vref['provider_location'], 'partition_id')
+
+        if src_part_id is None:
+            src_part_id = self._get_part_id(src_volume_id)
+
+        model_update = self._create_volume_from_volume(volume, src_part_id)
+
+        LOG.info(_LI('Create Cloned Volume %(volume_id)s completed.'), {
+            'volume_id': volume['id']})
+        return model_update
+
+    def _create_volume_from_volume(self, dst_volume, src_part_id):
+        # create the target volume for volume copy
+        dst_volume_id = dst_volume['id'].replace('-', '')
+
+        self._create_partition_by_default(dst_volume)
+
+        dst_part_id = self._get_part_id(dst_volume_id)
+        # prepare return value
+        system_id = self._get_system_id(self.ip)
+        model_dict = {
+            'system_id': system_id,
+            'partition_id': dst_part_id,
+        }
+
+        model_info = self._concat_provider_location(model_dict)
+        model_update = {"provider_location": model_info}
+
+        # clone the volume from the origin partition
+        commands = (
+            'Cinder-Cloned', 'part', src_part_id, 'part', dst_part_id
+        )
+        self._execute('CreateReplica', *commands)
+        self._wait_replica_complete(dst_part_id)
+
+        return model_update
+
+    def _extract_specific_provider_location(self, provider_location, key):
+        provider_location_dict = self._extract_all_provider_location(
+            provider_location)
+
+        result = provider_location_dict.get(key, None)
+        return result
+
+    @log_func
+    def _extract_all_provider_location(self, provider_location):
+        provider_location_dict = {}
+        dict_entry = provider_location.split("@")
+        for entry in dict_entry:
+            key, value = entry.split('^', 1)
+            if value == 'None':
+                value = None
+            provider_location_dict[key] = value
+
+        return provider_location_dict
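+        # Illustrative inverse of _concat_provider_location:
+        # 'system_id^12345@partition_id^None' parses to
+        # {'system_id': '12345', 'partition_id': None}.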
+
+    def create_export(self, context, volume):
+        model_update = volume['provider_location']
+
+        LOG.info(_LI('Create export completed for volume %(volume_id)s.'), {
+            'volume_id': volume['id']})
+
+        return {'provider_location': model_update}
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status.
+
+        If refresh is True, update the status first.
+        """
+        if self._volume_stats is None or refresh:
+            self._update_volume_stats()
+
+        LOG.info(_LI(
+            'Successfully updated volume stats. '
+            'backend: %(volume_backend_name)s, '
+            'vendor: %(vendor_name)s, '
+            'driver version: %(driver_version)s, '
+            'storage protocol: %(storage_protocol)s.'), self._volume_stats)
+
+        return self._volume_stats
+
+    def _update_volume_stats(self):
+
+        backend_name = self.configuration.safe_get('volume_backend_name')
+
+        data = {
+            'volume_backend_name': backend_name,
+            'vendor_name': 'Infortrend',
+            'driver_version': self.VERSION,
+            'storage_protocol': self.protocol,
+            'pools': self._update_pools_stats(),
+        }
+        self._volume_stats = data
+
+    def _update_pools_stats(self):
+        enable_specs_dict = self._get_enable_specs_on_array()
+
+        if 'Thin Provisioning' in enable_specs_dict.keys():
+            provisioning = 'thin'
+            provisioning_support = True
+        else:
+            provisioning = 'full'
+            provisioning_support = False
+
+        rc, part_list = self._execute('ShowPartition', '-l')
+        rc, pools_info = self._execute('ShowLV')
+        pools = []
+
+        for pool in pools_info:
+            if pool['Name'] in self.pool_list:
+                total_space = float(pool['Size'].split(' ', 1)[0])
+                available_space = float(pool['Available'].split(' ', 1)[0])
+
+                total_capacity_gb = round(mi_to_gi(total_space), 2)
+                free_capacity_gb = round(mi_to_gi(available_space), 2)
+                provisioning_factor = self.configuration.safe_get(
+                    'max_over_subscription_ratio')
+                provisioned_space = self._get_provisioned_space(
+                    pool['ID'], part_list)
+                provisioned_capacity_gb = round(mi_to_gi(provisioned_space), 2)
+
+                new_pool = {
+                    'pool_name': pool['Name'],
+                    'pool_id': pool['ID'],
+                    'total_capacity_gb': total_capacity_gb,
+                    'free_capacity_gb': free_capacity_gb,
+                    'reserved_percentage': 0,
+                    'QoS_support': False,
+                    'provisioned_capacity_gb': provisioned_capacity_gb,
+                    'max_over_subscription_ratio': provisioning_factor,
+                    'thin_provisioning_support': provisioning_support,
+                    'thick_provisioning_support': True,
+                    'infortrend_provisioning': provisioning,
+                }
+                pools.append(new_pool)
+        return pools
+
+    def _get_provisioned_space(self, pool_id, part_list):
+        provisioning_space = 0
+        for entry in part_list:
+            if entry['LV-ID'] == pool_id:
+                provisioning_space += int(entry['Size'])
+        return provisioning_space
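+        # Partition sizes are treated as MB here; the caller converts the
+        # sum with mi_to_gi() when reporting provisioned_capacity_gb.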
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+
+        snapshot_id = snapshot['id'].replace('-', '')
+        volume_id = snapshot['volume_id'].replace('-', '')
+
+        LOG.debug('Create Snapshot %(snapshot)s volume %(volume)s.',
+                  {'snapshot': snapshot_id, 'volume': volume_id})
+
+        model_update = {}
+        part_id = self._get_part_id(volume_id)
+
+        if part_id is None:
+            msg = _('Failed to get Partition ID for volume %(volume_id)s.') % {
+                'volume_id': volume_id}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        @lockutils.synchronized(
+            'snapshot-' + part_id, 'infortrend-', True)
+        def do_create_snapshot():
+            self._execute('CreateSnapshot', 'part', part_id)
+            rc, tmp_snapshot_list = self._execute(
+                'ShowSnapshot', 'part=%s' % part_id)
+            return tmp_snapshot_list
+
+        snapshot_list = do_create_snapshot()
+
+        LOG.info(_LI(
+            'Create snapshot completed. '
+            'Snapshot: %(snapshot)s, '
+            'Snapshot ID in raid: %(raid_snapshot_id)s, '
+            'volume: %(volume)s.'), {
+                'snapshot': snapshot_id,
+                'raid_snapshot_id': snapshot_list[-1]['SI-ID'],
+                'volume': volume_id})
+        model_update['provider_location'] = snapshot_list[-1]['SI-ID']
+        return model_update
+
+    def delete_snapshot(self, snapshot):
+        """Delete the snapshot."""
+
+        snapshot_id = snapshot['id'].replace('-', '')
+        volume_id = snapshot['volume_id'].replace('-', '')
+
+        LOG.debug('Delete Snapshot %(snapshot)s volume %(volume)s.',
+                  {'snapshot': snapshot_id, 'volume': volume_id})
+
+        raid_snapshot_id = self._get_raid_snapshot_id(snapshot)
+
+        if raid_snapshot_id:
+
+            rc, replica_list = self._execute('ShowReplica', '-l')
+
+            has_pair = self._delete_pair_with_snapshot(
+                raid_snapshot_id, replica_list)
+
+            if not has_pair:
+                self._execute('DeleteSnapshot', raid_snapshot_id, '-y')
+
+                LOG.info(_LI('Delete Snapshot %(snapshot_id)s completed.'), {
+                    'snapshot_id': snapshot_id})
+            else:
+                msg = _('Failed to delete snapshot '
+                        'for snapshot_id: %s '
+                        'because it has a pair.') % snapshot_id
+                LOG.error(msg)
+                raise exception.VolumeDriverException(message=msg)
+        else:
+            msg = _(
+                'Failed to get RAID snapshot ID '
+                'from Snapshot %(snapshot_id)s.') % {
+                    'snapshot_id': snapshot_id}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def _get_raid_snapshot_id(self, snapshot):
+        if 'provider_location' not in snapshot:
+            LOG.warning(_LW(
+                'Failed to get RAID snapshot ID; '
+                'it was not stored in the snapshot.'))
+            return
+        return snapshot['provider_location']
+
+    def _delete_pair_with_snapshot(self, snapshot_id, replica_list):
+        has_pair = False
+        for entry in replica_list:
+            if entry['Source'] == snapshot_id:
+
+                if not self._check_replica_completed(entry):
+                    has_pair = True
+                    LOG.warning(_LW(
+                        'Snapshot still %(status)s. Cannot delete snapshot.'), {
+                            'status': entry['Status']})
+                else:
+                    self._execute('DeleteReplica', entry['Pair-ID'], '-y')
+        return has_pair
+
+    def _get_part_id(self, volume_id, pool_id=None, part_list=None):
+        if part_list is None:
+            rc, part_list = self._execute('ShowPartition')
+        for entry in part_list:
+            if pool_id is None:
+                if entry['Name'] == volume_id:
+                    return entry['ID']
+            else:
+                if entry['Name'] == volume_id and entry['LV-ID'] == pool_id:
+                    return entry['ID']
+        return
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        raid_snapshot_id = self._get_raid_snapshot_id(snapshot)
+
+        if raid_snapshot_id is None:
+            msg = _('Failed to get RAID snapshot ID '
+                    'from snapshot: %(snapshot_id)s.') % {
+                        'snapshot_id': snapshot['id']}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        src_part_id = self._check_snapshot_filled_block(raid_snapshot_id)
+
+        model_update = self._create_volume_from_snapshot_id(
+            volume, raid_snapshot_id, src_part_id)
+
+        LOG.info(_LI(
+            'Create Volume %(volume_id)s from '
+            'snapshot %(snapshot_id)s completed.'), {
+                'volume_id': volume['id'],
+                'snapshot_id': snapshot['id']})
+
+        return model_update
+
+    def _check_snapshot_filled_block(self, raid_snapshot_id):
+        rc, snapshot_list = self._execute(
+            'ShowSnapshot', 'si=%s' % raid_snapshot_id, '-l')
+
+        if snapshot_list and snapshot_list[0]['Total-filled-block'] == '0':
+            return snapshot_list[0]['Partition-ID']
+        return
+
+    def _create_volume_from_snapshot_id(
+            self, dst_volume, raid_snapshot_id, src_part_id):
+        # create the target volume for volume copy
+        dst_volume_id = dst_volume['id'].replace('-', '')
+
+        self._create_partition_by_default(dst_volume)
+
+        dst_part_id = self._get_part_id(dst_volume_id)
+        # prepare return value
+        system_id = self._get_system_id(self.ip)
+        model_dict = {
+            'system_id': system_id,
+            'partition_id': dst_part_id,
+        }
+
+        model_info = self._concat_provider_location(model_dict)
+        model_update = {"provider_location": model_info}
+
+        if src_part_id:
+            # clone the volume from the origin partition
+            commands = (
+                'Cinder-Snapshot', 'part', src_part_id, 'part', dst_part_id
+            )
+            self._execute('CreateReplica', *commands)
+            self._wait_replica_complete(dst_part_id)
+
+        # clone the volume from the snapshot
+        commands = (
+            'Cinder-Snapshot', 'si', raid_snapshot_id, 'part', dst_part_id
+        )
+        self._execute('CreateReplica', *commands)
+        self._wait_replica_complete(dst_part_id)
+
+        return model_update
+
+    @lockutils.synchronized('connection', 'infortrend-', True)
+    def initialize_connection(self, volume, connector):
+        if self.protocol == 'iSCSI':
+            multipath = connector.get('multipath', False)
+            return self._initialize_connection_iscsi(
+                volume, connector, multipath)
+        elif self.protocol == 'FC':
+            return self._initialize_connection_fc(
+                volume, connector)
+        else:
+            msg = _('Unknown protocol: %(protocol)s.') % {
+                'protocol': self.protocol}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+    def _initialize_connection_fc(self, volume, connector):
+        self._init_map_info(True)
+        self._update_map_info(True)
+
+        map_lun, target_wwpns, initiator_target_map = (
+            self._do_fc_connection(volume, connector)
+        )
+
+        properties = self._generate_fc_connection_properties(
+            map_lun, target_wwpns, initiator_target_map)
+
+        LOG.info(_LI('Successfully initialized connection. '
+                     'target_wwn: %(target_wwn)s, '
+                     'initiator_target_map: %(initiator_target_map)s, '
+                     'lun: %(target_lun)s.'), properties['data'])
+        return properties
+
+    def _do_fc_connection(self, volume, connector):
+        volume_id = volume['id'].replace('-', '')
+        target_wwpns = []
+
+        partition_data = self._extract_all_provider_location(
+            volume['provider_location'])
+        part_id = partition_data['partition_id']
+
+        if part_id is None:
+            part_id = self._get_part_id(volume_id)
+
+        wwpn_list, wwpn_channel_info = self._get_wwpn_list()
+
+        initiator_target_map, target_wwpns = self._build_initiator_target_map(
+            connector, wwpn_list)
+
+        map_lun = self._get_common_lun_map_id(wwpn_channel_info)
+
+        for initiator_wwpn in initiator_target_map:
+            for target_wwpn in initiator_target_map[initiator_wwpn]:
+                channel_id = wwpn_channel_info[target_wwpn.upper()]['channel']
+                controller = wwpn_channel_info[target_wwpn.upper()]['slot']
+                self._create_map_with_lun_filter(
+                    part_id, channel_id, map_lun, initiator_wwpn,
+                    controller=controller)
+
+        return map_lun, target_wwpns, initiator_target_map
+
+    def _build_initiator_target_map(self, connector, all_target_wwpns):
+        initiator_target_map = {}
+        target_wwpns = []
+
+        if self.fc_lookup_service:
+            lookup_map = (
+                self.fc_lookup_service.get_device_mapping_from_network(
+                    connector['wwpns'], all_target_wwpns)
+            )
+            for fabric_name in lookup_map:
+                fabric = lookup_map[fabric_name]
+                target_wwpns.extend(fabric['target_port_wwn_list'])
+                for initiator in fabric['initiator_port_wwn_list']:
+                    initiator_target_map[initiator] = (
+                        fabric['target_port_wwn_list']
+                    )
+        else:
+            initiator_wwns = connector['wwpns']
+            target_wwpns = all_target_wwpns
+            for initiator in initiator_wwns:
+                initiator_target_map[initiator] = all_target_wwpns
+
+        return initiator_target_map, target_wwpns
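+        # Illustrative example (hypothetical WWPNs): without a lookup
+        # service, initiators ['i1', 'i2'] against targets ['t1', 't2']
+        # yield {'i1': ['t1', 't2'], 'i2': ['t1', 't2']} and
+        # target_wwpns == ['t1', 't2'].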
+
+    def _generate_fc_connection_properties(
+            self, lun_id, target_wwpns, initiator_target_map):
+
+        return {
+            'driver_volume_type': 'fibre_channel',
+            'data': {
+                'target_discovered': True,
+                'target_lun': int(lun_id),
+                'target_wwn': target_wwpns,
+                'access_mode': 'rw',
+                'initiator_target_map': initiator_target_map,
+            },
+        }
+
+    @log_func
+    def _initialize_connection_iscsi(self, volume, connector, multipath):
+        self._init_map_info(multipath)
+        self._update_map_info(multipath)
+
+        volume_id = volume['id'].replace('-', '')
+
+        partition_data = self._extract_all_provider_location(
+            volume['provider_location'])  # system_id, part_id
+
+        part_id = partition_data['partition_id']
+
+        if part_id is None:
+            part_id = self._get_part_id(volume_id)
+
+        self._set_host_iqn(connector['initiator'])
+
+        map_chl, map_lun, mcs_id = self._get_mapping_info(multipath)
+
+        lun_id = map_lun[0]
+
+        if self.iscsi_multipath or multipath:
+            channel_id = self._create_map_with_mcs(
+                part_id, map_chl['slot_a'], lun_id, connector['initiator'])
+        else:
+            channel_id = map_chl['slot_a'][0]
+
+            self._create_map_with_lun_filter(
+                part_id, channel_id, lun_id, connector['initiator'])
+
+        rc, net_list = self._execute('ShowNet')
+        ip = self._get_ip_by_channel(channel_id, net_list)
+
+        if ip is None:
+            msg = _(
+                'Failed to get IP on channel %(channel_id)s '
+                'with volume: %(volume_id)s.') % {
+                    'channel_id': channel_id, 'volume_id': volume_id}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        partition_data = self._combine_channel_lun_target_id(
+            partition_data, mcs_id, lun_id, channel_id)
+
+        property_value = [{
+            'lun_id': partition_data['lun_id'],
+            'iqn': self._generate_iqn(partition_data),
+            'ip': ip,
+            'port': self.constants['ISCSI_PORT'],
+        }]
+
+        properties = self._generate_iscsi_connection_properties(
+            property_value, volume)
+        LOG.info(_LI('Successfully initialized connection '
+                     'with volume: %(volume_id)s.'), properties['data'])
+        return properties
+
+    @log_func
+    def _combine_channel_lun_target_id(
+            self, partition_data, mcs_id, lun_id, channel_id):
+
+        target_id = self.target_dict['slot_a'][channel_id]
+
+        partition_data['mcs_id'] = mcs_id
+        partition_data['lun_id'] = lun_id
+        partition_data['target_id'] = target_id
+        partition_data['slot_id'] = 1
+
+        return partition_data
+
+    def _set_host_iqn(self, host_iqn):
+
+        rc, iqn_list = self._execute('ShowIQN')
+
+        check_iqn_exist = False
+        for entry in iqn_list:
+            if entry['IQN'] == host_iqn:
+                check_iqn_exist = True
+
+        if not check_iqn_exist:
+            self._execute(
+                'CreateIQN', host_iqn, self._truncate_host_name(host_iqn))
+
+    def _truncate_host_name(self, iqn):
+        if len(iqn) > 16:
+            return iqn[-16:]
+        else:
+            return iqn
+
+    @log_func
+    def _generate_iqn(self, partition_data):
+        return self.iqn % (
+            partition_data['system_id'],
+            partition_data['mcs_id'],
+            partition_data['target_id'],
+            partition_data['slot_id'])
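+        # Illustrative example (hypothetical values): system_id='12345',
+        # mcs_id='0', target_id='0', slot_id=1 yields
+        # 'iqn.2002-10.com.infortrend:raid.uid12345.001'.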
+
+    @log_func
+    def _get_ip_by_channel(
+            self, channel_id, net_list, controller='slot_a'):
+
+        slot_name = 'slotA' if controller == 'slot_a' else 'slotB'
+
+        for entry in net_list:
+            if entry['ID'] == channel_id and entry['Slot'] == slot_name:
+                return entry['IPv4']
+        return
+
+    def _get_wwpn_list(self):
+        rc, wwn_list = self._execute('ShowWWN')
+
+        wwpn_list = []
+        wwpn_channel_info = {}
+
+        for entry in wwn_list:
+            channel_id = entry['CH']
+            if 'BID:113' == entry['ID']:
+                slot_name = 'slot_b'
+            else:
+                slot_name = 'slot_a'
+
+            if channel_id in self.map_dict[slot_name]:
+                wwpn_list.append(entry['WWPN'])
+
+                wwpn_channel_info[entry['WWPN']] = {
+                    'channel': channel_id,
+                    'slot': slot_name,
+                }
+
+        return wwpn_list, wwpn_channel_info
+
+    @log_func
+    def _generate_iscsi_connection_properties(
+            self, property_value, volume):
+
+        properties = {}
+        discovery_exist = False
+
+        specific_property = property_value[0]
+
+        discovery_ip = '%s:%s' % (
+            specific_property['ip'], specific_property['port'])
+        discovery_iqn = specific_property['iqn']
+
+        if self._do_iscsi_discovery(discovery_iqn, discovery_ip):
+            properties['target_portal'] = discovery_ip
+            properties['target_iqn'] = discovery_iqn
+            properties['target_lun'] = int(specific_property['lun_id'])
+            discovery_exist = True
+
+        if not discovery_exist:
+            msg = _(
+                'Could not find iSCSI target '
+                'for volume: %(volume_id)s.') % {
+                    'volume_id': volume['id']}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        properties['target_discovered'] = discovery_exist
+        properties['volume_id'] = volume['id']
+
+        if 'provider_auth' in volume:
+            auth = volume['provider_auth']
+            if auth:
+                (auth_method, auth_username, auth_secret) = auth.split()
+                properties['auth_method'] = auth_method
+                properties['auth_username'] = auth_username
+                properties['auth_password'] = auth_secret
+
+        return {
+            'driver_volume_type': 'iscsi',
+            'data': properties,
+        }
+
+    @log_func
+    def _do_iscsi_discovery(self, target_iqn, target_ip):
+        rc, out = self._execute(
+            'ExecuteCommand',
+            'iscsiadm', '-m', 'discovery',
+            '-t', 'sendtargets', '-p',
+            target_ip,
+            run_as_root=True)
+
+        if rc != 0:
+            LOG.error(_LE(
+                'Cannot do iSCSI discovery on %(target_ip)s '
+                'with %(target_iqn)s.'), {
+                    'target_ip': target_ip, 'target_iqn': target_iqn})
+            return False
+        else:
+            for target in out.splitlines():
+                if target_iqn in target and target_ip in target:
+                    return True
+        return False
+
+    def extend_volume(self, volume, new_size):
+        volume_id = volume['id'].replace('-', '')
+
+        part_id = self._extract_specific_provider_location(
+            volume['provider_location'], 'partition_id')
+
+        if part_id is None:
+            part_id = self._get_part_id(volume_id)
+
+        expand_size = new_size - volume['size']
+
+        if '.' in ('%s' % expand_size):
+            expand_size = round(gi_to_mi(float(expand_size)))
+            expand_command = 'size=%sMB' % expand_size
+        else:
+            expand_command = 'size=%sGB' % expand_size
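+        # For example, growing by a whole 1 GB issues 'size=1GB', while a
+        # fractional growth such as 0.5 GB is converted to 'size=512MB'.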
+
+        self._execute('SetPartition', 'expand', part_id, expand_command)
+
+        LOG.info(_LI(
+            'Successfully extended volume %(volume_id)s to size %(size)s.'), {
+                'volume_id': volume['id'], 'size': new_size})
+
+    @lockutils.synchronized('connection', 'infortrend-', True)
+    def terminate_connection(self, volume, connector):
+        volume_id = volume['id'].replace('-', '')
+        multipath = connector.get('multipath', False)
+        conn_info = None
+
+        part_id = self._extract_specific_provider_location(
+            volume['provider_location'], 'partition_id')
+
+        if part_id is None:
+            part_id = self._get_part_id(volume_id)
+
+        self._execute('DeleteMap', 'part', part_id, '-y')
+
+        if self.protocol == 'iSCSI':
+            self._execute(
+                'DeleteIQN', self._truncate_host_name(connector['initiator']))
+        map_info = self._update_map_info(multipath)
+
+        if self.protocol == 'FC' and self.fc_lookup_service:
+            lun_map_exist = self._check_initiator_has_lun_map(
+                connector['wwpns'], map_info)
+
+            if not lun_map_exist:
+                conn_info = {'driver_volume_type': 'fibre_channel',
+                             'data': {}}
+                wwpn_list, wwpn_channel_info = self._get_wwpn_list()
+                init_target_map, target_wwpns = (
+                    self._build_initiator_target_map(connector, wwpn_list)
+                )
+                conn_info['data']['initiator_target_map'] = init_target_map
+
+        LOG.info(_LI(
+            'Successfully terminated connection for volume: %(volume_id)s.'), {
+                'volume_id': volume['id']})
+
+        return conn_info
+
+    def migrate_volume(self, volume, host, new_extraspecs=None):
+        is_valid, dst_pool_id = (
+            self._is_valid_for_storage_assisted_migration(host)
+        )
+        if not is_valid:
+            return (False, None)
+
+        model_dict = self._migrate_volume_with_pool(
+            volume, dst_pool_id, new_extraspecs)
+
+        model_update = {
+            "provider_location": self._concat_provider_location(model_dict),
+        }
+
+        LOG.info(_LI('Migration of volume %(volume_id)s completed.'), {
+            'volume_id': volume['id']})
+
+        return (True, model_update)
+
+    def _is_valid_for_storage_assisted_migration(self, host):
+        if 'pool_id' not in host['capabilities']:
+            LOG.warning(_LW('Failed to get target pool id.'))
+            return (False, None)
+
+        dst_pool_id = host['capabilities']['pool_id']
+        if dst_pool_id is None:
+            return (False, None)
+
+        return (True, dst_pool_id)
+
+    def _migrate_volume_with_pool(self, volume, dst_pool_id, extraspecs=None):
+        volume_id = volume['id'].replace('-', '')
+
+        # Get old partition data for delete map
+        partition_data = self._extract_all_provider_location(
+            volume['provider_location'])
+
+        src_part_id = partition_data['partition_id']
+
+        if src_part_id is None:
+            src_part_id = self._get_part_id(volume_id)
+
+        # Create New Partition
+        self._create_partition_with_pool(volume, dst_pool_id, extraspecs)
+
+        dst_part_id = self._get_part_id(
+            volume_id, pool_id=dst_pool_id)
+
+        if dst_part_id is None:
+            msg = _('Failed to get new part id in new pool: %(pool_id)s.') % {
+                'pool_id': dst_pool_id}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        # Volume Mirror from old partition into new partition
+        commands = (
+            'Cinder-Migrate', 'part', src_part_id, 'part', dst_part_id,
+            'type=mirror'
+        )
+        self._execute('CreateReplica', *commands)
+
+        self._wait_replica_complete(dst_part_id)
+
+        self._execute('DeleteMap', 'part', src_part_id, '-y')
+        self._execute('DeletePartition', src_part_id, '-y')
+
+        model_dict = {
+            'system_id': partition_data['system_id'],
+            'partition_id': dst_part_id,
+        }
+
+        return model_dict
+
+    def _wait_replica_complete(self, part_id):
+        start_time = int(time.time())
+        timeout = self._replica_timeout
+
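+        # _inner polls the replica list; raising LoopingCallDone stops the
+        # FixedIntervalLoopingCall below once the mirror pair for part_id
+        # reports completed, while exceeding the timeout raises
+        # VolumeDriverException.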
+        def _inner():
+            check_done = False
+            try:
+                rc, replica_list = self._execute('ShowReplica', '-l')
+                for entry in replica_list:
+                    if (entry['Target'] == part_id and
+                            self._check_replica_completed(entry)):
+                        check_done = True
+                        self._execute('DeleteReplica', entry['Pair-ID'], '-y')
+            except Exception:
+                check_done = False
+                LOG.exception(_LE('Cannot detect replica status.'))
+
+            if check_done:
+                raise loopingcall.LoopingCallDone()
+
+            if int(time.time()) - start_time > timeout:
+                msg = _('Timed out waiting for replica to complete.')
+                LOG.error(msg)
+                raise exception.VolumeDriverException(message=msg)
+
+        timer = loopingcall.FixedIntervalLoopingCall(_inner)
+        timer.start(interval=10).wait()
+
+    def _check_extraspec_value(self, extraspec, validvalues):
+        if not extraspec:
+            LOG.debug("The given extraspec is None.")
+        elif extraspec not in validvalues:
+            msg = _("The extraspec: %(extraspec)s is not valid.") % {
+                'extraspec': extraspec}
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+    def _get_enable_specs_on_array(self):
+        enable_specs = {}
+        rc, license_list = self._execute('ShowLicense')
+
+        for key, value in license_list.items():
+            if value['Support']:
+                enable_specs[key] = value
+
+        return enable_specs
+
+    def manage_existing_get_size(self, volume, ref):
+        """Return size of volume to be managed by manage_existing."""
+
+        volume_name = self._get_existing_volume_ref_name(ref)
+        part_entry = self._get_latter_volume_dict(volume_name)
+
+        rc, map_info = self._execute('ShowMap', 'part=%s' % part_entry['ID'])
+
+        if len(map_info) != 0:
+            msg = _('The specified volume is mapped to a host.')
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
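+        # The CLI reports 'Size' in MB; Cinder expects whole GB, so convert
+        # and round up.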
+        return int(math.ceil(mi_to_gi(float(part_entry['Size']))))
+
+    def manage_existing(self, volume, ref):
+        volume_name = self._get_existing_volume_ref_name(ref)
+        volume_id = volume['id'].replace('-', '')
+
+        part_entry = self._get_latter_volume_dict(volume_name)
+
+        self._execute('SetPartition', part_entry['ID'], 'name=%s' % volume_id)
+
+        LOG.info(_LI('Rename of volume %(volume_id)s completed.'), {
+            'volume_id': volume['id']})
+
+    def _get_existing_volume_ref_name(self, ref):
+        volume_name = None
+        if 'source-name' in ref:
+            volume_name = ref['source-name']
+        elif 'source-id' in ref:
+            volume_name = self._get_unmanaged_volume_name(
+                ref['source-id'].replace('-', ''))
+        else:
+            msg = _('Reference must contain source-id or source-name.')
+            LOG.error(msg)
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=ref, reason=msg)
+
+        return volume_name
+
+    def unmanage(self, volume):
+        volume_id = volume['id'].replace('-', '')
+        part_id = self._extract_specific_provider_location(
+            volume['provider_location'], 'partition_id')
+
+        if part_id is None:
+            part_id = self._get_part_id(volume_id)
+
+        new_vol_name = self._get_unmanaged_volume_name(volume_id)
+        self._execute('SetPartition', part_id, 'name=%s' % new_vol_name)
+
+        LOG.info(_LI('Volume %(volume_id)s is now unmanaged.'), {
+            'volume_id': volume_id})
+
+    def _get_unmanaged_volume_name(self, volume_id):
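+        # Keep only the first 15 characters of the 32-char volume id; the
+        # last 17 are dropped, presumably so the prefixed name stays within
+        # the array's name-length limit (assumption; not stated in this
+        # patch).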
+        return self.unmanaged_prefix % volume_id[:-17]
+
+    def _get_specific_volume_dict(self, volume_id):
+        ref_dict = {}
+        rc, part_list = self._execute('ShowPartition')
+
+        for entry in part_list:
+            if entry['Name'] == volume_id:
+                ref_dict = entry
+                break
+
+        return ref_dict
+
+    def _get_latter_volume_dict(self, volume_name):
+        rc, part_list = self._execute('ShowPartition', '-l')
+
+        latest_timestamps = 0
+        ref_dict = {}
+
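+        # More than one partition may carry the same name, so pick the
+        # entry with the most recent Creation-time.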
+        for entry in part_list:
+            if entry['Name'] == volume_name:
+
+                timestamps = self._get_part_timestamps(
+                    entry['Creation-time'])
+
+                if timestamps > latest_timestamps:
+                    ref_dict = entry
+                    latest_timestamps = timestamps
+
+        return ref_dict
+
+    def _get_part_timestamps(self, time_string):
+        """Transform 'Sat, Jan 11 22:18:40 2020' into timestamps with sec."""
+
+        first, value = time_string.split(',')
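+        # time.mktime() interprets the parsed struct_time in local time;
+        # that is sufficient here because the timestamps are only compared
+        # against each other to find the newest partition.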
+        timestamps = time.mktime(
+            time.strptime(value, " %b %d %H:%M:%S %Y"))
+
+        return timestamps
+
+    def _check_volume_attachment(self, volume):
+        if not volume['volume_attachment']:
+            return False
+        return True
+
+    def _check_volume_has_snapshot(self, volume):
+        part_id = self._extract_specific_provider_location(
+            volume['provider_location'], 'partition_id')
+
+        rc, snapshot_list = self._execute('ShowSnapshot', 'part=%s' % part_id)
+
+        if len(snapshot_list) > 0:
+            return True
+        return False
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Convert the volume to be of the new type."""
+
+        if volume['host'] != host['host']:
+            if self._check_volume_attachment(volume):
+                LOG.warning(_LW(
+                    'Volume %(volume_id)s cannot be retyped '
+                    'while it is attached.'), {
+                        'volume_id': volume['id']})
+                return False
+
+            if self._check_volume_has_snapshot(volume):
+                LOG.warning(_LW(
+                    'Volume %(volume_id)s cannot be retyped '
+                    'because it has snapshots.'), {
+                        'volume_id': volume['id']})
+                return False
+
+            new_extraspecs = new_type['extra_specs']
+            rc, model_update = self.migrate_volume(
+                volume, host, new_extraspecs)
+
+            if rc:
+                LOG.info(_LI(
+                    'Retype of volume %(volume_id)s is done; it was '
+                    'migrated to pool %(pool_id)s.'), {
+                        'volume_id': volume['id'],
+                        'pool_id': host['capabilities']['pool_id']})
+
+            return (rc, model_update)
+        else:
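+            # diff['extra_specs'] maps each changed key to an
+            # (old value, new value) pair; changing the provisioning type
+            # in place on the same backend is not supported.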
+            if ('infortrend_provisioning' in diff['extra_specs'] and
+                    (diff['extra_specs']['infortrend_provisioning'][0] !=
+                        diff['extra_specs']['infortrend_provisioning'][1])):
+
+                LOG.warning(_LW(
+                    'Retyping to a different provisioning type '
+                    '(%(provisioning)s) is not supported.'), {
+                        'provisioning':
+                            diff['extra_specs']['infortrend_provisioning'][1]})
+                return False
+
+            LOG.info(_LI('Retype of volume %(volume_id)s completed.'), {
+                'volume_id': volume['id']})
+            return True
+
+    def update_migrated_volume(self, ctxt, volume, new_volume):
+        """Return model update for migrated volume."""
+
+        src_volume_id = volume['id'].replace('-', '')
+        dst_volume_id = new_volume['id'].replace('-', '')
+        part_id = self._extract_specific_provider_location(
+            new_volume['provider_location'], 'partition_id')
+
+        if part_id is None:
+            part_id = self._get_part_id(dst_volume_id)
+
+        LOG.debug(
+            'Rename partition %(part_id)s '
+            'into new volume %(new_volume)s.', {
+                'part_id': part_id, 'new_volume': dst_volume_id})
+
+        self._execute('SetPartition', part_id, 'name=%s' % src_volume_id)
+
+        LOG.info(_LI('Update migrated volume %(new_volume)s completed.'), {
+            'new_volume': new_volume['id']})
+
+        model_update = {
+            'provider_location': new_volume['provider_location'],
+        }
+        return model_update
diff --git a/cinder/volume/drivers/infortrend/infortrend_fc_cli.py b/cinder/volume/drivers/infortrend/infortrend_fc_cli.py
new file mode 100644 (file)
index 0000000..42514a6
--- /dev/null
@@ -0,0 +1,277 @@
+# Copyright (c) 2015 Infortrend Technology, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Fibre Channel Driver for Infortrend EonStor based on CLI.
+"""
+
+
+from oslo_log import log as logging
+
+from cinder.volume import driver
+from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli
+from cinder.zonemanager import utils as fczm_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class InfortrendCLIFCDriver(driver.FibreChannelDriver):
+
+    """Infortrend Fibre Channel Driver for Eonstor DS using CLI.
+
+    Version history:
+        1.0.0 - Initial driver
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs)
+        self.common = common_cli.InfortrendCommon(
+            'FC', configuration=self.configuration)
+        self.VERSION = self.common.VERSION
+
+    def check_for_setup_error(self):
+        LOG.debug('check_for_setup_error start')
+        self.common.check_for_setup_error()
+
+    def create_volume(self, volume):
+        """Creates a volume.
+
+        Can optionally return a Dictionary of changes
+        to the volume object to be persisted.
+        """
+        LOG.debug('create_volume volume id=%(volume_id)s', {
+            'volume_id': volume['id']})
+        return self.common.create_volume(volume)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot."""
+        LOG.debug(
+            'create_volume_from_snapshot volume id=%(volume_id)s '
+            'snapshot id=%(snapshot_id)s', {
+                'volume_id': volume['id'], 'snapshot_id': snapshot['id']})
+        return self.common.create_volume_from_snapshot(volume, snapshot)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+        LOG.debug(
+            'create_cloned_volume volume id=%(volume_id)s '
+            'src_vref provider_location=%(provider_location)s', {
+                'volume_id': volume['id'],
+                'provider_location': src_vref['provider_location']})
+        return self.common.create_cloned_volume(volume, src_vref)
+
+    def extend_volume(self, volume, new_size):
+        """Extend a volume."""
+        LOG.debug(
+            'extend_volume volume id=%(volume_id)s new size=%(size)s', {
+                'volume_id': volume['id'], 'size': new_size})
+        self.common.extend_volume(volume, new_size)
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        LOG.debug('delete_volume volume id=%(volume_id)s', {
+            'volume_id': volume['id']})
+        return self.common.delete_volume(volume)
+
+    def migrate_volume(self, ctxt, volume, host):
+        """Migrate the volume to the specified host.
+
+        Returns a boolean indicating whether the migration occurred, as well as
+        model_update.
+
+        :param ctxt: Context
+        :param volume: A dictionary describing the volume to migrate
+        :param host: A dictionary describing the host to migrate to, where
+                     host['host'] is its name, and host['capabilities'] is a
+                     dictionary of its reported capabilities.
+        """
+        LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', {
+            'volume_id': volume['id'], 'host': host['host']})
+        return self.common.migrate_volume(volume, host)
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+        LOG.debug(
+            'create_snapshot snapshot id=%(snapshot_id)s '
+            'volume id=%(volume_id)s', {
+                'snapshot_id': snapshot['id'],
+                'volume_id': snapshot['volume_id']})
+        return self.common.create_snapshot(snapshot)
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        LOG.debug(
+            'delete_snapshot snapshot id=%(snapshot_id)s '
+            'volume id=%(volume_id)s', {
+                'snapshot_id': snapshot['id'],
+                'volume_id': snapshot['volume_id']})
+        self.common.delete_snapshot(snapshot)
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a volume."""
+        pass
+
+    def create_export(self, context, volume):
+        """Exports the volume.
+
+        Can optionally return a Dictionary of changes
+        to the volume object to be persisted.
+        """
+        LOG.debug(
+            'create_export volume provider_location=%(provider_location)s', {
+                'provider_location': volume['provider_location']})
+        return self.common.create_export(context, volume)
+
+    def remove_export(self, context, volume):
+        """Removes an export for a volume."""
+        pass
+
+    @fczm_utils.AddFCZone
+    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns connection information.
+
+        Assign any created volume to a compute node/host so that it can be
+        used from that host.
+
+        The driver returns a driver_volume_type of 'fibre_channel'.
+        The target_wwn can be a single entry or a list of wwns that
+        correspond to the list of remote wwn(s) that will export the volume.
+        The initiator_target_map is a map that represents the remote wwn(s)
+        and a list of wwns which are visible to the remote wwn(s).
+        Example return values:
+
+            {
+                'driver_volume_type': 'fibre_channel'
+                'data': {
+                    'target_discovered': True,
+                    'target_lun': 1,
+                    'target_wwn': '1234567890123',
+                    'access_mode': 'rw'
+                    'initiator_target_map': {
+                        '1122334455667788': ['1234567890123']
+                    }
+                }
+            }
+
+            or
+
+             {
+                'driver_volume_type': 'fibre_channel'
+                'data': {
+                    'target_discovered': True,
+                    'target_lun': 1,
+                    'target_wwn': ['1234567890123', '0987654321321'],
+                    'access_mode': 'rw'
+                    'initiator_target_map': {
+                        '1122334455667788': ['1234567890123',
+                                             '0987654321321']
+                    }
+                }
+            }
+        """
+        LOG.debug(
+            'initialize_connection volume id=%(volume_id)s '
+            'connector initiator=%(initiator)s', {
+                'volume_id': volume['id'],
+                'initiator': connector['initiator']})
+        return self.common.initialize_connection(volume, connector)
+
+    @fczm_utils.RemoveFCZone
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Disallow connection from connector."""
+        LOG.debug('terminate_connection volume id=%(volume_id)s', {
+            'volume_id': volume['id']})
+        return self.common.terminate_connection(volume, connector)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, update the stats first.
+        """
+        LOG.debug('get_volume_stats refresh=%(refresh)s', {
+            'refresh': refresh})
+        return self.common.get_volume_stats(refresh)
+
+    def manage_existing(self, volume, existing_ref):
+        """Manage an existing lun in the array.
+
+        The lun should be in a manageable pool backend, otherwise
+        an error is returned.
+        Rename the backend storage object so that it matches
+        volume['name'], which is how drivers traditionally map between a
+        cinder volume and the associated backend storage object.
+
+        existing_ref: {
+            'source-id': <lun id> or 'source-name': <lun name>
+        }
+        """
+        LOG.debug(
+            'manage_existing volume id=%(volume_id)s '
+            'existing_ref source id=%(source_id)s', {
+                'volume_id': volume['id'],
+                'source_id': existing_ref['source-id']})
+        return self.common.manage_existing(volume, existing_ref)
+
+    def unmanage(self, volume):
+        """Removes the specified volume from Cinder management.
+
+        Does not delete the underlying backend storage object.
+
+        :param volume: Cinder volume to unmanage
+        """
+        LOG.debug('unmanage volume id=%(volume_id)s', {
+            'volume_id': volume['id']})
+        self.common.unmanage(volume)
+
+    def manage_existing_get_size(self, volume, existing_ref):
+        """Return size of volume to be managed by manage_existing.
+
+        When calculating the size, round up to the next GB.
+        """
+        LOG.debug(
+            'manage_existing_get_size volume id=%(volume_id)s '
+            'existing_ref source id=%(source_id)s', {
+                'volume_id': volume['id'],
+                'source_id': existing_ref['source-id']})
+        return self.common.manage_existing_get_size(volume, existing_ref)
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Convert the volume to be of the new type.
+
+        :param ctxt: Context
+        :param volume: A dictionary describing the volume to migrate
+        :param new_type: A dictionary describing the volume type to convert to
+        :param diff: A dictionary with the difference between the two types
+        :param host: A dictionary describing the host to migrate to, where
+                     host['host'] is its name, and host['capabilities'] is a
+                     dictionary of its reported capabilities.
+        """
+        LOG.debug(
+            'retype volume id=%(volume_id)s new_type id=%(type_id)s', {
+                'volume_id': volume['id'], 'type_id': new_type['id']})
+        return self.common.retype(ctxt, volume, new_type, diff, host)
+
+    def update_migrated_volume(self, ctxt, volume, new_volume):
+        """Return model update for migrated volume.
+
+        :param volume: The original volume that was migrated to this backend
+        :param new_volume: The migration volume object that was created on
+                           this backend as part of the migration process
+        :return model_update to update DB with any needed changes
+        """
+        LOG.debug(
+            'update_migrated_volume original volume id=%(volume_id)s '
+            'new volume id=%(new_volume_id)s', {
+                'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
+        return self.common.update_migrated_volume(ctxt, volume, new_volume)
diff --git a/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py b/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py
new file mode 100644 (file)
index 0000000..6c86228
--- /dev/null
@@ -0,0 +1,249 @@
+# Copyright (c) 2015 Infortrend Technology, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+iSCSI Driver for Infortrend EonStor based on CLI.
+"""
+
+from oslo_log import log as logging
+
+from cinder.volume import driver
+from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli
+
+LOG = logging.getLogger(__name__)
+
+
+class InfortrendCLIISCSIDriver(driver.ISCSIDriver):
+
+    """Infortrend iSCSI Driver for Eonstor DS using CLI.
+
+    Version history:
+        1.0.0 - Initial driver
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs)
+        self.common = common_cli.InfortrendCommon(
+            'iSCSI', configuration=self.configuration)
+        self.VERSION = self.common.VERSION
+
+    def check_for_setup_error(self):
+        LOG.debug('check_for_setup_error start')
+        self.common.check_for_setup_error()
+
+    def create_volume(self, volume):
+        """Creates a volume.
+
+        Can optionally return a Dictionary of changes
+        to the volume object to be persisted.
+        """
+        LOG.debug('create_volume volume id=%(volume_id)s', {
+            'volume_id': volume['id']})
+        return self.common.create_volume(volume)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot."""
+        LOG.debug(
+            'create_volume_from_snapshot volume id=%(volume_id)s '
+            'snapshot id=%(snapshot_id)s', {
+                'volume_id': volume['id'], 'snapshot_id': snapshot['id']})
+        return self.common.create_volume_from_snapshot(volume, snapshot)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+        LOG.debug(
+            'create_cloned_volume volume id=%(volume_id)s '
+            'src_vref provider_location=%(provider_location)s', {
+                'volume_id': volume['id'],
+                'provider_location': src_vref['provider_location']})
+        return self.common.create_cloned_volume(volume, src_vref)
+
+    def extend_volume(self, volume, new_size):
+        """Extend a volume."""
+        LOG.debug(
+            'extend_volume volume id=%(volume_id)s new size=%(size)s', {
+                'volume_id': volume['id'], 'size': new_size})
+        self.common.extend_volume(volume, new_size)
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        LOG.debug('delete_volume volume id=%(volume_id)s', {
+            'volume_id': volume['id']})
+        return self.common.delete_volume(volume)
+
+    def migrate_volume(self, ctxt, volume, host):
+        """Migrate the volume to the specified host.
+
+        Returns a boolean indicating whether the migration occurred, as well as
+        model_update.
+
+        :param ctxt: Context
+        :param volume: A dictionary describing the volume to migrate
+        :param host: A dictionary describing the host to migrate to, where
+                     host['host'] is its name, and host['capabilities'] is a
+                     dictionary of its reported capabilities.
+        """
+        LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', {
+            'volume_id': volume['id'], 'host': host['host']})
+        return self.common.migrate_volume(volume, host)
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+        LOG.debug(
+            'create_snapshot snapshot id=%(snapshot_id)s '
+            'volume_id=%(volume_id)s', {
+                'snapshot_id': snapshot['id'],
+                'volume_id': snapshot['volume_id']})
+        return self.common.create_snapshot(snapshot)
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        LOG.debug(
+            'delete_snapshot snapshot id=%(snapshot_id)s '
+            'volume_id=%(volume_id)s', {
+                'snapshot_id': snapshot['id'],
+                'volume_id': snapshot['volume_id']})
+        self.common.delete_snapshot(snapshot)
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a volume."""
+        pass
+
+    def create_export(self, context, volume):
+        """Exports the volume.
+
+        Can optionally return a Dictionary of changes
+        to the volume object to be persisted.
+        """
+        LOG.debug(
+            'create_export volume provider_location=%(provider_location)s', {
+                'provider_location': volume['provider_location']})
+        return self.common.create_export(context, volume)
+
+    def remove_export(self, context, volume):
+        """Removes an export for a volume."""
+        pass
+
+    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns connection information.
+
+        The iscsi driver returns a driver_volume_type of 'iscsi'.
+        The format of the driver data is defined in _get_iscsi_properties.
+        Example return value::
+
+            {
+                'driver_volume_type': 'iscsi'
+                'data': {
+                    'target_discovered': True,
+                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
+                    'target_portal': '127.0.0.0.1:3260',
+                    'volume_id': 1,
+                    'access_mode': 'rw'
+                }
+            }
+        """
+        LOG.debug(
+            'initialize_connection volume id=%(volume_id)s '
+            'connector initiator=%(initiator)s', {
+                'volume_id': volume['id'],
+                'initiator': connector['initiator']})
+        return self.common.initialize_connection(volume, connector)
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Disallow connection from connector."""
+        LOG.debug('terminate_connection volume id=%(volume_id)s', {
+            'volume_id': volume['id']})
+        self.common.terminate_connection(volume, connector)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, update the stats first.
+        """
+        LOG.debug('get_volume_stats refresh=%(refresh)s', {
+            'refresh': refresh})
+        return self.common.get_volume_stats(refresh)
+
+    def manage_existing(self, volume, existing_ref):
+        """Manage an existing lun in the array.
+
+        The lun should be in a manageable pool backend, otherwise
+        an error is returned.
+        Rename the backend storage object so that it matches
+        volume['name'], which is how drivers traditionally map between a
+        cinder volume and the associated backend storage object.
+
+        existing_ref: {
+            'source-id': <lun id> or 'source-name': <lun name>
+        }
+        """
+        LOG.debug(
+            'manage_existing volume id=%(volume_id)s '
+            'existing_ref source id=%(source_id)s', {
+                'volume_id': volume['id'],
+                'source_id': existing_ref['source-id']})
+        return self.common.manage_existing(volume, existing_ref)
+
+    def unmanage(self, volume):
+        """Removes the specified volume from Cinder management.
+
+        Does not delete the underlying backend storage object.
+
+        :param volume: Cinder volume to unmanage
+        """
+        LOG.debug('unmanage volume id=%(volume_id)s', {
+            'volume_id': volume['id']})
+        self.common.unmanage(volume)
+
+    def manage_existing_get_size(self, volume, existing_ref):
+        """Return size of volume to be managed by manage_existing.
+
+        When calculating the size, round up to the next GB.
+        """
+        LOG.debug(
+            'manage_existing_get_size volume id=%(volume_id)s '
+            'existing_ref source id=%(source_id)s', {
+                'volume_id': volume['id'],
+                'source_id': existing_ref['source-id']})
+        return self.common.manage_existing_get_size(volume, existing_ref)
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Convert the volume to be of the new type.
+
+        :param ctxt: Context
+        :param volume: A dictionary describing the volume to migrate
+        :param new_type: A dictionary describing the volume type to convert to
+        :param diff: A dictionary with the difference between the two types
+        :param host: A dictionary describing the host to migrate to, where
+                     host['host'] is its name, and host['capabilities'] is a
+                     dictionary of its reported capabilities.
+        """
+        LOG.debug(
+            'retype volume id=%(volume_id)s new_type id=%(type_id)s', {
+                'volume_id': volume['id'], 'type_id': new_type['id']})
+        return self.common.retype(ctxt, volume, new_type, diff, host)
+
+    def update_migrated_volume(self, ctxt, volume, new_volume):
+        """Return model update for migrated volume.
+
+        :param volume: The original volume that was migrated to this backend
+        :param new_volume: The migration volume object that was created on
+                           this backend as part of the migration process
+        :return model_update to update DB with any needed changes
+        """
+        LOG.debug(
+            'update_migrated_volume original volume id=%(volume_id)s '
+            'new volume id=%(new_volume_id)s', {
+                'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
+        return self.common.update_migrated_volume(ctxt, volume, new_volume)