review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Introduce Hitachi storage volume driver
authorSeiji Aguchi <seiji.aguchi.tr@hitachi.com>
Fri, 29 Aug 2014 02:47:35 +0000 (11:47 +0900)
committerSeiji Aguchi <seiji.aguchi.tr@hitachi.com>
Fri, 29 Aug 2014 03:00:01 +0000 (12:00 +0900)
This patch introduces the Hitachi storage volume driver.

Implements: blueprint hitachi-block-storage-driver

Certification test result for FC:
https://bugs.launchpad.net/cinder/+bug/1336661/+attachment/4189194/+files/FC%20tmp.pTAkWV3eWb

Certification test result for iSCSI:
https://bugs.launchpad.net/cinder/+bug/1336661/+attachment/4189195/+files/iSCSI%20tmp.1Q7C1rkzTY

Change-Id: Ie9b5df6d223b47d176c4e80fcf7e110543ce1d37
Signed-off-by: Seiji Aguchi <seiji.aguchi.tr@hitachi.com>
13 files changed:
cinder/exception.py
cinder/tests/test_hitachi_hbsd_horcm_fc.py [new file with mode: 0644]
cinder/tests/test_hitachi_hbsd_snm2_fc.py [new file with mode: 0644]
cinder/tests/test_hitachi_hbsd_snm2_iscsi.py [new file with mode: 0644]
cinder/volume/drivers/hitachi/__init__.py [new file with mode: 0644]
cinder/volume/drivers/hitachi/hbsd_basiclib.py [new file with mode: 0644]
cinder/volume/drivers/hitachi/hbsd_common.py [new file with mode: 0644]
cinder/volume/drivers/hitachi/hbsd_fc.py [new file with mode: 0644]
cinder/volume/drivers/hitachi/hbsd_horcm.py [new file with mode: 0644]
cinder/volume/drivers/hitachi/hbsd_iscsi.py [new file with mode: 0644]
cinder/volume/drivers/hitachi/hbsd_snm2.py [new file with mode: 0644]
etc/cinder/cinder.conf.sample
etc/cinder/rootwrap.d/volume.filters

index c921e9f4c02a946d470e559dc763a2321cdaa193..bba8210c1501d12523341b0623dcaa71357e4a4f 100644 (file)
@@ -813,3 +813,25 @@ class CgSnapshotNotFound(NotFound):
 
 class InvalidCgSnapshot(Invalid):
     message = _("Invalid CgSnapshot: %(reason)s")
+
+
+# Hitachi Block Storage Driver
+class HBSDError(CinderException):
+    message = _("HBSD error occurs.")
+
+
+class HBSDCmdError(HBSDError):
+
+    def __init__(self, message=None, ret=None, err=None):
+        self.ret = ret
+        self.stderr = err
+
+        super(HBSDCmdError, self).__init__(message=message)
+
+
+class HBSDBusy(HBSDError):
+    message = _("Device or resource is busy.")
+
+
+class HBSDNotFound(NotFound):
+    message = _("Storage resource could not be found.")
diff --git a/cinder/tests/test_hitachi_hbsd_horcm_fc.py b/cinder/tests/test_hitachi_hbsd_horcm_fc.py
new file mode 100644 (file)
index 0000000..1cb2c21
--- /dev/null
@@ -0,0 +1,670 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""
+Self-test for the Hitachi Block Storage Driver.
+"""
+
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder import utils
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.hitachi import hbsd_basiclib
+from cinder.volume.drivers.hitachi import hbsd_common
+from cinder.volume.drivers.hitachi import hbsd_fc
+from cinder.volume.drivers.hitachi import hbsd_horcm
+
+
+def _exec_raidcom(*args, **kargs):
+    return HBSDHORCMFCDriverTest.horcm_vals.get(args)
+
+
+class HBSDHORCMFCDriverTest(test.TestCase):
+    """Test HBSDHORCMFCDriver."""
+
+    raidqry_result = "DUMMY\n\
+Ver&Rev: 01-31-03/06"
+
+    raidcom_get_host_grp_result = "DUMMY\n\
+CL1-A 0 HBSD-127.0.0.1 None -\n\
+CL1-A 1 - None -"
+
+    raidcom_get_result = "LDEV : 0\n\
+VOL_TYPE : OPEN-V-CVS\n\
+LDEV : 1\n\
+VOL_TYPE : NOT DEFINED"
+
+    raidcom_get_result2 = "DUMMY\n\
+LDEV : 1\n\
+DUMMY\n\
+DUMMY\n\
+VOL_TYPE : OPEN-V-CVS"
+
+    raidcom_get_result3 = "Serial#  : 210944\n\
+LDEV : 0\n\
+SL : 0\n\
+CL : 0\n\
+VOL_TYPE : NOT DEFINED\n\
+VOL_Capacity(BLK) : 2098560\n\
+NUM_LDEV : 1\n\
+LDEVs : 0\n\
+NUM_PORT : 3\n\
+PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\
+F_POOLID : NONE\n\
+VOL_ATTR : CVS\n\
+RAID_LEVEL  : RAID5\n\
+RAID_TYPE   : 3D+1P\n\
+NUM_GROUP : 1\n\
+RAID_GROUPs : 01-01\n\
+DRIVE_TYPE  : DKR5C-J600SS\n\
+DRIVE_Capa : 1143358736\n\
+LDEV_NAMING : test\n\
+STS : NML\n\
+OPE_TYPE : NONE\n\
+OPE_RATE : 100\n\
+MP# : 0\n\
+SSID : 0004"
+
+    raidcom_get_command_status_result = "HANDLE   SSB1    SSB2    ERR_CNT\
+        Serial#     Description\n\
+00d4        -       -          0         210944     -"
+
+    raidcom_get_result4 = "Serial#  : 210944\n\
+LDEV : 0\n\
+SL : 0\n\
+CL : 0\n\
+VOL_TYPE : DEFINED\n\
+VOL_Capacity(BLK) : 2098560\n\
+NUM_LDEV : 1\n\
+LDEVs : 0\n\
+NUM_PORT : 3\n\
+PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\
+F_POOLID : NONE\n\
+VOL_ATTR : CVS\n\
+RAID_LEVEL  : RAID5\n\
+RAID_TYPE   : 3D+1P\n\
+NUM_GROUP : 1\n\
+RAID_GROUPs : 01-01\n\
+DRIVE_TYPE  : DKR5C-J600SS\n\
+DRIVE_Capa : 1143358736\n\
+LDEV_NAMING : test\n\
+STS : NML\n\
+OPE_TYPE : NONE\n\
+OPE_RATE : 100\n\
+MP# : 0\n\
+SSID : 0004"
+
+    raidcom_get_copy_grp_result = "DUMMY\n\
+HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31P - - None\n\
+HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31S - - None"
+
+    raidcom_get_device_grp_result1 = "DUMMY\n\
+HBSD-127.0.0.1None1A31P HBSD-ldev-0-2 0 None"
+
+    raidcom_get_device_grp_result2 = "DUMMY\n\
+HBSD-127.0.0.1None1A31S HBSD-ldev-0-2 2 None"
+
+    raidcom_get_snapshot_result = "DUMMY\n\
+HBSD-sanp P-VOL PSUS None 0 3 3 18 100 G--- 53ee291f\n\
+HBSD-sanp P-VOL PSUS None 0 4 4 18 100 G--- 53ee291f"
+
+    raidcom_dp_pool_result = "DUMMY \n\
+030  POLN   0        6006        6006   75   80    1 14860    32     167477"
+
+    raidcom_port_result = "DUMMY\n\
+CL1-A  FIBRE TAR AUT 01 Y PtoP Y 0 None 50060E801053C2E0 -"
+
+    raidcom_port_result2 = "DUMMY\n\
+CL1-A 12345678912345aa None -\n\
+CL1-A 12345678912345bb None -"
+
+    raidcom_host_grp_result = "DUMMY\n\
+CL1-A 0 HBSD-127.0.0.1 None LINUX/IRIX"
+
+    raidcom_hba_wwn_result = "DUMMY\n\
+CL1-A 0 HBSD-127.0.0.1 12345678912345aa None -"
+
+    raidcom_get_lun_result = "DUMMY\n\
+CL1-A 0 LINUX/IRIX 254 1 5 - None"
+
+    pairdisplay_result = "DUMMY\n\
+HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 L CL1-A-0 0 0 0 None 0 P-VOL PSUS None 2\
+ -\n\
+HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 R CL1-A-0 0 0 0 None 2 S-VOL SSUS - 0 -"
+
+    pairdisplay_result2 = "DUMMY\n\
+HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 L CL1-A-1 0 0 0 None 1 P-VOL PAIR None 1\
+ -\n\
+HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 R CL1-A-1 0 0 0 None 1 S-VOL PAIR - 1 -"
+
+    horcm_vals = {
+        ('raidqry', u'-h'):
+        [0, "%s" % raidqry_result, ""],
+        ('raidcom', '-login user pasword'):
+        [0, "", ""],
+        ('raidcom', u'get host_grp -port CL1-A -key host_grp'):
+        [0, "%s" % raidcom_get_host_grp_result, ""],
+        ('raidcom', u'add host_grp -port CL1-A-1 -host_grp_name HBSD-pair00'):
+        [0, "", ""],
+        ('raidcom',
+         u'add host_grp -port CL1-A-1 -host_grp_name HBSD-127.0.0.2'):
+        [0, "", ""],
+        ('raidcom', u'delete host_grp -port CL1-A-1 HBSD-127.0.0.2'):
+        [1, "", ""],
+        ('raidcom', 'get ldev -ldev_id 0 -cnt 2'):
+        [0, "%s" % raidcom_get_result, ""],
+        ('raidcom', 'lock resource'):
+        [0, "", ""],
+        ('raidcom',
+         'add ldev -pool 30 -ldev_id 1 -capacity 128G -emulation OPEN-V'):
+        [0, "", ""],
+        ('raidcom',
+         'add ldev -pool 30 -ldev_id 1 -capacity 256G -emulation OPEN-V'):
+        [1, "", "SSB=0x2E22,0x0001"],
+        ('raidcom', 'get command_status'):
+        [0, "%s" % raidcom_get_command_status_result, ""],
+        ('raidcom', 'get ldev -ldev_id 1'):
+        [0, "%s" % raidcom_get_result2, ""],
+        ('raidcom', 'get ldev -ldev_id 1 -check_status NML -time 120'):
+        [0, "", ""],
+        ('raidcom', 'get snapshot -ldev_id 0'):
+        [0, "", ""],
+        ('raidcom', 'get snapshot -ldev_id 1'):
+        [0, "%s" % raidcom_get_snapshot_result, ""],
+        ('raidcom', 'get snapshot -ldev_id 2'):
+        [0, "", ""],
+        ('raidcom', 'get snapshot -ldev_id 3'):
+        [0, "", ""],
+        ('raidcom', 'get copy_grp'):
+        [0, "%s" % raidcom_get_copy_grp_result, ""],
+        ('raidcom', 'delete ldev -ldev_id 0'):
+        [0, "", ""],
+        ('raidcom', 'delete ldev -ldev_id 1'):
+        [0, "", ""],
+        ('raidcom', 'delete ldev -ldev_id 2'):
+        [1, "", "error"],
+        ('raidcom', 'delete ldev -ldev_id 3'):
+        [1, "", "SSB=0x2E20,0x0000"],
+        ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30P'):
+        [0, "", ""],
+        ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30S'):
+        [0, "", ""],
+        ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31P'):
+        [0, "%s" % raidcom_get_device_grp_result1, ""],
+        ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31S'):
+        [0, "%s" % raidcom_get_device_grp_result2, ""],
+        ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -CLI'):
+        [0, "", ""],
+        ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -CLI'):
+        [0, "", ""],
+        ('pairdisplay', '-g HBSD-127.0.0.1None1A31 -CLI'):
+        [0, "%s" % pairdisplay_result, ""],
+        ('pairdisplay', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -CLI'):
+        [0, "%s" % pairdisplay_result, ""],
+        ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -CLI'):
+        [0, "%s" % pairdisplay_result2, ""],
+        ('raidcom',
+         'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \
+HBSD-ldev-0-1 -ldev_id 0'):
+        [0, "", ""],
+        ('raidcom',
+         'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \
+HBSD-ldev-0-1 -ldev_id 1'):
+        [0, "", ""],
+        ('raidcom',
+         'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \
+HBSD-ldev-1-1 -ldev_id 1'):
+        [0, "", ""],
+        ('raidcom',
+         'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \
+HBSD-ldev-1-1 -ldev_id 1'):
+        [0, "", ""],
+        ('raidcom',
+         'add copy_grp -copy_grp_name HBSD-127.0.0.1None1A30 \
+HBSD-127.0.0.1None1A30P HBSD-127.0.0.1None1A30S -mirror_id 0'):
+        [0, "", ""],
+        ('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 \
+-split -fq quick -c 3 -vl'):
+        [0, "", ""],
+        ('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 \
+-split -fq quick -c 3 -vl'):
+        [0, "", ""],
+        ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowait'):
+        [4, "", ""],
+        ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowaits'):
+        [4, "", ""],
+        ('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowait'):
+        [1, "", ""],
+        ('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowaits'):
+        [1, "", ""],
+        ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowait'):
+        [4, "", ""],
+        ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowaits'):
+        [200, "", ""],
+        ('pairsplit', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -S'):
+        [0, "", ""],
+        ('raidcom', 'extend ldev -ldev_id 0 -capacity 128G'):
+        [0, "", ""],
+        ('raidcom', 'get dp_pool'):
+        [0, "%s" % raidcom_dp_pool_result, ""],
+        ('raidcom', 'get port'):
+        [0, "%s" % raidcom_port_result, ""],
+        ('raidcom', 'get port -port CL1-A'):
+        [0, "%s" % raidcom_port_result2, ""],
+        ('raidcom', 'get host_grp -port CL1-A'):
+        [0, "%s" % raidcom_host_grp_result, ""],
+        ('raidcom', 'get hba_wwn -port CL1-A-0'):
+        [0, "%s" % raidcom_hba_wwn_result, ""],
+        ('raidcom', 'get hba_wwn -port CL1-A-1'):
+        [0, "", ""],
+        ('raidcom', 'add hba_wwn -port CL1-A-0 -hba_wwn 12345678912345bb'):
+        [0, "", ""],
+        ('raidcom', 'add hba_wwn -port CL1-A-1 -hba_wwn 12345678912345bb'):
+        [1, "", ""],
+        ('raidcom', u'get lun -port CL1-A-0'):
+        [0, "%s" % raidcom_get_lun_result, ""],
+        ('raidcom', u'get lun -port CL1-A-1'):
+        [0, "", ""],
+        ('raidcom', u'add lun -port CL1-A-0 -ldev_id 0 -lun_id 0'):
+        [0, "", ""],
+        ('raidcom', u'add lun -port CL1-A-0 -ldev_id 1 -lun_id 0'):
+        [0, "", ""],
+        ('raidcom', u'add lun -port CL1-A-1 -ldev_id 0 -lun_id 0'):
+        [0, "", ""],
+        ('raidcom', u'add lun -port CL1-A-1 -ldev_id 1 -lun_id 0'):
+        [0, "", ""],
+        ('raidcom', u'delete lun -port CL1-A-0 -ldev_id 0'):
+        [0, "", ""],
+        ('raidcom', u'delete lun -port CL1-A-0 -ldev_id 1'):
+        [0, "", ""],
+        ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 0'):
+        [0, "", ""],
+        ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 2'):
+        [0, "", ""],
+        ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 1'):
+        [1, "", ""]}
+
+# The following information is passed to the tests when creating a volume.
+
+    _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
+               'provider_location': '0', 'name': 'test',
+               'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
+
+    test_volume = {'name': 'test_volume', 'size': 128,
+                   'id': 'test-volume',
+                   'provider_location': '1', 'status': 'available'}
+
+    test_volume_error = {'name': 'test_volume', 'size': 256,
+                         'id': 'test-volume',
+                         'status': 'creating'}
+
+    test_volume_error2 = {'name': 'test_volume2', 'size': 128,
+                          'id': 'test-volume2',
+                          'provider_location': '1', 'status': 'available'}
+
+    test_volume_error3 = {'name': 'test_volume3', 'size': 128,
+                          'id': 'test-volume3',
+                          'volume_metadata': [{'key': 'type',
+                                               'value': 'V-VOL'}],
+                          'provider_location': '1', 'status': 'available'}
+
+    test_volume_error4 = {'name': 'test_volume4', 'size': 128,
+                          'id': 'test-volume2',
+                          'provider_location': '3', 'status': 'available'}
+
+    test_volume_error5 = {'name': 'test_volume', 'size': 256,
+                          'id': 'test-volume',
+                          'provider_location': '1', 'status': 'available'}
+
+    test_snapshot = {'volume_name': 'test', 'size': 128,
+                     'volume_size': 128, 'name': 'test-snap',
+                     'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
+                     'provider_location': '0', 'status': 'available'}
+
+    test_snapshot_error = {'volume_name': 'test', 'size': 128,
+                           'volume_size': 128, 'name': 'test-snap',
+                           'volume_id': 0, 'id': 'test-snap-0',
+                           'volume': _VOLUME,
+                           'provider_location': '2', 'status': 'available'}
+
+    test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
+                            'volume_size': 128, 'name': 'test-snap',
+                            'volume_id': 0, 'id': 'test-snap-0',
+                            'volume': _VOLUME,
+                            'provider_location': '1', 'status': 'available'}
+
+    def __init__(self, *args, **kwargs):
+        super(HBSDHORCMFCDriverTest, self).__init__(*args, **kwargs)
+
+    @mock.patch.object(utils, 'brick_get_connector_properties',
+                       return_value={'ip': '127.0.0.1',
+                                     'wwpns': ['12345678912345aa']})
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(utils, 'execute',
+                       return_value=['%s' % raidqry_result, ''])
+    def setUp(self, arg1, arg2, arg3, arg4):
+        super(HBSDHORCMFCDriverTest, self).setUp()
+        self._setup_config()
+        self._setup_driver()
+        self.driver.check_param()
+        self.driver.common.pair_flock = hbsd_basiclib.NopLock()
+        self.driver.common.command.horcmgr_flock = hbsd_basiclib.NopLock()
+        self.driver.common.create_lock_file()
+        self.driver.common.command.connect_storage()
+        self.driver.max_hostgroups = \
+            self.driver.common.command.get_max_hostgroups()
+        self.driver.add_hostgroup()
+        self.driver.output_param_to_log()
+        self.driver.do_setup_status.set()
+
+    def _setup_config(self):
+        self.configuration = mock.Mock(conf.Configuration)
+        self.configuration.hitachi_pool_id = 30
+        self.configuration.hitachi_thin_pool_id = 31
+        self.configuration.hitachi_target_ports = "CL1-A"
+        self.configuration.hitachi_debug_level = 0
+        self.configuration.hitachi_serial_number = "None"
+        self.configuration.hitachi_unit_name = None
+        self.configuration.hitachi_group_request = True
+        self.configuration.hitachi_group_range = None
+        self.configuration.hitachi_zoning_request = False
+        self.configuration.config_group = "None"
+        self.configuration.hitachi_ldev_range = "0-1"
+        self.configuration.hitachi_default_copy_method = 'FULL'
+        self.configuration.hitachi_copy_check_interval = 1
+        self.configuration.hitachi_async_copy_check_interval = 1
+        self.configuration.hitachi_copy_speed = 3
+        self.configuration.hitachi_horcm_add_conf = True
+        self.configuration.hitachi_horcm_numbers = "409,419"
+        self.configuration.hitachi_horcm_user = "user"
+        self.configuration.hitachi_horcm_password = "pasword"
+
+    def _setup_driver(self):
+        self.driver = hbsd_fc.HBSDFCDriver(
+            configuration=self.configuration)
+        context = None
+        db = None
+        self.driver.common = hbsd_common.HBSDCommon(
+            self.configuration, self.driver, context, db)
+
+# API test cases
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    def test_create_volume(self, arg1, arg2, arg3):
+        """test create_volume."""
+        ret = self.driver.create_volume(self._VOLUME)
+        vol = self._VOLUME.copy()
+        vol['provider_location'] = ret['provider_location']
+        self.assertEqual(vol['provider_location'], '1')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    def test_create_volume_error(self, arg1, arg2, arg3):
+        """test create_volume."""
+        self.assertRaises(exception.HBSDError, self.driver.create_volume,
+                          self.test_volume_error)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    def test_get_volume_stats(self, arg1, arg2):
+        """test get_volume_stats."""
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual(stats['vendor_name'], 'Hitachi')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    def test_get_volume_stats_error(self, arg1, arg2):
+        """test get_volume_stats."""
+        self.configuration.hitachi_pool_id = 29
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual(stats, {})
+        self.configuration.hitachi_pool_id = 30
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_extend_volume(self, arg1, arg2, arg3, arg4):
+        """test extend_volume."""
+        self.driver.extend_volume(self._VOLUME, 256)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_extend_volume_error(self, arg1, arg2, arg3, arg4):
+        """test extend_volume."""
+        self.assertRaises(exception.HBSDError, self.driver.extend_volume,
+                          self.test_volume_error3, 256)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_delete_volume(self, arg1, arg2, arg3, arg4):
+        """test delete_volume."""
+        self.driver.delete_volume(self._VOLUME)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_delete_volume_error(self, arg1, arg2, arg3, arg4):
+        """test delete_volume."""
+        self.driver.delete_volume(self.test_volume_error4)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
+                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=_VOLUME)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
+        """test create_snapshot."""
+        ret = self.driver.create_volume(self._VOLUME)
+        ret = self.driver.create_snapshot(self.test_snapshot)
+        self.assertEqual(ret['provider_location'], '1')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
+                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=_VOLUME)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5, arg6,
+                                   arg7):
+        """test create_snapshot."""
+        ret = self.driver.create_volume(self.test_volume)
+        ret = self.driver.create_snapshot(self.test_snapshot_error)
+        self.assertEqual(ret['provider_location'], '1')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_delete_snapshot(self, arg1, arg2, arg3, arg4):
+        """test delete_snapshot."""
+        self.driver.delete_snapshot(self.test_snapshot)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_delete_snapshot_error(self, arg1, arg2, arg3, arg4):
+        """test delete_snapshot."""
+        self.assertRaises(exception.HBSDCmdError,
+                          self.driver.delete_snapshot,
+                          self.test_snapshot_error)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_create_volume_from_snapshot(self, arg1, arg2, arg3, arg4, arg5):
+        """test create_volume_from_snapshot."""
+        vol = self.driver.create_volume_from_snapshot(self.test_volume,
+                                                      self.test_snapshot)
+        self.assertIsNotNone(vol)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3, arg4,
+                                               arg5):
+        """test create_volume_from_snapshot."""
+        self.assertRaises(exception.HBSDError,
+                          self.driver.create_volume_from_snapshot,
+                          self.test_volume_error5, self.test_snapshot_error2)
+        return
+
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=_VOLUME)
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_create_cloned_volume(self, arg1, arg2, arg3, arg4, arg5, arg6):
+        """test create_cloned_volume."""
+        vol = self.driver.create_cloned_volume(self.test_volume,
+                                               self._VOLUME)
+        self.assertEqual(vol['provider_location'], '1')
+        return
+
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=_VOLUME)
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm',
+                       return_value=[0, "", ""])
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm',
+                       return_value=[0, "", ""])
+    def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4, arg5,
+                                        arg6):
+        """test create_cloned_volume."""
+        self.assertRaises(exception.HBSDCmdError,
+                          self.driver.create_cloned_volume,
+                          self.test_volume, self.test_volume_error2)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    def test_initialize_connection(self, arg1, arg2):
+        """test initialize connection."""
+        connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
+                     'ip': '127.0.0.1'}
+        rc = self.driver.initialize_connection(self._VOLUME, connector)
+        self.assertEqual(rc['driver_volume_type'], 'fibre_channel')
+        self.assertEqual(rc['data']['target_wwn'], ['50060E801053C2E0'])
+        self.assertEqual(rc['data']['target_lun'], 0)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    def test_initialize_connection_error(self, arg1, arg2):
+        """test initialize connection."""
+        connector = {'wwpns': ['12345678912345bb'], 'ip': '127.0.0.2'}
+        self.assertRaises(exception.HBSDError,
+                          self.driver.initialize_connection,
+                          self._VOLUME, connector)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    def test_terminate_connection(self, arg1, arg2):
+        """test terminate connection."""
+        connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
+                     'ip': '127.0.0.1'}
+        rc = self.driver.terminate_connection(self._VOLUME, connector)
+        self.assertEqual(rc['driver_volume_type'], 'fibre_channel')
+        self.assertEqual(rc['data']['target_wwn'], ['50060E801053C2E0'])
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom',
+                       side_effect=_exec_raidcom)
+    def test_terminate_connection_error(self, arg1, arg2):
+        """test terminate connection."""
+        connector = {'ip': '127.0.0.1'}
+        self.assertRaises(exception.HBSDError,
+                          self.driver.terminate_connection,
+                          self._VOLUME, connector)
+        return
diff --git a/cinder/tests/test_hitachi_hbsd_snm2_fc.py b/cinder/tests/test_hitachi_hbsd_snm2_fc.py
new file mode 100644 (file)
index 0000000..4f5b5e8
--- /dev/null
@@ -0,0 +1,379 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""
+Self test for Hitachi Block Storage Driver
+"""
+
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.hitachi import hbsd_basiclib
+from cinder.volume.drivers.hitachi import hbsd_common
+from cinder.volume.drivers.hitachi import hbsd_fc
+from cinder.volume.drivers.hitachi import hbsd_snm2
+
+
+def _exec_hsnm(*args, **kargs):
+    """Stub for HBSDSNM2.exec_hsnm: return the canned result for *args*.
+
+    Looks the positional argument tuple up in HBSDSNM2FCDriverTest.hsnm_vals;
+    returns None for any command/argument pair not in the fixture table.
+    """
+    return HBSDSNM2FCDriverTest.hsnm_vals.get(args)
+
+
+class HBSDSNM2FCDriverTest(test.TestCase):
+    """Test HBSDSNM2FCDriver."""
+
+    audppool_result = "  DP                RAID                               \
+                        Current Utilization  Current Over          Replication\
+ Available        Current Replication                    Rotational \
+                                                                              \
+                                                                       Stripe \
+ Needing Preparation\n\
+  Pool  Tier Mode   Level         Total Capacity        Consumed Capacity     \
+   Percent              Provisioning Percent  Capacity                     \
+Utilization Percent  Type                   Speed  Encryption  Status         \
+                                                                        \
+Reconstruction Progress                          Size    Capacity\n\
+     30  Disable       1( 1D+1D)           532.0 GB                   2.0 GB  \
+                     1%                24835%                 532.0 GB        \
+               1%  SAS                 10000rpm  N/A         Normal           \
+                                                                      N/A     \
+                                          256KB                 0.0 GB"
+
+    aureplicationlocal_result = "Pair Name                          LUN  Pair \
+LUN  Status                                              Copy Type    Group   \
+    Point-in-Time  MU Number\n\
+                                     0         10  0 Split( 99%)             \
+                        ShadowImage   ---:Ungrouped                        N/A\
+                   "
+
+    auluref_result = "                            Stripe  RAID     DP    Tier \
+  RAID                           Rotational  Number\n\
+   LU       Capacity        Size    Group    Pool  Mode     Level        Type\
+                   Speed  of Paths  Status\n\
+    0       2097152 blocks   256KB      0       0  Enable     5( 3D+1P)  SAS"
+
+    auhgwwn_result = "Port 00 Host Group Security  ON\n  Detected WWN\n    \
+Name                              Port Name         Host Group\n\
+HBSD-00                              10000000C97BCE7A  001:HBSD-01\n\
+  Assigned WWN\n    Name                              Port Name         \
+Host Group\n    abcdefg                           10000000C97BCE7A  \
+001:HBSD-01"
+
+    aufibre1_result = "Port Information\n\
+                                                    Port Address\n  CTL  Port\
+   Node Name          Port Name          Setting Current\n    0     0   \
+50060E801053C2E0   50060E801053C2E0   0000EF  272700"
+
+    auhgmap_result = "Mapping Mode = ON\nPort  Group                          \
+    H-LUN    LUN\n  00  001:HBSD-00                               0   1000"
+
+    hsnm_vals = {
+        ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
+        ('aureplicationlocal',
+         '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
+        [0, "", ""],
+        ('aureplicationlocal',
+         '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
+        [1, "", ""],
+        ('aureplicationlocal', '-unit None -refer -pvol 1'):
+        [0, "%s" % aureplicationlocal_result, ""],
+        ('aureplicationlocal', '-unit None -refer -pvol 3'):
+        [1, "", "DMEC002015"],
+        ('aureplicationlocal', '-unit None -refer -svol 3'):
+        [1, "", "DMEC002015"],
+        ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
+        [0, "", ""],
+        ('auluchgsize', '-unit None -lu 1 -size 256g'):
+        [0, "", ""],
+        ('auludel', '-unit None -lu 1 -f'): [0, 0, ""],
+        ('auludel', '-unit None -lu 3 -f'): [1, 0, ""],
+        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, 0, ""],
+        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
+        ('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
+        ('auhgmap', '-unit None -add 0 0 1 1 1'): [0, 0, ""],
+        ('auhgwwn', '-unit None -refer'): [0, "%s" % auhgwwn_result, ""],
+        ('aufibre1', '-unit None -refer'): [0, "%s" % aufibre1_result, ""],
+        ('auhgmap', '-unit None -refer'): [0, "%s" % auhgmap_result, ""]}
+
+# The following information is passed on to tests, when creating a volume
+
+    _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
+               'provider_location': '1', 'name': 'test',
+               'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
+
+    test_volume = {'name': 'test_volume', 'size': 128,
+                   'id': 'test-volume-0',
+                   'provider_location': '1', 'status': 'available'}
+
+    test_volume_error = {'name': 'test_volume_error', 'size': 256,
+                         'id': 'test-volume-error',
+                         'provider_location': '3', 'status': 'available'}
+
+    test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
+                          'id': 'test-volume-error',
+                          'provider_location': None, 'status': 'available'}
+
+    test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
+                          'id': 'test-volume-error',
+                          'provider_location': '1', 'status': 'available'}
+
+    test_volume_error3 = {'name': 'test_volume3', 'size': 128,
+                          'id': 'test-volume3',
+                          'volume_metadata': [{'key': 'type',
+                                               'value': 'V-VOL'}],
+                          'provider_location': '1', 'status': 'available'}
+
+    test_volume_error4 = {'name': 'test_volume4', 'size': 128,
+                          'id': 'test-volume2',
+                          'provider_location': '3', 'status': 'available'}
+
+    test_snapshot = {'volume_name': 'test', 'size': 128,
+                     'volume_size': 128, 'name': 'test-snap',
+                     'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
+                     'provider_location': '1', 'status': 'available'}
+
+    test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
+                            'volume_size': 128, 'name': 'test-snap',
+                            'volume_id': 0, 'id': 'test-snap-0',
+                            'volume': test_volume_error,
+                            'provider_location': None, 'status': 'available'}
+
+    def __init__(self, *args, **kwargs):
+        """Initialize the test case (no state beyond the base class).
+
+        NOTE(review): this override only delegates to the base class and
+        could be removed without changing behavior.
+        """
+        super(HBSDSNM2FCDriverTest, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        """Build a mocked configuration and FC driver before each test."""
+        super(HBSDSNM2FCDriverTest, self).setUp()
+        self._setup_config()
+        self._setup_driver()
+
+    def _setup_config(self):
+        """Create a mock Configuration carrying the options the driver reads.
+
+        NOTE(review): hitachi_serial_number / hitachi_unit_name /
+        config_group are set to the *string* "None", not the None object —
+        presumably intentional sentinels, but worth confirming against the
+        driver's option handling.
+        """
+        self.configuration = mock.Mock(conf.Configuration)
+        self.configuration.hitachi_pool_id = 30
+        self.configuration.hitachi_target_ports = "00"
+        self.configuration.hitachi_debug_level = 0
+        self.configuration.hitachi_serial_number = "None"
+        self.configuration.hitachi_unit_name = "None"
+        self.configuration.hitachi_group_request = False
+        self.configuration.hitachi_zoning_request = False
+        self.configuration.config_group = "None"
+        self.configuration.hitachi_ldev_range = [0, 100]
+        self.configuration.hitachi_default_copy_method = 'SI'
+        self.configuration.hitachi_copy_check_interval = 1
+        self.configuration.hitachi_copy_speed = 3
+
+    def _setup_driver(self):
+        """Instantiate HBSDFCDriver wired to the SNM2 command backend."""
+        self.driver = hbsd_fc.HBSDFCDriver(
+            configuration=self.configuration)
+        context = None
+        db = None
+        self.driver.common = hbsd_common.HBSDCommon(
+            self.configuration, self.driver, context, db)
+        self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration)
+        self.driver.common.pair_flock = \
+            self.driver.common.command.set_pair_flock()
+        # Presumably marks setup as complete so driver APIs do not block
+        # waiting for do_setup() — confirm against the driver implementation.
+        self.driver.do_setup_status.set()
+
+# API test cases
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_volume(self, arg1, arg2, arg3):
+        """test create_volume (success path)."""
+        ret = self.driver.create_volume(self._VOLUME)
+        # NOTE(review): the copy/assign dance is redundant — asserting
+        # ret['provider_location'] == '1' directly would be equivalent.
+        vol = self._VOLUME.copy()
+        vol['provider_location'] = ret['provider_location']
+        self.assertEqual(vol['provider_location'], '1')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_volume_error(self, arg1, arg2, arg3):
+        """test create_volume (error case: 256g auluadd returns rc=1)."""
+        self.assertRaises(exception.HBSDCmdError,
+                          self.driver.create_volume,
+                          self.test_volume_error)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_get_volume_stats(self, arg1, arg2):
+        """test get_volume_stats (success path)."""
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual(stats['vendor_name'], 'Hitachi')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_get_volume_stats_error(self, arg1, arg2):
+        """test get_volume_stats (error case: unknown pool id)."""
+        # Pool 29 does not appear in the audppool fixture output, so stats
+        # collection is expected to fail and yield an empty dict.
+        self.configuration.hitachi_pool_id = 29
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual(stats, {})
+        # NOTE(review): restoring by hand is fragile — if an assertion above
+        # fails the value stays 29; self.addCleanup() would be safer.
+        self.configuration.hitachi_pool_id = 30
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_extend_volume(self, arg1, arg2):
+        """test extend_volume (success path; no explicit assertion).
+
+        NOTE(review): passes as long as no exception is raised; an assertion
+        on the resulting size or issued command would strengthen it.
+        """
+        self.driver.extend_volume(self._VOLUME, 256)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_extend_volume_error(self, arg1, arg2):
+        """test extend_volume (error case: V-VOL metadata rejects extend)."""
+        self.assertRaises(exception.HBSDError, self.driver.extend_volume,
+                          self.test_volume_error3, 256)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_delete_volume(self, arg1, arg2):
+        """test delete_volume (success path; passes if nothing raises)."""
+        self.driver.delete_volume(self._VOLUME)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_delete_volume_error(self, arg1, arg2):
+        """test delete_volume (error case: auludel on LU 3 returns rc=1)."""
+        self.assertRaises(exception.HBSDCmdError,
+                          self.driver.delete_volume,
+                          self.test_volume_error4)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
+                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=_VOLUME)
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
+        """test create_snapshot (success path)."""
+        # NOTE(review): the create_volume result is immediately overwritten;
+        # the first call appears to exist only to set up driver state.
+        ret = self.driver.create_volume(self._VOLUME)
+        ret = self.driver.create_snapshot(self.test_snapshot)
+        self.assertEqual(ret['provider_location'], '1')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
+                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=test_volume_error)
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
+        """test create_snapshot (error case: source volume on LU 3)."""
+        self.assertRaises(exception.HBSDCmdError,
+                          self.driver.create_snapshot,
+                          self.test_snapshot_error2)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_delete_snapshot(self, arg1, arg2):
+        """test delete_snapshot (success path; passes if nothing raises)."""
+        self.driver.delete_snapshot(self.test_snapshot)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_delete_snapshot_error(self, arg1, arg2):
+        """test delete_snapshot (error-named case).
+
+        NOTE(review): despite the name, this makes no assertion and does not
+        expect an exception — presumably the error path is swallowed by the
+        driver; confirm this is the intended contract or add assertRaises.
+        """
+        self.driver.delete_snapshot(self.test_snapshot_error2)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
+        """test create_volume_from_snapshot (success path)."""
+        vol = self.driver.create_volume_from_snapshot(self._VOLUME,
+                                                      self.test_snapshot)
+        # NOTE(review): only checks non-None; asserting provider_location
+        # (as test_create_volume does) would be a stronger check.
+        self.assertIsNotNone(vol)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
+        """test create_volume_from_snapshot (error: size mismatch 256 vs 128)."""
+        self.assertRaises(exception.HBSDError,
+                          self.driver.create_volume_from_snapshot,
+                          self.test_volume_error2, self.test_snapshot)
+        return
+
+    # NOTE(review): decorator order here (get_process_lock last) is reversed
+    # relative to every other test in this class — harmless, but inconsistent.
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=_VOLUME)
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
+        """test create_cloned_volume (success path)."""
+        vol = self.driver.create_cloned_volume(self._VOLUME,
+                                               self.test_volume)
+        self.assertIsNotNone(vol)
+        return
+
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=test_volume_error1)
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
+        """test create_cloned_volume (error: source has no provider_location)."""
+        self.assertRaises(exception.HBSDError,
+                          self.driver.create_cloned_volume,
+                          self._VOLUME, self.test_volume_error1)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_initialize_connection(self, arg1, arg2):
+        """test initialize connection (success path)."""
+        connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
+        rc = self.driver.initialize_connection(self._VOLUME, connector)
+        self.assertEqual(rc['driver_volume_type'], 'fibre_channel')
+        # Target WWN comes from the aufibre1 fixture output above.
+        self.assertEqual(rc['data']['target_wwn'], ['50060E801053C2E0'])
+        self.assertEqual(rc['data']['target_lun'], 1)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_initialize_connection_error(self, arg1, arg2):
+        """test initialize connection (error case: bogus wwpns value)."""
+        connector = {'wwpns': 'x', 'ip': '0xc0a80100'}
+        self.assertRaises(exception.HBSDError,
+                          self.driver.initialize_connection,
+                          self._VOLUME, connector)
+        return
+
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_terminate_connection(self, arg1):
+        """test terminate connection (success path)."""
+        connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'}
+        rc = self.driver.terminate_connection(self._VOLUME, connector)
+        self.assertEqual(rc['driver_volume_type'], 'fibre_channel')
+        self.assertEqual(rc['data']['target_wwn'], ['50060E801053C2E0'])
+        return
+
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_terminate_connection_error(self, arg1):
+        """test terminate connection (error case: connector lacks wwpns)."""
+        connector = {'ip': '0xc0a80100'}
+        self.assertRaises(exception.HBSDError,
+                          self.driver.terminate_connection,
+                          self._VOLUME, connector)
+        return
diff --git a/cinder/tests/test_hitachi_hbsd_snm2_iscsi.py b/cinder/tests/test_hitachi_hbsd_snm2_iscsi.py
new file mode 100644 (file)
index 0000000..757c28d
--- /dev/null
@@ -0,0 +1,494 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""
+Self test for Hitachi Block Storage Driver
+"""
+
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder import utils
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.hitachi import hbsd_basiclib
+from cinder.volume.drivers.hitachi import hbsd_common
+from cinder.volume.drivers.hitachi import hbsd_iscsi
+from cinder.volume.drivers.hitachi import hbsd_snm2
+
+
+def _exec_hsnm(*args, **kargs):
+    """Stub for HBSDSNM2.exec_hsnm during test bodies.
+
+    Looks the argument tuple up in HBSDSNM2ISCSIDriverTest.hsnm_vals;
+    returns None for unanticipated command/argument pairs.
+    """
+    return HBSDSNM2ISCSIDriverTest.hsnm_vals.get(args)
+
+
+def _exec_hsnm_init(*args, **kargs):
+    """Stub for HBSDSNM2.exec_hsnm during setUp (uses hsnm_vals_init)."""
+    return HBSDSNM2ISCSIDriverTest.hsnm_vals_init.get(args)
+
+
+class HBSDSNM2ISCSIDriverTest(test.TestCase):
+    """Test HBSDSNM2ISCSIDriver."""
+
+    audppool_result = "  DP                RAID                               \
+                        Current Utilization  Current Over          Replication\
+ Available        Current Replication                    Rotational \
+                                                                              \
+                                                                       Stripe \
+ Needing Preparation\n\
+  Pool  Tier Mode   Level         Total Capacity        Consumed Capacity     \
+   Percent              Provisioning Percent  Capacity                     \
+Utilization Percent  Type                   Speed  Encryption  Status         \
+                                                                        \
+Reconstruction Progress                          Size    Capacity\n\
+     30  Disable       1( 1D+1D)           532.0 GB                   2.0 GB  \
+                     1%                24835%                 532.0 GB        \
+               1%  SAS                 10000rpm  N/A         Normal           \
+                                                                      N/A     \
+                                          256KB                 0.0 GB"
+
+    aureplicationlocal_result = "Pair Name                          LUN  Pair \
+LUN  Status                                              Copy Type    Group   \
+    Point-in-Time  MU Number\n\
+                                     0         10  0 Split( 99%)             \
+                        ShadowImage   ---:Ungrouped                        N/A\
+                   "
+
+    auluref_result = "                            Stripe  RAID     DP    Tier \
+  RAID                           Rotational  Number\n\
+   LU       Capacity        Size    Group    Pool  Mode     Level        Type\
+                   Speed  of Paths  Status\n\
+    0       2097152 blocks   256KB      0       0  Enable     5( 3D+1P)  SAS"
+
+    auhgwwn_result = "Port 00 Host Group Security  ON\n  Detected WWN\n    \
+Name                              Port Name         Host Group\n\
+HBSD-00                              10000000C97BCE7A  001:HBSD-01\n\
+  Assigned WWN\n    Name                              Port Name         \
+Host Group\n    abcdefg                           10000000C97BCE7A  \
+001:HBSD-01"
+
+    autargetini_result = "Port 00  Target Security  ON\n\
+  Target                               Name                             \
+iSCSI Name\n\
+  001:HBSD-01                                                              \
+iqn"
+
+    autargetini_result2 = "Port 00  Target Security  ON\n\
+  Target                               Name                             \
+iSCSI Name"
+
+    autargetmap_result = "Mapping Mode = ON\n\
+Port  Target                                H-LUN    LUN\n\
+  00  001:HBSD-01                                  0     1000"
+
+    auiscsi_result = "Port 00\n\
+  Port Number            : 3260\n\
+  Keep Alive Timer[sec.] : 60\n\
+  MTU                    : 1500\n\
+  Transfer Rate          : 1Gbps\n\
+  Link Status            : Link Up\n\
+  Ether Address          : 00:00:87:33:D1:3E\n\
+  IPv4\n\
+    IPv4 Address               : 192.168.0.1\n\
+    IPv4 Subnet Mask           : 255.255.252.0\n\
+    IPv4 Default Gateway       : 0.0.0.0\n\
+  IPv6 Status            : Disable\n\
+  Connecting Hosts       : 0\n\
+  Result                 : Normal\n\
+  VLAN Status            : Disable\n\
+  VLAN ID                : N/A\n\
+  Header Digest          : Enable\n\
+  Data Digest            : Enable\n\
+  Window Scale           : Disable"
+
+    autargetdef_result = "Port 00\n\
+                                       Authentication                 Mutual\n\
+  Target                               Method         CHAP Algorithm  \
+Authentication\n\
+  001:T000                             None           ---              ---\n\
+    User Name  : ---\n\
+    iSCSI Name : iqn-target"
+
+    hsnm_vals = {
+        ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
+        ('aureplicationlocal',
+         '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
+        [0, "", ""],
+        ('aureplicationlocal',
+         '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
+        [1, "", ""],
+        ('aureplicationlocal', '-unit None -refer -pvol 1'):
+        [0, "%s" % aureplicationlocal_result, ""],
+        ('aureplicationlocal', '-unit None -refer -pvol 3'):
+        [1, "", "DMEC002015"],
+        ('aureplicationlocal', '-unit None -refer -svol 3'):
+        [1, "", "DMEC002015"],
+        ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
+        [0, "", ""],
+        ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 1'):
+        [1, "", ""],
+        ('auluchgsize', '-unit None -lu 1 -size 256g'):
+        [0, "", ""],
+        ('auludel', '-unit None -lu 1 -f'): [0, "", ""],
+        ('auludel', '-unit None -lu 3 -f'): [1, "", ""],
+        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
+        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
+        ('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
+        ('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
+        ('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
+        ('autargetini', '-unit None -refer'):
+        [0, "%s" % autargetini_result, ""],
+        ('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
+        [0, "", ""],
+        ('autargetmap', '-unit None -refer'):
+        [0, "%s" % autargetmap_result, ""],
+        ('autargetdef',
+         '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
+-authmethod None'):
+        [0, "", ""],
+        ('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 \
+-iname iqnX.target -authmethod None'):
+        [1, "", ""],
+        ('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
+-ReportFullPortalList enable'):
+        [0, "", ""],
+        ('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
+        ('autargetdef', '-unit None -refer'):
+        [0, "%s" % autargetdef_result, ""]}
+
+    hsnm_vals_init = {
+        ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
+        ('aureplicationlocal',
+         '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
+        [0, 0, ""],
+        ('aureplicationlocal', '-unit None -refer -pvol 1'):
+        [0, "%s" % aureplicationlocal_result, ""],
+        ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
+        [0, 0, ""],
+        ('auluchgsize', '-unit None -lu 1 -size 256g'):
+        [0, 0, ""],
+        ('auludel', '-unit None -lu 1 -f'): [0, "", ""],
+        ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
+        ('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
+        ('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
+        ('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
+        ('autargetini', '-unit None -refer'):
+        [0, "%s" % autargetini_result2, ""],
+        ('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
+        [0, "", ""],
+        ('autargetmap', '-unit None -refer'):
+        [0, "%s" % autargetmap_result, ""],
+        ('autargetdef',
+         '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
+-authmethod None'):
+        [0, "", ""],
+        ('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
+-ReportFullPortalList enable'):
+        [0, "", ""],
+        ('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
+        ('autargetdef', '-unit None -refer'):
+        [0, "%s" % autargetdef_result, ""],
+        ('auman', '-help'):
+        [0, "Version 27.50", ""]}
+
+# The following information is passed on to tests, when creating a volume
+
+    _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
+               'provider_location': '1', 'name': 'test',
+               'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
+
+    test_volume = {'name': 'test_volume', 'size': 128,
+                   'id': 'test-volume-0',
+                   'provider_location': '1', 'status': 'available'}
+
+    test_volume_error = {'name': 'test_volume_error', 'size': 256,
+                         'id': 'test-volume-error',
+                         'provider_location': '3', 'status': 'available'}
+
+    test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
+                          'id': 'test-volume-error',
+                          'provider_location': None, 'status': 'available'}
+
+    test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
+                          'id': 'test-volume-error',
+                          'provider_location': '1', 'status': 'available'}
+
+    test_volume_error3 = {'name': 'test_volume3', 'size': 128,
+                          'id': 'test-volume3',
+                          'volume_metadata': [{'key': 'type',
+                                               'value': 'V-VOL'}],
+                          'provider_location': '1', 'status': 'available'}
+
+    test_volume_error4 = {'name': 'test_volume4', 'size': 128,
+                          'id': 'test-volume2',
+                          'provider_location': '3', 'status': 'available'}
+
+    test_snapshot = {'volume_name': 'test', 'size': 128,
+                     'volume_size': 128, 'name': 'test-snap',
+                     'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
+                     'provider_location': '1', 'status': 'available'}
+
+    test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
+                            'volume_size': 128, 'name': 'test-snap',
+                            'volume_id': 0, 'id': 'test-snap-0',
+                            'volume': test_volume_error,
+                            'provider_location': None, 'status': 'available'}
+
+    def __init__(self, *args, **kwargs):
+        """Initialize the test case (no state beyond the base class).
+
+        NOTE(review): this override only delegates to the base class and
+        could be removed without changing behavior.
+        """
+        super(HBSDSNM2ISCSIDriverTest, self).__init__(*args, **kwargs)
+
+    @mock.patch.object(utils, 'brick_get_connector_properties',
+                       return_value={'ip': '0.0.0.0',
+                                     'initiator': 'iqn'})
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
+                       side_effect=_exec_hsnm_init)
+    @mock.patch.object(utils, 'execute',
+                       return_value=['', ''])
+    def setUp(self, args1, arg2, arg3, arg4):
+        super(HBSDSNM2ISCSIDriverTest, self).setUp()
+        self._setup_config()
+        self._setup_driver()
+        self.driver.check_param()
+        self.driver.common.create_lock_file()
+        self.driver.common.command.connect_storage()
+        self.driver.max_hostgroups = \
+            self.driver.common.command.get_max_hostgroups()
+        self.driver.add_hostgroup()
+        self.driver.output_param_to_log()
+        self.driver.do_setup_status.set()
+
+    def _setup_config(self):
+        """Create a mock Configuration with the iSCSI driver's options.
+
+        NOTE(review): hitachi_ldev_range is the string "0-100" here but the
+        list [0, 100] in the FC test — presumably both are accepted; confirm.
+        NOTE(review): hitachi_add_chap_user is the *string* "False", which is
+        truthy if ever used in a boolean context — verify intended.
+        """
+        self.configuration = mock.Mock(conf.Configuration)
+        self.configuration.hitachi_pool_id = 30
+        self.configuration.hitachi_thin_pool_id = 31
+        self.configuration.hitachi_target_ports = "00"
+        self.configuration.hitachi_debug_level = 0
+        self.configuration.hitachi_serial_number = None
+        self.configuration.hitachi_unit_name = "None"
+        self.configuration.hitachi_group_request = True
+        self.configuration.hitachi_group_range = "0-1"
+        self.configuration.config_group = "None"
+        self.configuration.hitachi_ldev_range = "0-100"
+        self.configuration.hitachi_default_copy_method = 'FULL'
+        self.configuration.hitachi_copy_check_interval = 1
+        self.configuration.hitachi_async_copy_check_interval = 1
+        self.configuration.hitachi_copy_speed = 3
+        self.configuration.hitachi_auth_method = None
+        self.configuration.hitachi_auth_user = "HBSD-CHAP-user"
+        self.configuration.hitachi_auth_password = "HBSD-CHAP-password"
+        self.configuration.hitachi_add_chap_user = "False"
+
+    def _setup_driver(self):
+        """Instantiate the iSCSI driver and its HBSDCommon helper."""
+        self.driver = hbsd_iscsi.HBSDISCSIDriver(
+            configuration=self.configuration)
+        # context/db are not needed by the mocked code paths in these tests.
+        context = None
+        db = None
+        self.driver.common = hbsd_common.HBSDCommon(
+            self.configuration, self.driver, context, db)
+
+# API test cases
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_volume(self, arg1, arg2, arg3):
+        """create_volume succeeds and sets provider_location to the LDEV."""
+        ret = self.driver.create_volume(self._VOLUME)
+        vol = self._VOLUME.copy()
+        vol['provider_location'] = ret['provider_location']
+        self.assertEqual(vol['provider_location'], '1')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_volume_error(self, arg1, arg2, arg3):
+        """create_volume raises HBSDCmdError for the error volume fixture."""
+        self.assertRaises(exception.HBSDCmdError,
+                          self.driver.create_volume,
+                          self.test_volume_error)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_get_volume_stats(self, arg1, arg2):
+        """get_volume_stats reports 'Hitachi' as the vendor name."""
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual(stats['vendor_name'], 'Hitachi')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_get_volume_stats_error(self, arg1, arg2):
+        """get_volume_stats returns {} when the pool id is unknown."""
+        self.configuration.hitachi_pool_id = 29
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual(stats, {})
+        # Restore the valid pool id for subsequent tests.
+        self.configuration.hitachi_pool_id = 30
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_extend_volume(self, arg1, arg2):
+        """extend_volume completes without raising."""
+        self.driver.extend_volume(self._VOLUME, 256)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_extend_volume_error(self, arg1, arg2):
+        """extend_volume raises HBSDError for the error volume fixture."""
+        self.assertRaises(exception.HBSDError, self.driver.extend_volume,
+                          self.test_volume_error3, 256)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_delete_volume(self, arg1, arg2):
+        """delete_volume completes without raising."""
+        self.driver.delete_volume(self._VOLUME)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_delete_volume_error(self, arg1, arg2):
+        """delete_volume raises HBSDCmdError for the error volume fixture."""
+        self.assertRaises(exception.HBSDCmdError,
+                          self.driver.delete_volume,
+                          self.test_volume_error4)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
+                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=_VOLUME)
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
+        """create_snapshot returns the LDEV number as provider_location."""
+        # The created volume's return value is discarded; only the snapshot
+        # result is asserted.
+        ret = self.driver.create_volume(self._VOLUME)
+        ret = self.driver.create_snapshot(self.test_snapshot)
+        self.assertEqual(ret['provider_location'], '1')
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
+                       return_value={'dummy_snapshot_meta': 'snapshot_meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=test_volume_error)
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
+        """create_snapshot raises HBSDCmdError for the error snapshot."""
+        self.assertRaises(exception.HBSDCmdError,
+                          self.driver.create_snapshot,
+                          self.test_snapshot_error2)
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_delete_snapshot(self, arg1, arg2):
+        """delete_snapshot completes without raising."""
+        self.driver.delete_snapshot(self.test_snapshot)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_delete_snapshot_error(self, arg1, arg2):
+        """Deleting a snapshot with no provider_location must not raise."""
+        self.driver.delete_snapshot(self.test_snapshot_error2)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
+        """create_volume_from_snapshot returns a non-None result."""
+        vol = self.driver.create_volume_from_snapshot(self._VOLUME,
+                                                      self.test_snapshot)
+        self.assertIsNotNone(vol)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
+        """create_volume_from_snapshot raises HBSDError for a bad target."""
+        self.assertRaises(exception.HBSDError,
+                          self.driver.create_volume_from_snapshot,
+                          self.test_volume_error2, self.test_snapshot)
+        return
+
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=_VOLUME)
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
+        """create_cloned_volume returns a non-None result."""
+        vol = self.driver.create_cloned_volume(self._VOLUME,
+                                               self.test_snapshot)
+        self.assertIsNotNone(vol)
+        return
+
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
+                       return_value={'dummy_volume_meta': 'meta'})
+    @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
+                       return_value=test_volume_error1)
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
+        """create_cloned_volume raises HBSDError for the error source."""
+        self.assertRaises(exception.HBSDError,
+                          self.driver.create_cloned_volume,
+                          self._VOLUME, self.test_volume_error1)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_initialize_connection(self, arg1, arg2):
+        """initialize_connection returns iSCSI target info for the LUN."""
+        connector = {
+            'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
+            'iqn'}
+        rc = self.driver.initialize_connection(self._VOLUME, connector)
+        self.assertEqual(rc['driver_volume_type'], 'iscsi')
+        self.assertEqual(rc['data']['target_iqn'], 'iqn-target')
+        self.assertEqual(rc['data']['target_lun'], 1)
+        return
+
+    @mock.patch.object(hbsd_basiclib, 'get_process_lock')
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_initialize_connection_error(self, arg1, arg2):
+        """initialize_connection raises HBSDError for an unknown initiator."""
+        # 'iqnX' is not the initiator registered during setUp.
+        connector = {
+            'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
+            'iqnX'}
+        self.assertRaises(exception.HBSDError,
+                          self.driver.initialize_connection,
+                          self._VOLUME, connector)
+        return
+
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_terminate_connection(self, arg1):
+        """terminate_connection completes without raising."""
+        connector = {
+            'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
+            'iqn'}
+        self.driver.terminate_connection(self._VOLUME, connector)
+        return
+
+    @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
+    def test_terminate_connection_error(self, arg1):
+        """terminate_connection raises HBSDError without an initiator."""
+        # Connector lacks the 'initiator' key required for iSCSI teardown.
+        connector = {'ip': '0.0.0.0'}
+        self.assertRaises(exception.HBSDError,
+                          self.driver.terminate_connection,
+                          self._VOLUME, connector)
+        return
diff --git a/cinder/volume/drivers/hitachi/__init__.py b/cinder/volume/drivers/hitachi/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cinder/volume/drivers/hitachi/hbsd_basiclib.py b/cinder/volume/drivers/hitachi/hbsd_basiclib.py
new file mode 100644 (file)
index 0000000..74c5782
--- /dev/null
@@ -0,0 +1,265 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import inspect
+import os
+import shlex
+
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import excutils
+from cinder.openstack.common import lockutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import processutils as putils
+from cinder import utils
+
+# Copy-pair status codes used throughout the drivers (e.g. compared against
+# the 'status' field of paired-volume info).
+SMPL = 1
+COPY = 2
+PAIR = 3
+PSUS = 4
+PSUE = 5
+UNKN = 0xff
+
+# Human-readable labels for the two supported copy methods.
+FULL = 'Full copy'
+THIN = 'Thin copy'
+
+DEFAULT_TRY_RANGE = range(3)
+MAX_PROCESS_WAITTIME = 86400        # seconds (1 day) — upper bound for waits
+DEFAULT_PROCESS_WAITTIME = 900      # seconds — default wait timeout
+
+GETSTORAGEARRAY_ONCE = 100
+
+# Message IDs below this value are informational; at or above are warnings.
+WARNING_ID = 300
+
+DEFAULT_GROUP_RANGE = [0, 65535]
+
+NAME_PREFIX = 'HBSD-'               # prefix for objects created by the driver
+
+LOCK_DIR = '/var/lock/hbsd/'        # base directory for inter-process locks
+
+LOG = logging.getLogger(__name__)
+
+# Informational messages (IDs < WARNING_ID), keyed by message ID.
+HBSD_INFO_MSG = {
+    1: _('The parameter of the storage backend. '
+         '(config_group: %(config_group)s)'),
+    3: _('The storage backend can be used. (config_group: %(config_group)s)'),
+}
+
+# Warning messages (IDs >= WARNING_ID), keyed by message ID; formatted and
+# prefixed by set_msg().
+HBSD_WARN_MSG = {
+    301: _('A LUN (HLUN) was not found. (LDEV: %(ldev)s)'),
+    302: _('Failed to specify a logical device for the volume '
+           '%(volume_id)s to be unmapped.'),
+    303: _('An iSCSI CHAP user could not be deleted. (username: %(user)s)'),
+    304: _('Failed to specify a logical device to be deleted. '
+           '(method: %(method)s, id: %(id)s)'),
+    305: _('The logical device for specified %(type)s %(id)s '
+           'was already deleted.'),
+    306: _('A host group could not be deleted. (port: %(port)s, '
+           'gid: %(gid)s, name: %(name)s)'),
+    307: _('An iSCSI target could not be deleted. (port: %(port)s, '
+           'tno: %(tno)s, alias: %(alias)s)'),
+    308: _('A host group could not be added. (port: %(port)s, '
+           'name: %(name)s)'),
+    309: _('An iSCSI target could not be added. '
+           '(port: %(port)s, alias: %(alias)s, reason: %(reason)s)'),
+    310: _('Failed to unmap a logical device. (LDEV: %(ldev)s, '
+           'reason: %(reason)s)'),
+    311: _('A free LUN (HLUN) was not found. Add a different host'
+           ' group. (LDEV: %(ldev)s)'),
+    312: _('Failed to get a storage resource. The system will attempt '
+           'to get the storage resource again. (resource: %(resource)s)'),
+    313: _('Failed to delete a logical device. (LDEV: %(ldev)s, '
+           'reason: %(reason)s)'),
+    314: _('Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, '
+           'port: %(port)s, id: %(id)s)'),
+    315: _('Failed to perform a zero-page reclamation. '
+           '(LDEV: %(ldev)s, reason: %(reason)s)'),
+    316: _('Failed to assign the iSCSI initiator IQN. (port: %(port)s, '
+           'reason: %(reason)s)'),
+}
+
+# Error messages, keyed by message ID; formatted and logged by output_err().
+HBSD_ERR_MSG = {
+    600: _('The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, '
+           'stderr: %(err)s)'),
+    601: _('A parameter is invalid. (%(param)s)'),
+    602: _('A parameter value is invalid. (%(meta)s)'),
+    603: _('Failed to acquire a resource lock. (serial: %(serial)s, '
+           'inst: %(inst)s, ret: %(ret)s, stderr: %(err)s)'),
+    604: _('Cannot set both hitachi_serial_number and hitachi_unit_name.'),
+    605: _('Either hitachi_serial_number or hitachi_unit_name is required.'),
+    615: _('A pair could not be created. The maximum number of pair is '
+           'exceeded. (copy method: %(copy_method)s, P-VOL: %(pvol)s)'),
+    616: _('A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)'),
+    617: _('The specified operation is not supported. The volume size '
+           'must be the same as the source %(type)s. (volume: %(volume_id)s)'),
+    618: _('The volume %(volume_id)s could not be extended. '
+           'The volume type must be Normal.'),
+    619: _('The volume %(volume_id)s to be mapped was not found.'),
+    624: _('The %(type)s %(id)s source to be replicated was not found.'),
+    631: _('Failed to create a file. (file: %(file)s, ret: %(ret)s, '
+           'stderr: %(err)s)'),
+    632: _('Failed to open a file. (file: %(file)s, ret: %(ret)s, '
+           'stderr: %(err)s)'),
+    633: _('%(file)s: Permission denied.'),
+    636: _('Failed to add the logical device.'),
+    637: _('The method %(method)s is timed out. (timeout value: %(timeout)s)'),
+    640: _('A pool could not be found. (pool id: %(pool_id)s)'),
+    641: _('The host group or iSCSI target could not be added.'),
+    642: _('An iSCSI CHAP user could not be added. (username: %(user)s)'),
+    643: _('The iSCSI CHAP user %(user)s does not exist.'),
+    648: _('There are no resources available for use. '
+           '(resource: %(resource)s)'),
+    649: _('The host group or iSCSI target was not found.'),
+    650: _('The resource %(resource)s was not found.'),
+    651: _('The IP Address was not found.'),
+    653: _('The creation of a logical device could not be '
+           'completed. (LDEV: %(ldev)s)'),
+    654: _('A volume status is invalid. (status: %(status)s)'),
+    655: _('A snapshot status is invalid. (status: %(status)s)'),
+    659: _('A host group is invalid. (host group: %(gid)s)'),
+    660: _('The specified %(desc)s is busy.'),
+}
+
+
+def set_msg(msg_id, **kwargs):
+    """Return the formatted info/warning message for *msg_id*.
+
+    IDs below WARNING_ID are informational ('-I' header); all others are
+    warnings ('-W' header). *kwargs* fill the message's %-placeholders.
+    """
+    if msg_id < WARNING_ID:
+        msg_header = 'MSGID%04d-I:' % msg_id
+        msg_body = HBSD_INFO_MSG.get(msg_id)
+    else:
+        msg_header = 'MSGID%04d-W:' % msg_id
+        msg_body = HBSD_WARN_MSG.get(msg_id)
+
+    return '%(header)s %(body)s' % {'header': msg_header,
+                                    'body': msg_body % kwargs}
+
+
+def output_err(msg_id, **kwargs):
+    """Log error message *msg_id* (from HBSD_ERR_MSG) and return its text.
+
+    The returned string is typically passed to an HBSDError constructor.
+    """
+    msg = HBSD_ERR_MSG.get(msg_id) % kwargs
+
+    LOG.error("MSGID%04d-E: %s", msg_id, msg)
+
+    return msg
+
+
+def get_process_lock(file):
+    """Return an InterProcessLock on *file*, or raise HBSDError (633).
+
+    NOTE: the parameter name shadows the Python 2 builtin ``file``.
+    """
+    # Fail fast with "permission denied" if the lock file is not writable.
+    if not os.access(file, os.W_OK):
+        msg = output_err(633, file=file)
+        raise exception.HBSDError(message=msg)
+    return lockutils.InterProcessLock(file)
+
+
+def create_empty_file(filename):
+    """Create *filename* via ``touch`` if it does not exist.
+
+    Raises HBSDError (631) if the touch command fails.
+    """
+    if not os.path.exists(filename):
+        try:
+            utils.execute('touch', filename)
+        except putils.ProcessExecutionError as ex:
+            msg = output_err(
+                631, file=filename, ret=ex.exit_code, err=ex.stderr)
+            raise exception.HBSDError(message=msg)
+
+
+class FileLock(lockutils.InterProcessLock):
+    """Inter-process file lock combined with an in-process lock object.
+
+    The in-process lock (e.g. a threading lock) is acquired before the file
+    lock and released after it, so both thread- and process-level exclusion
+    are provided by a single context manager.
+    """
+
+    def __init__(self, name, lock_object):
+        # lock_object must expose acquire()/release().
+        self.lock_object = lock_object
+
+        super(FileLock, self).__init__(name)
+
+    def __enter__(self):
+        # Verify writability first so we fail with a clear 633 error rather
+        # than an OS-level one from the base class.
+        if not os.access(self.fname, os.W_OK):
+            msg = output_err(633, file=self.fname)
+            raise exception.HBSDError(message=msg)
+
+        self.lock_object.acquire()
+
+        try:
+            ret = super(FileLock, self).__enter__()
+        except Exception:
+            # Do not hold the in-process lock if the file lock failed.
+            with excutils.save_and_reraise_exception():
+                self.lock_object.release()
+
+        return ret
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            super(FileLock, self).__exit__(exc_type, exc_val, exc_tb)
+        finally:
+            # Release the in-process lock even if the file unlock raised.
+            self.lock_object.release()
+
+
+class NopLock(object):
+    """No-op context manager used where no real locking is required."""
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+
+class HBSDBasicLib(object):
+    """Base class for the HORCM/SNM2 command backends.
+
+    Provides generic command execution; the remaining methods are hooks
+    that concrete backends override.
+    """
+
+    def __init__(self, conf=None):
+        self.conf = conf
+
+    def exec_command(self, cmd, args=None, printflag=True):
+        """Run *cmd* (with optional string *args*) as root.
+
+        Returns a (return-code, stdout, stderr) tuple; a return code of 0
+        means success. Failures are logged, not raised.
+        """
+        if printflag:
+            if args:
+                LOG.debug('cmd: %(cmd)s, args: %(args)s' %
+                          {'cmd': cmd, 'args': args})
+            else:
+                LOG.debug('cmd: %s' % cmd)
+
+        cmd = [cmd]
+
+        if args:
+            # Split the argument string shell-style; encode unicode first
+            # because Python 2 shlex cannot handle unicode input.
+            if isinstance(args, six.text_type):
+                cmd += shlex.split(args.encode())
+            else:
+                cmd += shlex.split(args)
+
+        try:
+            stdout, stderr = utils.execute(*cmd, run_as_root=True)
+            ret = 0
+        except putils.ProcessExecutionError as e:
+            ret = e.exit_code
+            stdout = e.stdout
+            stderr = e.stderr
+
+            # Log full failure context, including the caller's stack frame.
+            LOG.debug('cmd: %s' % six.text_type(cmd))
+            LOG.debug('from: %s' % six.text_type(inspect.stack()[2]))
+            LOG.debug('ret: %d' % ret)
+            LOG.debug('stdout: %s' % stdout.replace(os.linesep, ' '))
+            LOG.debug('stderr: %s' % stderr.replace(os.linesep, ' '))
+
+        return ret, stdout, stderr
+
+    # --- subclass hooks; the defaults below are intentional no-ops ---
+
+    def set_pair_flock(self):
+        # Default: no pair-operation lock is needed.
+        return NopLock()
+
+    def discard_zero_page(self, ldev):
+        pass
+
+    def output_param_to_log(self, conf):
+        pass
+
+    def connect_storage(self):
+        pass
+
+    def get_max_hostgroups(self):
+        pass
+
+    def restart_pair_horcm(self):
+        pass
diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py
new file mode 100644 (file)
index 0000000..cf1e5d5
--- /dev/null
@@ -0,0 +1,736 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Common class for Hitachi storage drivers.
+
+"""
+
+from contextlib import nested
+import re
+import threading
+
+from oslo.config import cfg
+import six
+
+from cinder.db.sqlalchemy import api
+from cinder.db.sqlalchemy import models
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
+from cinder.volume.drivers.hitachi import hbsd_horcm as horcm
+from cinder.volume.drivers.hitachi import hbsd_snm2 as snm2
+from cinder.volume import utils as volume_utils
+
+
+VERSION = '1.0.0'
+
+# Valid value ranges for integer config options, enforced in check_param().
+PARAM_RANGE = {
+    'hitachi_copy_check_interval': {'min': 1, 'max': 600},
+    'hitachi_async_copy_check_interval': {'min': 1, 'max': 600},
+    'hitachi_copy_speed': {'min': 1, 'max': 15},
+}
+
+DEFAULT_LDEV_RANGE = [0, 65535]
+
+COPY_METHOD = ('FULL', 'THIN')          # allowed copy-method names
+VALID_DP_VOLUME_STATUS = ['available', 'in-use']
+VALID_V_VOLUME_STATUS = ['available']
+# Lock-file paths; service/storage files are suffixed per config group /
+# storage identifier in HBSDCommon.__init__.
+SYSTEM_LOCK_FILE = basic_lib.LOCK_DIR + 'system'
+SERVICE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'service_'
+STORAGE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'storage_'
+
+LOG = logging.getLogger(__name__)
+
+# Driver configuration options, registered globally below. Note that
+# hitachi_group_request is marked secret=True and is therefore skipped by
+# output_param_to_log's generic loop (it is logged separately for iSCSI).
+volume_opts = [
+    cfg.StrOpt('hitachi_serial_number',
+               default=None,
+               help='Serial number of storage system'),
+    cfg.StrOpt('hitachi_unit_name',
+               default=None,
+               help='Name of an array unit'),
+    cfg.IntOpt('hitachi_pool_id',
+               default=None,
+               help='Pool ID of storage system'),
+    cfg.IntOpt('hitachi_thin_pool_id',
+               default=None,
+               help='Thin pool ID of storage system'),
+    cfg.StrOpt('hitachi_ldev_range',
+               default=None,
+               help='Range of logical device of storage system'),
+    cfg.StrOpt('hitachi_default_copy_method',
+               default='FULL',
+               help='Default copy method of storage system'),
+    cfg.IntOpt('hitachi_copy_speed',
+               default=3,
+               help='Copy speed of storage system'),
+    cfg.IntOpt('hitachi_copy_check_interval',
+               default=3,
+               help='Interval to check copy'),
+    cfg.IntOpt('hitachi_async_copy_check_interval',
+               default=10,
+               help='Interval to check copy asynchronously'),
+    cfg.StrOpt('hitachi_target_ports',
+               default=None,
+               help='Control port names for HostGroup or iSCSI Target'),
+    cfg.StrOpt('hitachi_group_range',
+               default=None,
+               help='Range of group number'),
+    cfg.BoolOpt('hitachi_group_request',
+                default=False,
+                secret=True,
+                help='Request for creating HostGroup or iSCSI Target'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+
+
+class TryLock(object):
+    """Non-blocking reentrant lock used as a context manager.
+
+    Entering raises HBSDError (660, '... is busy') instead of blocking when
+    the lock is already held; *desc* names the busy resource in the message.
+    """
+
+    def __init__(self):
+        self.lock = threading.RLock()
+        self.desc = None
+
+    def set_desc(self, description):
+        # Description shown in the 660 error message.
+        self.desc = description
+
+    def __enter__(self):
+        # acquire(False) never blocks; failure means the resource is busy.
+        if not self.lock.acquire(False):
+            msg = basic_lib.output_err(660, desc=self.desc)
+            raise exception.HBSDError(message=msg)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.lock.release()
+
+
+class HBSDCommon(object):
+
+    def __init__(self, conf, parent, context, db):
+        """Store config/driver/context/db handles and set up lock state.
+
+        *parent* is the owning protocol driver (FC or iSCSI).
+        """
+        self.configuration = conf
+        self.generated_from = parent
+        self.context = context
+        self.db = db
+
+        # Lock files: one system-wide, one per config group, one per
+        # storage unit (identified by serial number or unit name).
+        self.system_lock_file = SYSTEM_LOCK_FILE
+        self.service_lock_file = '%s%s' % (SERVICE_LOCK_PATH_BASE,
+                                           conf.config_group)
+        # NOTE: storage_lock_file stays unset when neither identifier is
+        # given; check_param() rejects that configuration (error 605).
+        if conf.hitachi_serial_number:
+            self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE,
+                                               six.text_type(
+                                                   conf.hitachi_serial_number))
+        elif conf.hitachi_unit_name:
+            self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE,
+                                               six.text_type(
+                                                   conf.hitachi_unit_name))
+
+        self.storage_obj_lock = threading.Lock()
+        self.volinfo_lock = threading.Lock()
+        self.volume_info = {}       # per-LDEV info, guarded by volinfo_lock
+        self.output_first = True
+
+    def get_volume(self, volume_id):
+        # Thin wrapper over the Cinder DB API.
+        return self.db.volume_get(self.context, volume_id)
+
+    def get_volume_metadata(self, volume_id):
+        # Thin wrapper over the Cinder DB API.
+        return self.db.volume_metadata_get(self.context, volume_id)
+
+    def get_snapshot_metadata(self, snapshot_id):
+        # Thin wrapper over the Cinder DB API.
+        return self.db.snapshot_metadata_get(self.context, snapshot_id)
+
+    def get_ldev(self, obj):
+        """Return *obj*'s provider_location as an int LDEV, or None.
+
+        None is returned for a missing object, a missing provider_location,
+        or one that is not a plain decimal string.
+        """
+        if not obj:
+            return None
+
+        ldev = obj.get('provider_location')
+        if not ldev or not ldev.isdigit():
+            return None
+        else:
+            return int(ldev)
+
+    def get_value(self, obj, name, key):
+        """Return the value for *key* in obj[name] metadata, or None.
+
+        obj[name] is expected to be a list of {'key': ..., 'value': ...}
+        entries (Cinder metadata rows).
+        """
+        if not obj:
+            return None
+
+        if obj.get(name):
+            for i in obj[name]:
+                if i['key'] == key:
+                    return i['value']
+        return None
+
+    def get_is_vvol(self, obj, name):
+        # True when the object's metadata marks it as a V-VOL.
+        return self.get_value(obj, name, 'type') == 'V-VOL'
+
+    def get_volume_is_vvol(self, volume):
+        # V-VOL check via the volume's metadata rows.
+        return self.get_is_vvol(volume, 'volume_metadata')
+
+    def get_snapshot_is_vvol(self, snapshot):
+        # V-VOL check via the snapshot's metadata rows.
+        return self.get_is_vvol(snapshot, 'snapshot_metadata')
+
+    def get_copy_method(self, volume):
+        """Return 'FULL' or 'THIN' for *volume*.
+
+        Per-volume metadata ('copy_method') overrides the configured
+        default. Raises HBSDError for an unknown method (602) or for THIN
+        without a configured thin pool (601).
+        """
+        method = self.get_value(volume, 'volume_metadata', 'copy_method')
+        if method:
+            if method not in COPY_METHOD:
+                msg = basic_lib.output_err(602, meta='copy_method')
+                raise exception.HBSDError(message=msg)
+            elif (method == 'THIN'
+                  and self.configuration.hitachi_thin_pool_id is None):
+                msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
+                raise exception.HBSDError(message=msg)
+        else:
+            method = self.configuration.hitachi_default_copy_method
+        return method
+
+    def _range2list(self, conf, param):
+        """Parse the 'A-B' range option *param* into [low, high] ints.
+
+        Both ends must be decimal, or both 'xx:xx:xx' hex; mixing the two
+        forms, a reversed range, or any other shape raises HBSDError (601).
+        NOTE: the local ``str`` shadows the builtin.
+        """
+        str = getattr(conf, param)
+        lists = str.split('-')
+        if len(lists) != 2:
+            msg = basic_lib.output_err(601, param=param)
+            raise exception.HBSDError(message=msg)
+
+        # Track which notation the first endpoint used so both must match.
+        first_type = None
+        for i in range(len(lists)):
+            if lists[i].isdigit():
+                lists[i] = int(lists[i], 10)
+                if first_type == 'hex':
+                    msg = basic_lib.output_err(601, param=param)
+                    raise exception.HBSDError(message=msg)
+                first_type = 'dig'
+            else:
+                if (first_type == 'dig'
+                        or not re.match('\w\w:\w\w:\w\w', lists[i])):
+                    msg = basic_lib.output_err(601, param=param)
+                    raise exception.HBSDError(message=msg)
+                try:
+                    # 'xx:xx:xx' hex form: strip colons, parse base 16.
+                    lists[i] = int(lists[i].replace(':', ''), 16)
+                    first_type = 'hex'
+                except Exception:
+                    msg = basic_lib.output_err(601, param=param)
+                    raise exception.HBSDError(message=msg)
+        if lists[0] > lists[1]:
+            msg = basic_lib.output_err(601, param=param)
+            raise exception.HBSDError(message=msg)
+        return lists
+
+    def output_param_to_log(self, storage_protocol):
+        """Log backend version and effective configuration values.
+
+        Options marked secret in volume_opts are skipped; for iSCSI,
+        hitachi_group_request is logged explicitly afterwards.
+        """
+        essential_inherited_param = ['volume_backend_name', 'volume_driver']
+        conf = self.configuration
+
+        msg = basic_lib.set_msg(1, config_group=conf.config_group)
+        LOG.info(msg)
+        version = self.command.get_comm_version()
+        # hitachi_unit_name implies the SNM2 backend; otherwise HORCM.
+        if conf.hitachi_unit_name:
+            prefix = 'HSNM2 version'
+        else:
+            prefix = 'RAID Manager version'
+        LOG.info('\t%-35s%s' % (prefix + ': ', six.text_type(version)))
+        for param in essential_inherited_param:
+            value = conf.safe_get(param)
+            LOG.info('\t%-35s%s' % (param + ': ', six.text_type(value)))
+        for opt in volume_opts:
+            if not opt.secret:
+                value = getattr(conf, opt.name)
+                LOG.info('\t%-35s%s' % (opt.name + ': ',
+                         six.text_type(value)))
+
+        if storage_protocol == 'iSCSI':
+            value = getattr(conf, 'hitachi_group_request')
+            LOG.info('\t%-35s%s' % ('hitachi_group_request: ',
+                     six.text_type(value)))
+
+    def check_param(self):
+        """Validate configuration and build the backend command object.
+
+        Raises HBSDError (601/604/605) on invalid settings. Side effects:
+        normalizes range/list options in place, sets self.command to an
+        SNM2 or HORCM backend, and sets self.pair_flock.
+        """
+        conf = self.configuration
+
+        # Exactly one of serial number / unit name must be set.
+        if conf.hitachi_unit_name and conf.hitachi_serial_number:
+            msg = basic_lib.output_err(604)
+            raise exception.HBSDError(message=msg)
+
+        if not conf.hitachi_unit_name and not conf.hitachi_serial_number:
+            msg = basic_lib.output_err(605)
+            raise exception.HBSDError(message=msg)
+
+        if conf.hitachi_pool_id is None:
+            msg = basic_lib.output_err(601, param='hitachi_pool_id')
+            raise exception.HBSDError(message=msg)
+
+        # Bounds-check the integer options declared in PARAM_RANGE.
+        for param in PARAM_RANGE.keys():
+            _value = getattr(conf, param)
+            if (_value and
+                    (not PARAM_RANGE[param]['min'] <= _value <=
+                     PARAM_RANGE[param]['max'])):
+                msg = basic_lib.output_err(601, param=param)
+                raise exception.HBSDError(message=msg)
+
+        if conf.hitachi_default_copy_method not in COPY_METHOD:
+            msg = basic_lib.output_err(601,
+                                       param='hitachi_default_copy_method')
+            raise exception.HBSDError(message=msg)
+
+        if (conf.hitachi_default_copy_method == 'THIN'
+                and conf.hitachi_thin_pool_id is None):
+            msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
+            raise exception.HBSDError(message=msg)
+
+        # Replace 'A-B' range strings with parsed [low, high] lists.
+        for param in ('hitachi_ldev_range', 'hitachi_group_range'):
+            if not getattr(conf, param):
+                continue
+            else:
+                _value = self._range2list(conf, param)
+                setattr(conf, param, _value)
+
+        if conf.hitachi_target_ports:
+            conf.hitachi_target_ports = conf.hitachi_target_ports.split(',')
+
+        # Touch every option so missing ones fail here, not mid-operation.
+        for opt in volume_opts:
+            getattr(conf, opt.name)
+
+        # Unit name selects the SNM2 CLI backend; otherwise use HORCM,
+        # which registers and validates its own extra options.
+        if conf.hitachi_unit_name:
+            self.command = snm2.HBSDSNM2(conf)
+        else:
+            conf.append_config_values(horcm.volume_opts)
+            self.command = horcm.HBSDHORCM(conf)
+            self.command.check_param()
+        self.pair_flock = self.command.set_pair_flock()
+
+    def create_lock_file(self):
+        basic_lib.create_empty_file(self.system_lock_file)
+        basic_lib.create_empty_file(self.service_lock_file)
+        basic_lib.create_empty_file(self.storage_lock_file)
+        self.command.create_lock_file()
+
+    def _add_ldev(self, volume_num, capacity, pool_id, is_vvol):
+        """Create LDEV ``volume_num`` of ``capacity`` in the given pool."""
+        self.command.comm_add_ldev(pool_id, volume_num, capacity, is_vvol)
+
+    def _get_unused_volume_num(self, ldev_range):
+        """Return an unused LDEV number within ``ldev_range``."""
+        return self.command.get_unused_ldev(ldev_range)
+
+    def add_volinfo(self, ldev, id=None, type='volume'):
+        """Ensure a volume_info entry (locks + description) exists for ldev.
+
+        NOTE: ``id`` and ``type`` shadow builtins, but they are part of the
+        public keyword interface used by callers, so they are kept as-is.
+        """
+        with self.volinfo_lock:
+            if ldev not in self.volume_info:
+                self.init_volinfo(self.volume_info, ldev)
+            if id:
+                desc = '%s %s' % (type, id)
+                self.volume_info[ldev]['in_use'].set_desc(desc)
+
+    def delete_pair(self, ldev, all_split=True, is_vvol=None):
+        """Split and delete copy pairs that involve ``ldev``.
+
+        If ldev is a P-VOL, every deletable S-VOL (not a V-VOL and in PSUS
+        state) is split; if undeletable S-VOLs remain and ``all_split`` is
+        set, HBSDBusy is raised listing them.  If ldev is an S-VOL, its
+        single pair is deleted under the P-VOL's in_use lock.  HORCM is
+        restarted whenever a non-V-VOL pair was actually deleted.
+        """
+        paired_info = self.command.get_paired_info(ldev)
+        LOG.debug('paired_info: %s' % six.text_type(paired_info))
+        pvol = paired_info['pvol']
+        svols = paired_info['svol']
+        driver = self.generated_from
+        restart = False
+        svol_list = []
+        try:
+            if pvol is None:
+                # ldev is not part of any pair; nothing to do.
+                return
+            elif pvol == ldev:
+                for svol in svols[:]:
+                    if svol['is_vvol'] or svol['status'] != basic_lib.PSUS:
+                        continue
+
+                    self.command.delete_pair(pvol, svol['lun'], False)
+                    restart = True
+                    driver.pair_terminate_connection(svol['lun'])
+                    svols.remove(svol)
+
+                # Some S-VOLs could not be split; report them as busy.
+                if all_split and svols:
+                    svol_list.append(six.text_type(svols[0]['lun']))
+                    for svol in svols[1:]:
+                        svol_list.append(', %d' % svol['lun'])
+
+                    msg = basic_lib.output_err(616, pvol=pvol,
+                                               svol=''.join(svol_list))
+                    raise exception.HBSDBusy(message=msg)
+
+                if not svols:
+                    driver.pair_terminate_connection(pvol)
+
+            else:
+                # ldev is an S-VOL: serialize against other users of pvol.
+                self.add_volinfo(pvol)
+                if not self.volume_info[pvol]['in_use'].lock.acquire(False):
+                    desc = self.volume_info[pvol]['in_use'].desc
+                    msg = basic_lib.output_err(660, desc=desc)
+                    raise exception.HBSDBusy(message=msg)
+                try:
+                    # Re-read pairing state now that the lock is held.
+                    paired_info = self.command.get_paired_info(ldev)
+                    if paired_info['pvol'] is None:
+                        return
+                    svol = paired_info['svol'][0]
+                    if svol['status'] != basic_lib.PSUS:
+                        msg = basic_lib.output_err(616, pvol=pvol, svol=ldev)
+                        raise exception.HBSDBusy(message=msg)
+
+                    self.command.delete_pair(pvol, ldev, svol['is_vvol'])
+                    if not svol['is_vvol']:
+                        restart = True
+                    driver.pair_terminate_connection(ldev)
+                    paired_info = self.command.get_paired_info(pvol)
+                    if paired_info['pvol'] is None:
+                        driver.pair_terminate_connection(pvol)
+                finally:
+                    self.volume_info[pvol]['in_use'].lock.release()
+        except Exception:
+            # Even on failure, restart HORCM if any pair was deleted above.
+            with excutils.save_and_reraise_exception():
+                if restart:
+                    try:
+                        self.command.restart_pair_horcm()
+                    except Exception as e:
+                        LOG.warning(_('Failed to restart horcm: %s') %
+                                    six.text_type(e))
+        else:
+            if (all_split or is_vvol) and restart:
+                try:
+                    self.command.restart_pair_horcm()
+                except Exception as e:
+                    LOG.warning(_('Failed to restart horcm: %s') %
+                                six.text_type(e))
+
+    def copy_async_data(self, pvol, svol, is_vvol):
+        """Copy pvol to svol with a storage-side (async) copy pair.
+
+        Connections opened here are torn down again if pair creation
+        fails, then the original exception is re-raised.
+        """
+        path_list = []
+        driver = self.generated_from
+        try:
+            with self.pair_flock:
+                self.delete_pair(pvol, all_split=False, is_vvol=is_vvol)
+                paired_info = self.command.get_paired_info(pvol)
+                if paired_info['pvol'] is None:
+                    driver.pair_initialize_connection(pvol)
+                    path_list.append(pvol)
+                driver.pair_initialize_connection(svol)
+                path_list.append(svol)
+                self.command.comm_create_pair(pvol, svol, is_vvol)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                # Best-effort cleanup of connections made in this call.
+                for ldev in path_list:
+                    try:
+                        driver.pair_terminate_connection(ldev)
+                    except Exception as ex:
+                        msg = basic_lib.set_msg(
+                            310, ldev=ldev, reason=six.text_type(ex))
+                        LOG.warning(msg)
+
+    def copy_sync_data(self, src_ldev, dest_ldev, size):
+        """Host-side copy: attach both LDEVs and dd the data across.
+
+        ``size * 1024`` is passed to copy_volume — presumably GB expressed
+        in MB blocks; confirm against volume_utils.copy_volume.
+        """
+        src_vol = {'provider_location': six.text_type(src_ldev),
+                   'id': 'src_vol'}
+        dest_vol = {'provider_location': six.text_type(dest_ldev),
+                    'id': 'dest_vol'}
+        properties = utils.brick_get_connector_properties()
+        driver = self.generated_from
+        src_info = None
+        dest_info = None
+        try:
+            dest_info = driver._attach_volume(self.context, dest_vol,
+                                              properties)
+            src_info = driver._attach_volume(self.context, src_vol,
+                                             properties)
+            volume_utils.copy_volume(src_info['device']['path'],
+                                     dest_info['device']['path'], size * 1024,
+                                     self.configuration.volume_dd_blocksize)
+        finally:
+            # Detach whatever was successfully attached, in reverse order.
+            if dest_info:
+                driver._detach_volume(self.context, dest_info,
+                                      dest_vol, properties)
+            if src_info:
+                driver._detach_volume(self.context, src_info,
+                                      src_vol, properties)
+        self.command.discard_zero_page(dest_ldev)
+
+    def copy_data(self, pvol, size, p_is_vvol, method):
+        type = 'Normal'
+        is_vvol = method == 'THIN'
+        svol = self._create_volume(size, is_vvol=is_vvol)
+        try:
+            if p_is_vvol:
+                self.copy_sync_data(pvol, svol, size)
+            else:
+                if is_vvol:
+                    type = 'V-VOL'
+                self.copy_async_data(pvol, svol, is_vvol)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                try:
+                    self.delete_ldev(svol, is_vvol)
+                except Exception as ex:
+                    msg = basic_lib.set_msg(
+                        313, ldev=svol, reason=six.text_type(ex))
+                    LOG.warning(msg)
+
+        return six.text_type(svol), type
+
+    def add_lun(self, command, hostgroups, ldev, is_once=False):
+        lock = basic_lib.get_process_lock(self.storage_lock_file)
+        with lock:
+            self.command.comm_add_lun(command, hostgroups, ldev, is_once)
+
+    def create_ldev(self, size, ldev_range, pool_id, is_vvol):
+        """Create a new LDEV, retrying with a fresh number on collision.
+
+        Raises HBSDError (636) when all retries are exhausted.
+        """
+        LOG.debug('create start (normal)')
+        for i in basic_lib.DEFAULT_TRY_RANGE:
+            LOG.debug('Try number: %(tries)s / %(max_tries)s' %
+                      {'tries': i + 1,
+                       'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)})
+            new_ldev = self._get_unused_volume_num(ldev_range)
+            try:
+                self._add_ldev(new_ldev, size, pool_id, is_vvol)
+            except exception.HBSDNotFound:
+                # The chosen number was taken in the meantime; pick again.
+                msg = basic_lib.set_msg(312, resource='LDEV')
+                LOG.warning(msg)
+                continue
+            else:
+                break
+        else:
+            # for-else: no attempt succeeded.
+            msg = basic_lib.output_err(636)
+            raise exception.HBSDError(message=msg)
+        LOG.debug('create end (normal: %s)' % six.text_type(new_ldev))
+        self.init_volinfo(self.volume_info, new_ldev)
+        return new_ldev
+
+    def _create_volume(self, size, is_vvol=False):
+        ldev_range = self.configuration.hitachi_ldev_range
+        if not ldev_range:
+            ldev_range = DEFAULT_LDEV_RANGE
+        pool_id = self.configuration.hitachi_pool_id
+
+        lock = basic_lib.get_process_lock(self.storage_lock_file)
+        with nested(self.storage_obj_lock, lock):
+            ldev = self.create_ldev(size, ldev_range, pool_id, is_vvol)
+        return ldev
+
+    def create_volume(self, volume):
+        volume_metadata = self.get_volume_metadata(volume['id'])
+        volume_metadata['type'] = 'Normal'
+
+        size = volume['size']
+        ldev = self._create_volume(size)
+        volume_metadata['ldev'] = six.text_type(ldev)
+
+        return {'provider_location': six.text_type(ldev),
+                'metadata': volume_metadata}
+
+    def delete_ldev(self, ldev, is_vvol):
+        """Delete an LDEV after splitting any pairs it belongs to."""
+        LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)'
+                  % {'ldev': ldev, 'vvol': is_vvol})
+        # Pairs must be gone before the LDEV itself can be removed.
+        with self.pair_flock:
+            self.delete_pair(ldev)
+        self.command.comm_delete_ldev(ldev, is_vvol)
+        with self.volinfo_lock:
+            if ldev in self.volume_info:
+                self.volume_info.pop(ldev)
+        LOG.debug('delete_ldev is finished '
+                  '(LDEV: %(ldev)d, is_vvol: %(vvol)s)'
+                  % {'ldev': ldev, 'vvol': is_vvol})
+
+    def delete_volume(self, volume):
+        """Delete the LDEV backing ``volume``.
+
+        Raises VolumeIsBusy when the LDEV is in use or its pair cannot
+        be split; a missing LDEV is only logged.
+        """
+        ldev = self.get_ldev(volume)
+        if ldev is None:
+            msg = basic_lib.set_msg(
+                304, method='delete_volume', id=volume['id'])
+            LOG.warning(msg)
+            return
+        self.add_volinfo(ldev, volume['id'])
+        # Non-blocking acquire: busy LDEVs must not be deleted.
+        if not self.volume_info[ldev]['in_use'].lock.acquire(False):
+            desc = self.volume_info[ldev]['in_use'].desc
+            basic_lib.output_err(660, desc=desc)
+            raise exception.VolumeIsBusy(volume_name=volume['name'])
+        try:
+            is_vvol = self.get_volume_is_vvol(volume)
+            try:
+                self.delete_ldev(ldev, is_vvol)
+            except exception.HBSDNotFound:
+                # Already gone on the array; drop our bookkeeping entry.
+                with self.volinfo_lock:
+                    if ldev in self.volume_info:
+                        self.volume_info.pop(ldev)
+                msg = basic_lib.set_msg(
+                    305, type='volume', id=volume['id'])
+                LOG.warning(msg)
+            except exception.HBSDBusy:
+                raise exception.VolumeIsBusy(volume_name=volume['name'])
+        finally:
+            # delete_ldev may have dropped the entry; release only if kept.
+            if ldev in self.volume_info:
+                self.volume_info[ldev]['in_use'].lock.release()
+
+    def check_volume_status(self, volume, is_vvol):
+        if not is_vvol:
+            status = VALID_DP_VOLUME_STATUS
+        else:
+            status = VALID_V_VOLUME_STATUS
+        if volume['status'] not in status:
+            msg = basic_lib.output_err(654, status=volume['status'])
+            raise exception.HBSDError(message=msg)
+
+    def create_snapshot(self, snapshot):
+        """Create a snapshot by copying the source volume's LDEV."""
+        src_ref = self.get_volume(snapshot['volume_id'])
+        pvol = self.get_ldev(src_ref)
+        if pvol is None:
+            msg = basic_lib.output_err(624, type='volume', id=src_ref['id'])
+            raise exception.HBSDError(message=msg)
+
+        self.add_volinfo(pvol, src_ref['id'])
+        # Hold the source LDEV's in_use lock for the whole copy.
+        with self.volume_info[pvol]['in_use']:
+            is_vvol = self.get_volume_is_vvol(src_ref)
+            self.check_volume_status(src_ref, is_vvol)
+            size = snapshot['volume_size']
+            snap_metadata = self.get_snapshot_metadata(snapshot['id'])
+            method = None if is_vvol else self.get_copy_method(src_ref)
+
+            svol, type = self.copy_data(pvol, size, is_vvol, method)
+
+        if type == 'V-VOL':
+            snap_metadata['type'] = type
+            snap_metadata['ldev'] = svol
+
+        snapshot_metadata = api._metadata_refs(snap_metadata,
+                                               models.SnapshotMetadata)
+        return {'provider_location': svol,
+                'snapshot_metadata': snapshot_metadata}
+
+    def delete_snapshot(self, snapshot):
+        """Delete the LDEV backing ``snapshot``.
+
+        Mirrors delete_volume: raises SnapshotIsBusy when in use or
+        unsplittable; a missing LDEV is only logged.
+        """
+        ldev = self.get_ldev(snapshot)
+        if ldev is None:
+            msg = basic_lib.set_msg(
+                304, method='delete_snapshot', id=snapshot['id'])
+            LOG.warning(msg)
+            return
+        self.add_volinfo(ldev, id=snapshot['id'], type='snapshot')
+        # Non-blocking acquire: busy LDEVs must not be deleted.
+        if not self.volume_info[ldev]['in_use'].lock.acquire(False):
+            desc = self.volume_info[ldev]['in_use'].desc
+            basic_lib.output_err(660, desc=desc)
+            raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
+        try:
+            is_vvol = self.get_snapshot_is_vvol(snapshot)
+            try:
+                self.delete_ldev(ldev, is_vvol)
+            except exception.HBSDNotFound:
+                # Already gone on the array; drop our bookkeeping entry.
+                with self.volinfo_lock:
+                    if ldev in self.volume_info:
+                        self.volume_info.pop(ldev)
+                msg = basic_lib.set_msg(
+                    305, type='snapshot', id=snapshot['id'])
+                LOG.warning(msg)
+            except exception.HBSDBusy:
+                raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
+        finally:
+            if ldev in self.volume_info:
+                self.volume_info[ldev]['in_use'].lock.release()
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Clone ``src_vref`` into ``volume`` (sizes must match)."""
+        pvol = self.get_ldev(src_vref)
+        if pvol is None:
+            msg = basic_lib.output_err(624, type='volume', id=src_vref['id'])
+            raise exception.HBSDError(message=msg)
+
+        self.add_volinfo(pvol, src_vref['id'])
+        # Hold the source LDEV's in_use lock for the whole copy.
+        with self.volume_info[pvol]['in_use']:
+            is_vvol = self.get_volume_is_vvol(src_vref)
+            self.check_volume_status(self.get_volume(src_vref['id']), is_vvol)
+            size = volume['size']
+            src_size = src_vref['size']
+            if size != src_size:
+                # Resizing during clone is not supported (error 617).
+                msg = basic_lib.output_err(617, type='volume',
+                                           volume_id=volume['id'])
+                raise exception.HBSDError(message=msg)
+
+            metadata = self.get_volume_metadata(volume['id'])
+            method = None if is_vvol else self.get_copy_method(volume)
+
+            svol, type = self.copy_data(pvol, size, is_vvol, method)
+
+            metadata['type'] = type
+            metadata['volume'] = src_vref['id']
+            metadata['ldev'] = svol
+
+        return {'provider_location': svol, 'metadata': metadata}
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create ``volume`` from ``snapshot`` (sizes must match)."""
+        pvol = self.get_ldev(snapshot)
+        if pvol is None:
+            msg = basic_lib.output_err(624, type='snapshot', id=snapshot['id'])
+            raise exception.HBSDError(message=msg)
+
+        self.add_volinfo(pvol, id=snapshot['id'], type='snapshot')
+        # Hold the snapshot LDEV's in_use lock for the whole copy.
+        with self.volume_info[pvol]['in_use']:
+            is_vvol = self.get_snapshot_is_vvol(snapshot)
+            if snapshot['status'] != 'available':
+                msg = basic_lib.output_err(655, status=snapshot['status'])
+                raise exception.HBSDError(message=msg)
+
+            size = volume['size']
+            src_size = snapshot['volume_size']
+            if size != src_size:
+                # Resizing during this operation is not supported (617).
+                msg = basic_lib.output_err(617, type='snapshot',
+                                           volume_id=volume['id'])
+                raise exception.HBSDError(message=msg)
+
+            metadata = self.get_volume_metadata(volume['id'])
+            method = None if is_vvol else self.get_copy_method(volume)
+            svol, type = self.copy_data(pvol, size, is_vvol, method)
+
+            metadata['type'] = type
+            metadata['snapshot'] = snapshot['id']
+            metadata['ldev'] = svol
+
+        return {'provider_location': svol, 'metadata': metadata}
+
+    def _extend_volume(self, ldev, old_size, new_size):
+        """Extend an LDEV after splitting any pairs it belongs to."""
+        with self.pair_flock:
+            self.delete_pair(ldev)
+        self.command.comm_extend_ldev(ldev, old_size, new_size)
+
+    def extend_volume(self, volume, new_size):
+        """Extend ``volume`` to ``new_size``; V-VOLs cannot be extended."""
+        pvol = self.get_ldev(volume)
+        self.add_volinfo(pvol, volume['id'])
+        with self.volume_info[pvol]['in_use']:
+            if self.get_volume_is_vvol(volume):
+                msg = basic_lib.output_err(618, volume_id=volume['id'])
+                raise exception.HBSDError(message=msg)
+            self._extend_volume(pvol, volume['size'], new_size)
+
+    def output_backend_available_once(self):
+        if self.output_first:
+            self.output_first = False
+            msg = basic_lib.set_msg(
+                3, config_group=self.configuration.config_group)
+            LOG.warning(msg)
+
+    def update_volume_stats(self, storage_protocol):
+        data = {}
+        total_gb = None
+        free_gb = None
+        data['volume_backend_name'] = self.configuration.safe_get(
+            'volume_backend_name') or 'HBSD%s' % storage_protocol
+        data['vendor_name'] = 'Hitachi'
+        data['driver_version'] = VERSION
+        data['storage_protocol'] = storage_protocol
+
+        try:
+            total_gb, free_gb = self.command.comm_get_dp_pool(
+                self.configuration.hitachi_pool_id)
+        except Exception as ex:
+            LOG.error(_('Failed to update volume status: %s') %
+                      six.text_type(ex))
+            return None
+
+        data['total_capacity_gb'] = total_gb
+        data['free_capacity_gb'] = free_gb
+        data['reserved_percentage'] = self.configuration.safe_get(
+            'reserved_percentage')
+        data['QoS_support'] = False
+
+        LOG.debug('Updating volume status (%s)' % data)
+
+        return data
+
+    def init_volinfo(self, vol_info, ldev):
+        """Create the per-LDEV lock entry used to serialize access."""
+        vol_info[ldev] = {'in_use': TryLock(), 'lock': threading.Lock()}
diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py
new file mode 100644 (file)
index 0000000..c461e0f
--- /dev/null
@@ -0,0 +1,521 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Fibre channel Cinder volume driver for Hitachi storage.
+
+"""
+
+from contextlib import nested
+import os
+import threading
+
+from oslo.config import cfg
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder import utils
+import cinder.volume.driver
+from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
+from cinder.volume.drivers.hitachi import hbsd_common as common
+from cinder.zonemanager import utils as fczm_utils
+
+LOG = logging.getLogger(__name__)
+
+# FC-specific options; the common Hitachi options are defined in
+# hbsd_common and registered by the driver's __init__.
+volume_opts = [
+    cfg.BoolOpt('hitachi_zoning_request',
+                default=False,
+                help='Request for FC Zone creating HostGroup'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+
+
+class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
+    """Fibre Channel volume driver for Hitachi storage (HBSD)."""
+
+    VERSION = common.VERSION
+
+    def __init__(self, *args, **kwargs):
+        """Initialize driver state; array setup happens in do_setup()."""
+        # Force the C locale — external storage CLI output parsing
+        # presumably depends on untranslated messages; confirm.
+        os.environ['LANG'] = 'C'
+        super(HBSDFCDriver, self).__init__(*args, **kwargs)
+        self.db = kwargs.get('db')
+        self.common = None
+        self.configuration.append_config_values(common.volume_opts)
+        self._stats = {}
+        self.context = None
+        self.max_hostgroups = None
+        self.pair_hostgroups = []
+        self.pair_hostnum = 0
+        # Set when do_setup() completes; volume operations wait on it.
+        self.do_setup_status = threading.Event()
+
+    def _check_param(self):
+        """Register FC-specific options and verify they are readable."""
+        self.configuration.append_config_values(volume_opts)
+        for opt in volume_opts:
+            # Accessing each option raises if its value is invalid.
+            getattr(self.configuration, opt.name)
+
+    def check_param(self):
+        """Validate common and FC-specific configuration.
+
+        HBSD errors pass through; any other failure is wrapped in
+        HBSDError 601.
+        """
+        try:
+            self.common.check_param()
+            self._check_param()
+        except exception.HBSDError:
+            raise
+        except Exception as ex:
+            msg = basic_lib.output_err(601, param=six.text_type(ex))
+            raise exception.HBSDError(message=msg)
+
+    def output_param_to_log(self):
+        """Log all non-secret configuration values under the system lock."""
+        lock = basic_lib.get_process_lock(self.common.system_lock_file)
+
+        with lock:
+            self.common.output_param_to_log('FC')
+            for opt in volume_opts:
+                if not opt.secret:
+                    value = getattr(self.configuration, opt.name)
+                    LOG.info('\t%-35s%s' %
+                             (opt.name + ': ', six.text_type(value)))
+            self.common.command.output_param_to_log(self.configuration)
+
+    def _add_wwn(self, hgs, port, gid, wwns):
+        """Register each WWN in the host group and record it in ``hgs``."""
+        for wwn in wwns:
+            wwn = six.text_type(wwn)
+            self.common.command.comm_add_hbawwn(port, gid, wwn)
+            detected = self.common.command.is_detected(port, wwn)
+            hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
+                        'detected': detected})
+        LOG.debug('Create host group for %s' % hgs)
+
+    def _add_lun(self, hostgroups, ldev):
+        if hostgroups is self.pair_hostgroups:
+            is_once = True
+        else:
+            is_once = False
+        self.common.add_lun('auhgmap', hostgroups, ldev, is_once)
+
+    def _delete_lun(self, hostgroups, ldev):
+        """Unmap ldev; an already-missing mapping is only logged."""
+        try:
+            self.common.command.comm_delete_lun(hostgroups, ldev)
+        except exception.HBSDNotFound:
+            msg = basic_lib.set_msg(301, ldev=ldev)
+            LOG.warning(msg)
+
+    def _get_hgname_gid(self, port, host_grp_name):
+        """Return the GID of the named host group on ``port``, if any."""
+        return self.common.command.get_hgname_gid(port, host_grp_name)
+
+    def _get_unused_gid(self, port):
+        group_range = self.configuration.hitachi_group_range
+        if not group_range:
+            group_range = basic_lib.DEFAULT_GROUP_RANGE
+        return self.common.command.get_unused_gid(group_range, port)
+
+    def _get_hostgroup_info(self, hgs, wwns, login=True):
+        """Fill ``hgs`` with host-group info for ``wwns``; return ports."""
+        target_ports = self.configuration.hitachi_target_ports
+        return self.common.command.comm_get_hostgroup_info(
+            hgs, wwns, target_ports, login=login)
+
+    def _fill_group(self, hgs, port, host_grp_name, wwns):
+        """Find or create the named host group on ``port`` and record it.
+
+        If the group must be created, an unused GID is retried on
+        collision; a group created here is deleted again when WWN
+        registration fails.
+        """
+        added_hostgroup = False
+        LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
+                  'name: %(name)s wwns: %(wwns)s)'
+                  % {'hgs': hgs, 'port': port,
+                     'name': host_grp_name, 'wwns': wwns})
+        gid = self._get_hgname_gid(port, host_grp_name)
+        if gid is None:
+            for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
+                try:
+                    gid = self._get_unused_gid(port)
+                    self._add_hostgroup(port, gid, host_grp_name)
+                    added_hostgroup = True
+                except exception.HBSDNotFound:
+                    # GID was taken in the meantime; pick another.
+                    gid = None
+                    msg = basic_lib.set_msg(312, resource='GID')
+                    LOG.warning(msg)
+                    continue
+                else:
+                    LOG.debug('Completed to add host target'
+                              '(port: %(port)s gid: %(gid)d)'
+                              % {'port': port, 'gid': gid})
+                    break
+            else:
+                # for-else: retries exhausted.
+                msg = basic_lib.output_err(641)
+                raise exception.HBSDError(message=msg)
+
+        try:
+            if wwns:
+                self._add_wwn(hgs, port, gid, wwns)
+            else:
+                # Pair host groups are created without initiator WWNs.
+                hgs.append({'port': port, 'gid': gid, 'initiator_wwn': None,
+                            'detected': True})
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                if added_hostgroup:
+                    self._delete_hostgroup(port, gid, host_grp_name)
+
+    def add_hostgroup_master(self, hgs, master_wwns, host_ip, security_ports):
+        target_ports = self.configuration.hitachi_target_ports
+        group_request = self.configuration.hitachi_group_request
+        wwns = []
+        for wwn in master_wwns:
+            wwns.append(wwn.lower())
+        if target_ports and group_request:
+            host_grp_name = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
+            for port in security_ports:
+                wwns_copy = wwns[:]
+                for hostgroup in hgs:
+                    if (hostgroup['port'] == port and
+                            hostgroup['initiator_wwn'].lower() in wwns_copy):
+                        wwns_copy.remove(hostgroup['initiator_wwn'].lower())
+                if wwns_copy:
+                    try:
+                        self._fill_group(hgs, port, host_grp_name, wwns_copy)
+                    except Exception as ex:
+                        LOG.warning(_('Failed to add host group: %s') %
+                                    six.text_type(ex))
+                        msg = basic_lib.set_msg(
+                            308, port=port, name=host_grp_name)
+                        LOG.warning(msg)
+
+        if not hgs:
+            msg = basic_lib.output_err(649)
+            raise exception.HBSDError(message=msg)
+
+    def add_hostgroup_pair(self, pair_hostgroups):
+        """Find or create the dedicated 'pair' host group for copy I/O.
+
+        Skipped when hitachi_unit_name is set (SNM2 backend — see
+        HBSDCommon, which selects SNM2 in that case).
+        """
+        if self.configuration.hitachi_unit_name:
+            return
+
+        properties = utils.brick_get_connector_properties()
+        if 'wwpns' not in properties:
+            msg = basic_lib.output_err(650, resource='HBA')
+            raise exception.HBSDError(message=msg)
+        hostgroups = []
+        self._get_hostgroup_info(hostgroups, properties['wwpns'],
+                                 login=False)
+        host_grp_name = '%spair%02x' % (basic_lib.NAME_PREFIX,
+                                        self.pair_hostnum)
+        for hostgroup in hostgroups:
+            gid = self._get_hgname_gid(hostgroup['port'],
+                                       host_grp_name)
+
+            # When 'gid' is 0, it should be true.
+            # So, it cannot remove 'is not None'.
+            if gid is not None:
+                pair_hostgroups.append({'port': hostgroup['port'],
+                                        'gid': gid, 'initiator_wwn': None,
+                                        'detected': True})
+                break
+
+        if not pair_hostgroups:
+            # No existing group: try each port, re-raising only when the
+            # last port also fails.
+            for hostgroup in hostgroups:
+                pair_port = hostgroup['port']
+                try:
+                    self._fill_group(pair_hostgroups, pair_port,
+                                     host_grp_name, None)
+                except Exception:
+                    if hostgroup is hostgroups[-1]:
+                        raise
+                else:
+                    break
+
+    def add_hostgroup(self):
+        """Set up the master and pair host groups for this node's HBAs."""
+        properties = utils.brick_get_connector_properties()
+        if 'wwpns' not in properties:
+            msg = basic_lib.output_err(650, resource='HBA')
+            raise exception.HBSDError(message=msg)
+        LOG.debug("wwpns: %s" % properties['wwpns'])
+
+        hostgroups = []
+        security_ports = self._get_hostgroup_info(
+            hostgroups, properties['wwpns'], login=False)
+        self.add_hostgroup_master(hostgroups, properties['wwpns'],
+                                  properties['ip'], security_ports)
+        self.add_hostgroup_pair(self.pair_hostgroups)
+
+    def _get_target_wwn(self, port):
+        """Return the target WWN of the given storage port."""
+        target_wwns = self.common.command.comm_set_target_wwns(
+            self.configuration.hitachi_target_ports)
+        return target_wwns[port]
+
+    def _add_hostgroup(self, port, gid, host_grp_name):
+        """Create host group ``host_grp_name`` at port/gid on the array."""
+        self.common.command.comm_add_hostgrp(port, gid, host_grp_name)
+
+    def _delete_hostgroup(self, port, gid, host_grp_name):
+        """Delete a host group, logging a warning before re-raising."""
+        try:
+            self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                msg = basic_lib.set_msg(
+                    306, port=port, gid=gid, name=host_grp_name)
+                LOG.warning(msg)
+
+    def _check_volume_mapping(self, hostgroup):
+        port = hostgroup['port']
+        gid = hostgroup['gid']
+        if self.common.command.get_hostgroup_luns(port, gid):
+            return True
+        else:
+            return False
+
+    def _build_initiator_target_map(self, hostgroups, terminate=False):
+        """Build (target_wwns, initiator->targets map) for FC zoning.
+
+        The map is only populated when zoning is requested; on
+        terminate, host groups that still have LUNs mapped are left
+        out so their zones are preserved.
+        """
+        target_wwns = []
+        init_targ_map = {}
+
+        target_ports = self.configuration.hitachi_target_ports
+        zoning_request = self.configuration.hitachi_zoning_request
+
+        for hostgroup in hostgroups:
+            target_wwn = self._get_target_wwn(hostgroup['port'])
+
+            if target_wwn not in target_wwns:
+                target_wwns.append(target_wwn)
+
+            if target_ports and zoning_request:
+                if terminate and self._check_volume_mapping(hostgroup):
+                    continue
+
+                initiator_wwn = hostgroup['initiator_wwn']
+                if initiator_wwn not in init_targ_map:
+                    init_targ_map[initiator_wwn] = []
+
+                init_targ_map[initiator_wwn].append(target_wwn)
+
+        return target_wwns, init_targ_map
+
+    def _get_properties(self, volume, hostgroups, terminate=False):
+        """Build the FC connection properties dict for initialize/terminate."""
+        properties = {}
+
+        target_wwns, init_targ_map = self._build_initiator_target_map(
+            hostgroups, terminate)
+
+        properties['target_wwn'] = target_wwns
+
+        if init_targ_map:
+            properties['initiator_target_map'] = init_targ_map
+
+        if not terminate:
+            # All host groups share the same LUN for this volume.
+            properties['target_lun'] = hostgroups[0]['lun']
+
+        return properties
+
+    def do_setup(self, context):
+        """Connect to the array, create host groups, and mark setup done."""
+        self.context = context
+        self.common = common.HBSDCommon(self.configuration, self,
+                                        context, self.db)
+
+        self.check_param()
+
+        self.common.create_lock_file()
+
+        self.common.command.connect_storage()
+        self.max_hostgroups = self.common.command.get_max_hostgroups()
+
+        # Serialize host-group creation across driver processes.
+        lock = basic_lib.get_process_lock(self.common.service_lock_file)
+        with lock:
+            self.add_hostgroup()
+
+        self.output_param_to_log()
+        # Unblock volume operations waiting in other threads.
+        self.do_setup_status.set()
+
+    def check_for_setup_error(self):
+        """No-op: setup problems are raised directly from do_setup()."""
+        pass
+
+    def extend_volume(self, volume, new_size):
+        """Extend ``volume`` once driver setup has completed."""
+        self.do_setup_status.wait()
+        self.common.extend_volume(volume, new_size)
+
+    def get_volume_stats(self, refresh=False):
+        if refresh:
+            if self.do_setup_status.isSet():
+                self.common.output_backend_available_once()
+                _stats = self.common.update_volume_stats("FC")
+                if _stats:
+                    self._stats = _stats
+        return self._stats
+
+    def create_volume(self, volume):
+        self.do_setup_status.wait()
+        metadata = self.common.create_volume(volume)
+        return metadata
+
+    def delete_volume(self, volume):
+        """Delete a volume once driver setup has completed."""
+        self.do_setup_status.wait()
+        self.common.delete_volume(volume)
+
+    def create_snapshot(self, snapshot):
+        self.do_setup_status.wait()
+        metadata = self.common.create_snapshot(snapshot)
+        return metadata
+
+    def delete_snapshot(self, snapshot):
+        self.do_setup_status.wait()
+        self.common.delete_snapshot(snapshot)
+
+    def create_cloned_volume(self, volume, src_vref):
+        self.do_setup_status.wait()
+        metadata = self.common.create_cloned_volume(volume, src_vref)
+        return metadata
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        self.do_setup_status.wait()
+        metadata = self.common.create_volume_from_snapshot(volume, snapshot)
+        return metadata
+
+    def _initialize_connection(self, ldev, connector, src_hgs=None):
+        """Map ldev into host groups and return the host groups used.
+
+        Two call paths exist:
+        * initialize_connection() passes src_hgs=None: host groups are
+          discovered from the connector's WWPNs (and created if needed).
+        * pair_initialize_connection() passes the driver's own
+          pair_hostgroups list: it is used as-is, and a missing LUN is
+          retried after creating a fresh pair host group.
+        """
+        LOG.debug("Call _initialize_connection "
+                  "(config_group: %(group)s ldev: %(ldev)d)"
+                  % {'group': self.configuration.config_group, 'ldev': ldev})
+        # NOTE(review): identity ('is') comparison distinguishes the pair
+        # path (the exact pair_hostgroups object) from the normal path
+        # (src_hgs is None) -- confirm no caller passes a different list.
+        if src_hgs is self.pair_hostgroups:
+            hostgroups = src_hgs
+        else:
+            hostgroups = []
+            security_ports = self._get_hostgroup_info(
+                hostgroups, connector['wwpns'], login=True)
+            self.add_hostgroup_master(hostgroups, connector['wwpns'],
+                                      connector['ip'], security_ports)
+
+        if src_hgs is self.pair_hostgroups:
+            try:
+                self._add_lun(hostgroups, ldev)
+            except exception.HBSDNotFound:
+                msg = basic_lib.set_msg(311, ldev=ldev)
+                LOG.warning(msg)
+                # Allocate a new pair host group and retry; give up only
+                # after every candidate GID has been tried.
+                for i in range(self.max_hostgroups + 1):
+                    self.pair_hostnum += 1
+                    pair_hostgroups = []
+                    try:
+                        self.add_hostgroup_pair(pair_hostgroups)
+                        self.pair_hostgroups.extend(pair_hostgroups)
+                    except exception.HBSDNotFound:
+                        if i >= self.max_hostgroups:
+                            msg = basic_lib.output_err(648, resource='GID')
+                            raise exception.HBSDError(message=msg)
+                    else:
+                        break
+                # NOTE(review): re-enters this method via
+                # pair_initialize_connection with the extended pair list.
+                self.pair_initialize_connection(ldev)
+        else:
+            self._add_lun(hostgroups, ldev)
+
+        return hostgroups
+
+    @fczm_utils.AddFCZone
+    def initialize_connection(self, volume, connector):
+        """Attach entry point; returns fibre_channel connection info.
+
+        Decorated with AddFCZone so the FC zone manager can create zones
+        from the returned initiator_target_map.
+        """
+        self.do_setup_status.wait()
+        ldev = self.common.get_ldev(volume)
+        if ldev is None:
+            msg = basic_lib.output_err(619, volume_id=volume['id'])
+            raise exception.HBSDError(message=msg)
+        self.common.add_volinfo(ldev, volume['id'])
+        # Serialize per-LDEV mapping changes across threads.
+        with nested(self.common.volume_info[ldev]['lock'],
+                    self.common.volume_info[ldev]['in_use']):
+            hostgroups = self._initialize_connection(ldev, connector)
+            properties = self._get_properties(volume, hostgroups)
+            LOG.debug('Initialize volume_info: %s'
+                      % self.common.volume_info)
+
+        LOG.debug('HFCDrv: properties=%s' % properties)
+        return {
+            'driver_volume_type': 'fibre_channel',
+            'data': properties
+        }
+
+    def _terminate_connection(self, ldev, connector, src_hgs):
+        # Unmap ldev from every host group in src_hgs.  The list is copied
+        # so _delete_lun cannot mutate the caller's list.
+        LOG.debug("Call _terminate_connection(config_group: %s)"
+                  % self.configuration.config_group)
+        hostgroups = src_hgs[:]
+        self._delete_lun(hostgroups, ldev)
+        LOG.debug("*** _terminate_ ***")
+
+    @fczm_utils.RemoveFCZone
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Detach entry point; returns FC info for zone removal.
+
+        Silently returns when the volume has no LDEV (already gone);
+        raises when the connector carries no WWPNs or no matching host
+        group exists.
+        """
+        self.do_setup_status.wait()
+        ldev = self.common.get_ldev(volume)
+        if ldev is None:
+            msg = basic_lib.set_msg(302, volume_id=volume['id'])
+            LOG.warning(msg)
+            return
+
+        if 'wwpns' not in connector:
+            msg = basic_lib.output_err(650, resource='HBA')
+            raise exception.HBSDError(message=msg)
+
+        hostgroups = []
+        self._get_hostgroup_info(hostgroups,
+                                 connector['wwpns'], login=False)
+        if not hostgroups:
+            msg = basic_lib.output_err(649)
+            raise exception.HBSDError(message=msg)
+
+        self.common.add_volinfo(ldev, volume['id'])
+        # Serialize per-LDEV mapping changes across threads.
+        with nested(self.common.volume_info[ldev]['lock'],
+                    self.common.volume_info[ldev]['in_use']):
+            self._terminate_connection(ldev, connector, hostgroups)
+            properties = self._get_properties(volume, hostgroups,
+                                              terminate=True)
+            LOG.debug('Terminate volume_info: %s' % self.common.volume_info)
+
+        return {
+            'driver_volume_type': 'fibre_channel',
+            'data': properties
+        }
+
+    def pair_initialize_connection(self, ldev):
+        # NOTE(review): hitachi_unit_name being set presumably indicates an
+        # SNM2-managed array that needs no pair mapping -- confirm.
+        if self.configuration.hitachi_unit_name:
+            return
+        self._initialize_connection(ldev, None, self.pair_hostgroups)
+
+    def pair_terminate_connection(self, ldev):
+        if self.configuration.hitachi_unit_name:
+            return
+        self._terminate_connection(ldev, None, self.pair_hostgroups)
+
+    def discard_zero_page(self, volume):
+        # Reclaim zeroed thin-provisioning pages after bulk data copies.
+        self.common.command.discard_zero_page(self.common.get_ldev(volume))
+
+    # Export management is handled on the storage side; these driver
+    # hooks are intentionally no-ops.
+    def create_export(self, context, volume):
+        pass
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def remove_export(self, context, volume):
+        pass
+
+    # Copy/restore wrappers: wait for setup, delegate to the base driver,
+    # then reclaim zero pages on the written-to volume where applicable.
+    def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
+        self.do_setup_status.wait()
+        super(HBSDFCDriver, self).copy_volume_data(context, src_vol,
+                                                   dest_vol, remote)
+        self.discard_zero_page(dest_vol)
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        self.do_setup_status.wait()
+        super(HBSDFCDriver, self).copy_image_to_volume(context, volume,
+                                                       image_service,
+                                                       image_id)
+        self.discard_zero_page(volume)
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        self.do_setup_status.wait()
+        # Refuse to upload a volume that is currently attached.
+        if (volume['instance_uuid'] or volume['attached_host']):
+            desc = 'volume %s' % volume['id']
+            msg = basic_lib.output_err(660, desc=desc)
+            raise exception.HBSDError(message=msg)
+        super(HBSDFCDriver, self).copy_volume_to_image(context, volume,
+                                                       image_service,
+                                                       image_meta)
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        self.do_setup_status.wait()
+        super(HBSDFCDriver, self).restore_backup(context, backup,
+                                                 volume, backup_service)
+        self.discard_zero_page(volume)
diff --git a/cinder/volume/drivers/hitachi/hbsd_horcm.py b/cinder/volume/drivers/hitachi/hbsd_horcm.py
new file mode 100644 (file)
index 0000000..58ff10f
--- /dev/null
@@ -0,0 +1,1509 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from contextlib import nested
+from functools import wraps
+import os
+import re
+import shlex
+import threading
+import time
+
+from oslo.config import cfg
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder.openstack.common import processutils as putils
+from cinder import utils
+from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
+
+# Number of LDEVs fetched per 'raidcom get ldev' scan iteration.
+GETSTORAGEARRAY_ONCE = 100
+MAX_SNAPSHOT_COUNT = 1021
+# SSB (sense-byte) codes returned by raidcom for specific failures.
+SNAP_LAST_PATH_SSB = '0xB958,0x020A'
+HOST_IO_SSB = '0xB958,0x0233'
+INVALID_LUN_SSB = '0x2E20,0x0000'
+INTERCEPT_LDEV_SSB = '0x2E22,0x0001'
+HOSTGROUP_INSTALLED = '0xB956,0x3173'
+
+# Timeouts and polling intervals (seconds).
+LDEV_STATUS_WAITTIME = 120
+LUN_DELETE_WAITTIME = basic_lib.DEFAULT_PROCESS_WAITTIME
+LUN_DELETE_INTERVAL = 3
+EXEC_MAX_WAITTIME = 30
+EXEC_RETRY_INTERVAL = 5
+HORCM_WAITTIME = 1
+
+# Prefixes for inter-process lock files.
+RAIDCOM_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_'
+HORCMGR_LOCK_FILE = basic_lib.LOCK_DIR + 'horcmgr_'
+RESOURCE_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_resource_'
+
+# Map raidcom pair-status strings to internal status codes.
+STATUS_TABLE = {
+    'SMPL': basic_lib.SMPL,
+    'COPY': basic_lib.COPY,
+    'RCPY': basic_lib.COPY,
+    'PAIR': basic_lib.PAIR,
+    'PFUL': basic_lib.PAIR,
+    'PSUS': basic_lib.PSUS,
+    'PFUS': basic_lib.PSUS,
+    'SSUS': basic_lib.PSUS,
+    'PSUE': basic_lib.PSUE,
+}
+NOT_SET = '-'
+HORCM_RUNNING = 1
+# Naming templates for copy groups / snapshots / LDEV labels.
+COPY_GROUP = basic_lib.NAME_PREFIX + '%s%s%03X%d'
+SNAP_NAME = basic_lib.NAME_PREFIX + 'snap'
+LDEV_NAME = basic_lib.NAME_PREFIX + 'ldev-%d-%d'
+MAX_MUNS = 3
+
+# HORCM/raidcom CLI exit codes.
+EX_ENAUTH = 202
+EX_ENOOBJ = 205
+EX_CMDRJE = 221
+EX_CMDIOE = 237
+EX_INVCMD = 240
+EX_INVMOD = 241
+EX_ENODEV = 246
+EX_ENOENT = 247
+EX_OPTINV = 248
+EX_ATTDBG = 250
+EX_ATTHOR = 251
+EX_COMERR = 255
+# NOTE(review): "UNKOWN" is a typo for UNKNOWN; kept as-is for
+# compatibility with any external references.
+EX_UNKOWN = -1
+
+NO_SUCH_DEVICE = (EX_ENODEV, EX_ENOENT)
+
+COMMAND_IO_TO_RAID = (EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV)
+
+HORCM_ERROR = (EX_ATTDBG, EX_ATTHOR, EX_COMERR)
+
+MAX_HOSTGROUPS = 254
+MAX_HLUN = 2047
+
+DEFAULT_PORT_BASE = 31000
+
+LOG = logging.getLogger(__name__)
+
+# Driver configuration options registered with oslo.config.
+volume_opts = [
+    cfg.StrOpt('hitachi_horcm_numbers',
+               default='200,201',
+               help='Instance numbers for HORCM'),
+    cfg.StrOpt('hitachi_horcm_user',
+               default=None,
+               help='Username of storage system for HORCM'),
+    cfg.StrOpt('hitachi_horcm_password',
+               default=None,
+               help='Password of storage system for HORCM'),
+    cfg.BoolOpt('hitachi_horcm_add_conf',
+                default=True,
+                help='Add to HORCM configuration'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+
+
+def horcm_synchronized(function):
+    """Serialize raidcom access for one HORCM instance.
+
+    Holds both a per-process threading lock and an inter-process file
+    lock named after the instance number.  When called with only self
+    (len(args) == 1) the primary instance and raidcom_lock are used;
+    otherwise args[1] is the explicit instance (pair path) and
+    raidcom_pair_lock is used.
+    """
+    @wraps(function)
+    def wrapper(*args, **kargs):
+        if len(args) == 1:
+            inst = args[0].conf.hitachi_horcm_numbers[0]
+            raidcom_obj_lock = args[0].raidcom_lock
+        else:
+            inst = args[1]
+            raidcom_obj_lock = args[0].raidcom_pair_lock
+        raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
+        lock = basic_lib.get_process_lock(raidcom_lock_file)
+        with nested(raidcom_obj_lock, lock):
+            return function(*args, **kargs)
+    return wrapper
+
+
+def storage_synchronized(function):
+    """Serialize storage-resource changes for one array.
+
+    Like horcm_synchronized, but keyed on the array serial number so
+    that all backend processes sharing the array are mutually excluded.
+    """
+    @wraps(function)
+    def wrapper(*args, **kargs):
+        serial = args[0].conf.hitachi_serial_number
+        resource_lock = args[0].resource_lock
+        resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
+        lock = basic_lib.get_process_lock(resource_lock_file)
+        with nested(resource_lock, lock):
+            return function(*args, **kargs)
+    return wrapper
+
+
+class HBSDHORCM(basic_lib.HBSDBasicLib):
+
+    def __init__(self, conf):
+        super(HBSDHORCM, self).__init__(conf=conf)
+
+        # One copy-group name slot per mirror-unit number (MU 0..MAX_MUNS-1).
+        self.copy_groups = [None] * MAX_MUNS
+        # Thread locks guarding the primary and pair raidcom instances.
+        self.raidcom_lock = threading.Lock()
+        self.raidcom_pair_lock = threading.Lock()
+        self.horcmgr_lock = threading.Lock()
+        # File lock wrapper; created lazily by set_pair_flock().
+        self.horcmgr_flock = None
+        # Guards array-wide resource changes (see storage_synchronized).
+        self.resource_lock = threading.Lock()
+
+    def check_param(self):
+        """Validate HORCM-related configuration.
+
+        Converts hitachi_horcm_numbers from 'N,M' into a two-int list
+        (the instances must differ), requires user/password, and
+        rejects a thin pool equal to the regular pool.  Raises
+        HBSDError via output_err(601) on any invalid parameter.
+        """
+        numbers = self.conf.hitachi_horcm_numbers.split(',')
+        if len(numbers) != 2:
+            msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
+            raise exception.HBSDError(message=msg)
+        for i in numbers:
+            if not i.isdigit():
+                msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
+                raise exception.HBSDError(message=msg)
+        # NOTE(review): map() returning an indexable list is Python 2
+        # behavior (this module already uses contextlib.nested, so it is
+        # Py2-only); would need list(map(...)) on Python 3.
+        self.conf.hitachi_horcm_numbers = map(int, numbers)
+        inst = self.conf.hitachi_horcm_numbers[0]
+        pair_inst = self.conf.hitachi_horcm_numbers[1]
+        if inst == pair_inst:
+            msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
+            raise exception.HBSDError(message=msg)
+        for param in ('hitachi_horcm_user', 'hitachi_horcm_password'):
+            if not getattr(self.conf, param):
+                msg = basic_lib.output_err(601, param=param)
+                raise exception.HBSDError(message=msg)
+        if self.conf.hitachi_thin_pool_id == self.conf.hitachi_pool_id:
+            msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
+            raise exception.HBSDError(message=msg)
+        # Touch every registered option so a missing one fails fast here.
+        for opt in volume_opts:
+            getattr(self.conf, opt.name)
+
+    def set_copy_groups(self, host_ip):
+        # Derive one copy-group name per mirror unit from host IP,
+        # array serial and the pair HORCM instance number.
+        serial = self.conf.hitachi_serial_number
+        inst = self.conf.hitachi_horcm_numbers[1]
+
+        for mun in range(MAX_MUNS):
+            copy_group = COPY_GROUP % (host_ip, serial, inst, mun)
+            self.copy_groups[mun] = copy_group
+
+    def set_pair_flock(self):
+        # Create the file lock protecting the pair HORCM instance.
+        inst = self.conf.hitachi_horcm_numbers[1]
+        name = '%s%d' % (HORCMGR_LOCK_FILE, inst)
+        self.horcmgr_flock = basic_lib.FileLock(name, self.horcmgr_lock)
+        return self.horcmgr_flock
+
+    def check_horcm(self, inst):
+        # Returns HORCM_RUNNING (1) when the instance is up.
+        args = 'HORCMINST=%d horcmgr -check' % inst
+        ret, _stdout, _stderr = self.exec_command('env', args=args,
+                                                  printflag=False)
+        return ret
+
+    def shutdown_horcm(self, inst):
+        ret, stdout, stderr = self.exec_command(
+            'horcmshutdown.sh', args=six.text_type(inst), printflag=False)
+        return ret
+
+    def start_horcm(self, inst):
+        # Returns the full (ret, stdout, stderr) triple, unlike shutdown.
+        return self.exec_command('horcmstart.sh', args=six.text_type(inst),
+                                 printflag=False)
+
+    def _wait_for_horcm_shutdown(self, inst):
+        # Loop body for FixedIntervalLoopingCall: stop looping once the
+        # instance is down, or immediately if the shutdown command fails.
+        if self.check_horcm(inst) != HORCM_RUNNING:
+            raise loopingcall.LoopingCallDone()
+
+        if self.shutdown_horcm(inst):
+            LOG.error(_("Failed to shutdown horcm."))
+            raise loopingcall.LoopingCallDone()
+
+    @horcm_synchronized
+    def restart_horcm(self, inst=None):
+        """Stop (polling until down) and restart a HORCM instance.
+
+        Defaults to the primary instance; serialized by
+        horcm_synchronized.
+        """
+        if inst is None:
+            inst = self.conf.hitachi_horcm_numbers[0]
+
+        loop = loopingcall.FixedIntervalLoopingCall(
+            self._wait_for_horcm_shutdown, inst)
+
+        loop.start(interval=HORCM_WAITTIME).wait()
+
+        ret, stdout, stderr = self.start_horcm(inst)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='horcmstart.sh', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def restart_pair_horcm(self):
+        inst = self.conf.hitachi_horcm_numbers[1]
+        self.restart_horcm(inst=inst)
+
+    def setup_horcmgr(self, host_ip):
+        """Bring up both HORCM instances and log in to the array.
+
+        Optionally (re)writes the horcm.conf files, restarts the
+        primary and pair instances, then authenticates via raidcom.
+        """
+        pair_inst = self.conf.hitachi_horcm_numbers[1]
+        self.set_copy_groups(host_ip)
+        if self.conf.hitachi_horcm_add_conf:
+            self.create_horcmconf()
+            self.create_horcmconf(inst=pair_inst)
+        self.restart_horcm()
+        # The pair instance restart is guarded by the horcmgr file lock.
+        with self.horcmgr_flock:
+            self.restart_pair_horcm()
+        ret, stdout, stderr = self.comm_login()
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='raidcom -login', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def _wait_for_exec_horcm(self, cmd, args, printflag, start):
+        """Loop body: run one raidcom/pairXX command with retry policy.
+
+        Exit codes <= 127 are treated as the command's own result and
+        returned via LoopingCallDone.  Higher codes trigger recovery:
+        re-login on EX_ENAUTH, horcm restart on HORCM_ERROR codes, retry
+        on COMMAND_IO_TO_RAID codes; anything else (or exceeding
+        EXEC_MAX_WAITTIME) gives up with the last result.
+        """
+        if cmd == 'raidcom':
+            serial = self.conf.hitachi_serial_number
+            inst = self.conf.hitachi_horcm_numbers[0]
+            raidcom_obj_lock = self.raidcom_lock
+            args = '%s -s %s -I%d' % (args, serial, inst)
+        else:
+            inst = self.conf.hitachi_horcm_numbers[1]
+            raidcom_obj_lock = self.raidcom_pair_lock
+            args = '%s -ISI%d' % (args, inst)
+        user = self.conf.hitachi_horcm_user
+        passwd = self.conf.hitachi_horcm_password
+        raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
+        lock = basic_lib.get_process_lock(raidcom_lock_file)
+
+        with nested(raidcom_obj_lock, lock):
+            ret, stdout, stderr = self.exec_command(cmd, args=args,
+                                                    printflag=printflag)
+
+        # NOTE(review): 'not ret' is redundant (0 <= 127) but harmless.
+        if not ret or ret <= 127:
+            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
+
+        if time.time() - start >= EXEC_MAX_WAITTIME:
+            LOG.error(_("horcm command timeout."))
+            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
+
+        # NOTE(review): user/password are interpolated into a regex
+        # unescaped; regex metacharacters in the password would misfire.
+        if (ret == EX_ENAUTH and
+                not re.search("-login %s %s" % (user, passwd), args)):
+            _ret, _stdout, _stderr = self.comm_login()
+            if _ret:
+                LOG.error(_("Failed to authenticate user."))
+                raise loopingcall.LoopingCallDone((ret, stdout, stderr))
+
+        elif ret in HORCM_ERROR:
+            _ret = 0
+            with nested(raidcom_obj_lock, lock):
+                if self.check_horcm(inst) != HORCM_RUNNING:
+                    _ret, _stdout, _stderr = self.start_horcm(inst)
+            if _ret and _ret != HORCM_RUNNING:
+                LOG.error(_("Failed to start horcm."))
+                raise loopingcall.LoopingCallDone((ret, stdout, stderr))
+
+        elif ret not in COMMAND_IO_TO_RAID:
+            LOG.error(_("Unexpected error occurs in horcm."))
+            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
+
+    def exec_raidcom(self, cmd, args, printflag=True):
+        # Run the command under the retry loop above; blocks until it
+        # yields a result tuple (ret, stdout, stderr).
+        loop = loopingcall.FixedIntervalLoopingCall(
+            self._wait_for_exec_horcm, cmd, args, printflag, time.time())
+
+        return loop.start(interval=EXEC_RETRY_INTERVAL).wait()
+
+    def comm_login(self):
+        # Authenticate the raidcom session (printflag off: credentials).
+        rmi_user = self.conf.hitachi_horcm_user
+        rmi_pass = self.conf.hitachi_horcm_password
+        args = '-login %s %s' % (rmi_user, rmi_pass)
+        return self.exec_raidcom('raidcom', args, printflag=False)
+
+    def comm_lock(self):
+        # Take the array-wide raidcom resource lock; raises on failure.
+        ret, _stdout, stderr = self.exec_raidcom('raidcom', 'lock resource')
+        if ret:
+            msg = basic_lib.output_err(
+                603, serial=self.conf.hitachi_serial_number,
+                inst=self.conf.hitachi_horcm_numbers[0], ret=ret, err=stderr)
+            raise exception.HBSDError(message=msg)
+
+    def comm_unlock(self):
+        # Best-effort unlock; the return code is deliberately ignored.
+        self.exec_raidcom('raidcom', 'unlock resource')
+
+    def comm_reset_status(self):
+        self.exec_raidcom('raidcom', 'reset command_status')
+
+    def comm_get_status(self):
+        return self.exec_raidcom('raidcom', 'get command_status')
+
+    def get_command_error(self, stdout):
+        # Parse the error code from the second line (fourth column) of
+        # 'raidcom get command_status' output.
+        lines = stdout.splitlines()
+        line = shlex.split(lines[1])
+        return int(line[3])
+
+    def comm_get_ldev(self, ldev):
+        # Return raw 'raidcom get ldev' output for one LDEV; raises
+        # HBSDCmdError on a non-zero exit code.
+        opt = 'get ldev -ldev_id %s' % ldev
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return stdout
+
+    def add_used_hlun(self, port, gid, used_list):
+        # Append every LUN already mapped on port-gid to used_list
+        # (mutated in place), skipping duplicates.
+        opt = 'get lun -port %s-%d' % (port, gid)
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            lun = int(shlex.split(line)[3])
+            if lun not in used_list:
+                used_list.append(lun)
+
+    def get_unused_ldev(self, ldev_range):
+        """Return the first undefined LDEV number within ldev_range.
+
+        Scans in chunks of GETSTORAGEARRAY_ONCE; a 'VOL_TYPE : NOT
+        DEFINED' line means the most recently seen LDEV is free.  The
+        while/else raises via output_err(648) when the whole range is
+        exhausted without a free LDEV.
+        """
+        start = ldev_range[0]
+        end = ldev_range[1]
+
+        # NOTE(review): cnt treats end as inclusive, but the loop
+        # condition 'start < end' never scans a range where
+        # start == end -- confirm that is intended.
+        while start < end:
+            if end - start + 1 > GETSTORAGEARRAY_ONCE:
+                cnt = GETSTORAGEARRAY_ONCE
+            else:
+                cnt = end - start + 1
+            opt = 'get ldev -ldev_id %d -cnt %d' % (start, cnt)
+            ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                    printflag=False)
+            if ret:
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+            lines = stdout.splitlines()
+            ldev_num = None
+            for line in lines:
+                if re.match("LDEV :", line):
+                    ldev_num = int(shlex.split(line)[2])
+                    continue
+                if re.match("VOL_TYPE : NOT DEFINED", line):
+                    return ldev_num
+
+            start += GETSTORAGEARRAY_ONCE
+        else:
+            msg = basic_lib.output_err(648, resource='LDEV')
+            raise exception.HBSDError(message=msg)
+
+    def get_hgname_gid(self, port, host_grp_name):
+        # Return the GID whose host-group name matches, or None.
+        opt = 'get host_grp -port %s -key host_grp' % port
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            line = shlex.split(line)
+            if line[2] == host_grp_name:
+                return int(line[1])
+        return None
+
+    # NOTE(review): the 'range' parameter shadows the builtin; it is a
+    # (min, max) GID pair.
+    def get_unused_gid(self, range, port):
+        """Return the first free GID in [range[0], range[1]] on port.
+
+        A host-group name of '-' marks an unused GID.  Raises via
+        output_err(648) when none is free.
+        """
+        _min = range[0]
+        _max = range[1]
+        opt = 'get host_grp -port %s -key host_grp' % port
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+        free_gid = None
+        # NOTE(review): slicing from _min + 1 assumes output line i+1
+        # always describes GID i (dense, ordered listing) -- confirm.
+        for line in lines[_min + 1:]:
+            line = shlex.split(line)
+            if int(line[1]) > _max:
+                break
+            if line[2] == '-':
+                free_gid = int(line[1])
+                break
+        if free_gid is None:
+            msg = basic_lib.output_err(648, resource='GID')
+            raise exception.HBSDError(message=msg)
+        return free_gid
+
+    def comm_set_target_wwns(self, target_ports):
+        """Return {port_name: wwn} parsed from 'raidcom get port'.
+
+        Only ports listed in target_ports are kept (all ports when
+        target_ports is empty/None).  The port name is the first five
+        characters of column 0; the WWN is column 10.
+        """
+        opt = 'get port'
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        target_wwns = {}
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            line = shlex.split(line)
+            port = line[0][:5]
+            if target_ports and port not in target_ports:
+                continue
+
+            target_wwns[port] = line[10]
+        LOG.debug('target wwns: %s' % target_wwns)
+        return target_wwns
+
+    def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected):
+        """Append to hostgroups every driver-named host group on port
+        that has one of the given initiator WWNs registered.
+
+        Stops early once every WWN in wwns has been matched.
+        """
+        opt = 'get host_grp -port %s' % port
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+        found_wwns = 0
+        # NOTE(review): the inner query rebinds 'lines'/'line'; the outer
+        # loop is unaffected because it iterates the lines[1:] slice
+        # created above, but the reuse is easy to misread.
+        for line in lines[1:]:
+            line = shlex.split(line)
+            # Only consider host groups created by this driver.
+            if not re.match(basic_lib.NAME_PREFIX, line[2]):
+                continue
+            gid = line[1]
+            opt = 'get hba_wwn -port %s-%s' % (port, gid)
+            ret, stdout, stderr = self.exec_raidcom(
+                'raidcom', opt, printflag=False)
+            if ret:
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+            lines = stdout.splitlines()
+            for line in lines[1:]:
+                hba_info = shlex.split(line)
+
+                if hba_info[3] in wwns:
+                    hostgroups.append({'port': six.text_type(port),
+                                       'gid': int(hba_info[1]),
+                                       'initiator_wwn': hba_info[3],
+                                       'detected': is_detected})
+                    found_wwns += 1
+                if len(wwns) == found_wwns:
+                    break
+
+            if len(wwns) == found_wwns:
+                break
+
+    def comm_chk_login_wwn(self, wwns, port):
+        # True when any of the given initiator WWNs is currently logged
+        # in on the port.
+        opt = 'get port -port %s' % port
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            login_info = shlex.split(line)
+            if login_info[1] in wwns:
+                return True
+        # NOTE(review): for/else with no break -- the else always runs
+        # when no WWN matched, so it is equivalent to a plain return.
+        else:
+            return False
+
+    def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True):
+        """Collect host groups matching wwns on secure target ports.
+
+        Appends matches to hgs (mutated in place) and returns the list
+        of ports with LUN security enabled (column 7 == 'Y').  When
+        login is True, each match is annotated with whether the WWN is
+        currently logged in.
+        """
+        security_ports = []
+        hostgroups = []
+
+        opt = 'get port'
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+
+        for line in lines[1:]:
+            line = shlex.split(line)
+            port = line[0][:5]
+            if target_ports and port not in target_ports:
+                continue
+            security = True if line[7] == 'Y' else False
+
+            is_detected = None
+            if login:
+                is_detected = self.comm_chk_login_wwn(wwns, port)
+
+            if security:
+                self.comm_get_hbawwn(hostgroups, wwns, port, is_detected)
+                security_ports.append(port)
+
+        for hostgroup in hostgroups:
+            hgs.append(hostgroup)
+
+        return security_ports
+
+    def _get_lun(self, port, gid, ldev):
+        # Return the LUN under which ldev is mapped on port-gid, or None
+        # when the LDEV is not mapped there.
+        lun = None
+
+        opt = 'get lun -port %s-%d' % (port, gid)
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            line = shlex.split(line)
+            if line[5] == six.text_type(ldev):
+                lun = int(line[3])
+                break
+
+        return lun
+
+    def _wait_for_delete_lun(self, hostgroup, ldev, start):
+        """Loop body: try to unmap ldev from hostgroup, retrying on
+        transient conditions.
+
+        Retries (by simply returning) while the last-snapshot-path or
+        host-I/O SSB codes are reported, until LUN_DELETE_WAITTIME has
+        elapsed; any other failure raises HBSDCmdError immediately.
+        """
+        opt = 'delete lun -port %s-%d -ldev_id %d' % (hostgroup['port'],
+                                                      hostgroup['gid'], ldev)
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
+        if not ret:
+            raise loopingcall.LoopingCallDone()
+
+        if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and
+                not self.comm_get_snapshot(ldev) or
+                re.search('SSB=%s' % HOST_IO_SSB, stderr)):
+            msg = basic_lib.set_msg(310, ldev=ldev, reason=stderr)
+            LOG.warning(msg)
+
+            if time.time() - start >= LUN_DELETE_WAITTIME:
+                msg = basic_lib.output_err(
+                    637, method='_wait_for_delete_lun',
+                    timeout=LUN_DELETE_WAITTIME)
+                raise exception.HBSDError(message=msg)
+        else:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_delete_lun_core(self, hostgroup, ldev):
+        # Poll the unmap attempt at LUN_DELETE_INTERVAL until it succeeds
+        # or _wait_for_delete_lun raises.
+        loop = loopingcall.FixedIntervalLoopingCall(
+            self._wait_for_delete_lun, hostgroup, ldev, time.time())
+
+        loop.start(interval=LUN_DELETE_INTERVAL).wait()
+
+    @storage_synchronized
+    def comm_delete_lun(self, hostgroups, ldev):
+        """Unmap ldev from every host group, under the array lock.
+
+        Skips (port, gid) pairs already processed.  EX_ENOOBJ ("no such
+        object") is tolerated per host group; only when EVERY host group
+        reported it is HBSDNotFound raised.  Other command errors
+        propagate.  The resource lock is always released via finally.
+        """
+        try:
+            deleted_hostgroups = []
+            self.comm_lock()
+            no_ldev_cnt = 0
+            for hostgroup in hostgroups:
+                port = hostgroup['port']
+                gid = hostgroup['gid']
+                is_deleted = False
+                for deleted in deleted_hostgroups:
+                    if port == deleted['port'] and gid == deleted['gid']:
+                        is_deleted = True
+                if is_deleted:
+                    continue
+                try:
+                    self.comm_delete_lun_core(hostgroup, ldev)
+                except exception.HBSDCmdError as ex:
+                    # NOTE(review): counted before checking ex.ret; harmless
+                    # because non-EX_ENOOBJ errors re-raise immediately.
+                    no_ldev_cnt += 1
+                    if ex.ret == EX_ENOOBJ:
+                        if no_ldev_cnt != len(hostgroups):
+                            continue
+                        raise exception.HBSDNotFound
+                    else:
+                        raise
+                deleted_hostgroups.append({'port': port, 'gid': gid})
+        finally:
+            self.comm_unlock()
+
+    def _check_ldev_status(self, ldev, status):
+        # Wait (up to LDEV_STATUS_WAITTIME) for the LDEV to reach the
+        # given status; returns the raidcom exit code (0 on success).
+        opt = ('get ldev -ldev_id %s -check_status %s -time %s' %
+               (ldev, status, LDEV_STATUS_WAITTIME))
+        ret, _stdout, _stderr = self.exec_raidcom('raidcom', opt)
+        return ret
+
+    @storage_synchronized
+    def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol):
+        """Create an OPEN-V LDEV in the snap pool (V-VOL) or pool_id.
+
+        Raises HBSDNotFound when the LDEV id is intercepted (already
+        being used), HBSDCmdError on other command failures, and
+        HBSDError when the new LDEV never reaches NML status.  Runs
+        under the array resource lock, released via finally.
+        """
+        emulation = 'OPEN-V'
+        if is_vvol:
+            opt = ('add ldev -pool snap -ldev_id %d '
+                   '-capacity %dG -emulation %s'
+                   % (ldev, capacity, emulation))
+        else:
+            opt = ('add ldev -pool %d -ldev_id %d '
+                   '-capacity %dG -emulation %s'
+                   % (pool_id, ldev, capacity, emulation))
+
+        try:
+            self.comm_lock()
+            self.comm_reset_status()
+            ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
+            if ret:
+                if re.search('SSB=%s' % INTERCEPT_LDEV_SSB, stderr):
+                    raise exception.HBSDNotFound
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+            if self._check_ldev_status(ldev, "NML"):
+                msg = basic_lib.output_err(653, ldev=ldev)
+                raise exception.HBSDError(message=msg)
+        finally:
+            self.comm_unlock()
+
+    @storage_synchronized
+    def comm_add_hostgrp(self, port, gid, host_grp_name):
+        """Create host group *gid* on *port* via 'raidcom add host_grp'.
+
+        Raises HBSDNotFound when stderr carries SSB=HOSTGROUP_INSTALLED
+        (presumably "group already exists" -- TODO confirm against the
+        SSB code table), HBSDCmdError on any other failure.
+        """
+        opt = 'add host_grp -port %s-%d -host_grp_name %s' % (port, gid,
+                                                              host_grp_name)
+        try:
+            self.comm_lock()
+            ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
+            if ret:
+                if re.search('SSB=%s' % HOSTGROUP_INSTALLED, stderr):
+                    raise exception.HBSDNotFound
+                else:
+                    opt = 'raidcom %s' % opt
+                    msg = basic_lib.output_err(
+                        600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                    raise exception.HBSDCmdError(
+                        message=msg, ret=ret, err=stderr)
+        finally:
+            self.comm_unlock()
+
+    @storage_synchronized
+    def comm_del_hostgrp(self, port, gid, host_grp_name):
+        """Delete a host group; raises HBSDCmdError on command failure."""
+        opt = 'delete host_grp -port %s-%d %s' % (port, gid, host_grp_name)
+        try:
+            self.comm_lock()
+            ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
+            if ret:
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        finally:
+            self.comm_unlock()
+
+    @storage_synchronized
+    def comm_add_hbawwn(self, port, gid, wwn):
+        """Register an HBA WWN in a host group.
+
+        Raises HBSDCmdError on command failure.
+        NOTE(review): gid is formatted with %s here while the sibling
+        host-group commands use %d -- same result for int gids.
+        """
+        opt = 'add hba_wwn -port %s-%s -hba_wwn %s' % (port, gid, wwn)
+        try:
+            self.comm_lock()
+            ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
+            if ret:
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        finally:
+            self.comm_unlock()
+
+    @storage_synchronized
+    def comm_add_lun(self, unused_command, hostgroups, ldev, is_once=False):
+        """Map *ldev* into the given host groups, reusing or picking a LUN.
+
+        First pass: collect LUN IDs already in use and detect an existing
+        mapping (all host groups must agree on the same LUN ID, else
+        HBSDError 648).  Then choose a free LUN ID and 'raidcom add lun'
+        it to every remaining host group.  With is_once=True, a single
+        successful mapping is enough.  Raises HBSDNotFound when no LUN ID
+        is free, HBSDCmdError / HBSDError when no mapping succeeded.
+        """
+        tmp_hostgroups = hostgroups[:]
+        is_ok = False
+        used_list = []
+        lun = None
+        old_lun = None
+
+        for hostgroup in hostgroups:
+            port = hostgroup['port']
+            gid = hostgroup['gid']
+            self.add_used_hlun(port, gid, used_list)
+            lun = self._get_lun(port, gid, ldev)
+
+            # When 'lun' or 'old_lun' is 0, it should be true.
+            # So, it cannot remove 'is not None'.
+            if lun is not None:
+                if old_lun is not None and old_lun != lun:
+                    msg = basic_lib.output_err(648, resource='LUN (HLUN)')
+                    raise exception.HBSDError(message=msg)
+                is_ok = True
+                hostgroup['lun'] = lun
+                tmp_hostgroups.remove(hostgroup)
+                old_lun = lun
+
+            if is_once:
+                # When 'lun' is 0, it should be true.
+                # So, it cannot remove 'is not None'.
+                if lun is not None:
+                    return
+                elif len(used_list) < MAX_HLUN + 1:
+                    break
+                else:
+                    # This host group is full; try the next one afresh.
+                    tmp_hostgroups.remove(hostgroup)
+                    if tmp_hostgroups:
+                        used_list = []
+
+        if not used_list:
+            lun = 0
+        elif lun is None:
+            # Pick the lowest free LUN ID; for/else fires when none is free.
+            for i in range(MAX_HLUN + 1):
+                if i not in used_list:
+                    lun = i
+                    break
+            else:
+                raise exception.HBSDNotFound
+
+        opt = None
+        ret = 0
+        stdout = None
+        stderr = None
+        invalid_hgs_str = None
+
+        try:
+            self.comm_lock()
+            for hostgroup in tmp_hostgroups:
+                port = hostgroup['port']
+                gid = hostgroup['gid']
+                if not hostgroup['detected']:
+                    # Collect undetected host groups for the final error.
+                    if invalid_hgs_str:
+                        invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str,
+                                                         port, gid)
+                    else:
+                        invalid_hgs_str = '%s:%d' % (port, gid)
+                    continue
+                opt = 'add lun -port %s-%d -ldev_id %d -lun_id %d' % (
+                    port, gid, ldev, lun)
+                ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
+                if not ret:
+                    is_ok = True
+                    hostgroup['lun'] = lun
+                    if is_once:
+                        break
+                else:
+                    # Per-group failures are only warnings; fail below if
+                    # nothing at all was mapped.
+                    msg = basic_lib.set_msg(
+                        314, ldev=ldev, lun=lun, port=port, id=gid)
+                    LOG.warning(msg)
+
+        finally:
+            self.comm_unlock()
+
+        if not is_ok:
+            if stderr:
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+            else:
+                msg = basic_lib.output_err(659, gid=invalid_hgs_str)
+                raise exception.HBSDError(message=msg)
+
+    @storage_synchronized
+    def comm_delete_ldev(self, ldev, is_vvol):
+        """Delete an LDEV and verify completion via the async status.
+
+        Raises HBSDNotFound when stderr carries SSB=INVALID_LUN_SSB,
+        HBSDCmdError on other command or status failures.
+        """
+        ret = -1
+        stdout = ""
+        stderr = ""
+        try:
+            self.comm_lock()
+            self.comm_reset_status()
+            opt = 'delete ldev -ldev_id %d' % ldev
+            ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
+            if ret:
+                if re.search('SSB=%s' % INVALID_LUN_SSB, stderr):
+                    raise exception.HBSDNotFound
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+            # The delete is asynchronous; confirm via 'get command_status'.
+            ret, stdout, stderr = self.comm_get_status()
+            if ret or self.get_command_error(stdout):
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        finally:
+            self.comm_unlock()
+
+    @storage_synchronized
+    def comm_extend_ldev(self, ldev, old_size, new_size):
+        """Grow an LDEV by (new_size - old_size) GB.
+
+        Raises HBSDCmdError if the extend command or its asynchronous
+        status check fails.
+        """
+        ret = -1
+        stdout = ""
+        stderr = ""
+        extend_size = new_size - old_size
+        try:
+            self.comm_lock()
+            self.comm_reset_status()
+            opt = 'extend ldev -ldev_id %d -capacity %dG' % (ldev, extend_size)
+            ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
+            if ret:
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+            # Confirm the asynchronous operation actually completed.
+            ret, stdout, stderr = self.comm_get_status()
+            if ret or self.get_command_error(stdout):
+                opt = 'raidcom %s' % opt
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        finally:
+            self.comm_unlock()
+
+    def comm_get_dp_pool(self, pool_id):
+        opt = 'get dp_pool'
+        ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % opt
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            if int(shlex.split(line)[0]) == pool_id:
+                free_gb = int(shlex.split(line)[3]) / 1024
+                total_gb = int(shlex.split(line)[4]) / 1024
+                return total_gb, free_gb
+
+        msg = basic_lib.output_err(640, pool_id=pool_id)
+        raise exception.HBSDError(message=msg)
+
+    @storage_synchronized
+    def comm_modify_ldev(self, ldev):
+        """Request discard of zero pages on an LDEV.
+
+        Failure is only logged (message 315), never raised -- this is a
+        best-effort space-reclamation step.
+        """
+        args = 'modify ldev -ldev_id %d -status discard_zero_page' % ldev
+        try:
+            self.comm_lock()
+            ret, stdout, stderr = self.exec_raidcom('raidcom', args)
+            if ret:
+                msg = basic_lib.set_msg(315, ldev=ldev, reason=stderr)
+                LOG.warning(msg)
+        finally:
+            self.comm_unlock()
+
+    def is_detected(self, port, wwn):
+        """Return whether *wwn* is logged in on *port* (delegates to
+        comm_chk_login_wwn)."""
+        return self.comm_chk_login_wwn([wwn], port)
+
+    def discard_zero_page(self, ldev):
+        """Best-effort zero-page discard; any failure is only logged."""
+        try:
+            self.comm_modify_ldev(ldev)
+        except Exception as e:
+            LOG.warning(_('Failed to discard zero page: %s') %
+                        six.text_type(e))
+
+    @storage_synchronized
+    def comm_add_snapshot(self, pvol, svol):
+        """Create a snapshot pair pvol->svol in the configured thin pool.
+
+        Raises HBSDCmdError on command failure.
+        """
+        pool = self.conf.hitachi_thin_pool_id
+        copy_size = self.conf.hitachi_copy_speed
+        args = ('add snapshot -ldev_id %d %d -pool %d '
+                '-snapshot_name %s -copy_size %d'
+                % (pvol, svol, pool, SNAP_NAME, copy_size))
+        try:
+            self.comm_lock()
+            ret, stdout, stderr = self.exec_raidcom('raidcom', args)
+            if ret:
+                opt = 'raidcom %s' % args
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        finally:
+            self.comm_unlock()
+
+    @storage_synchronized
+    def comm_delete_snapshot(self, ldev):
+        """Delete the snapshot on *ldev*; raises HBSDCmdError on failure."""
+        args = 'delete snapshot -ldev_id %d' % ldev
+        try:
+            self.comm_lock()
+            ret, stdout, stderr = self.exec_raidcom('raidcom', args)
+            if ret:
+                opt = 'raidcom %s' % args
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        finally:
+            self.comm_unlock()
+
+    @storage_synchronized
+    def comm_modify_snapshot(self, ldev, op):
+        """Apply snapshot operation *op* (e.g. 'create') to *ldev*.
+
+        Raises HBSDCmdError on command failure.
+        """
+        args = ('modify snapshot -ldev_id %d -snapshot_data %s' % (ldev, op))
+        try:
+            self.comm_lock()
+            ret, stdout, stderr = self.exec_raidcom('raidcom', args)
+            if ret:
+                opt = 'raidcom %s' % args
+                msg = basic_lib.output_err(
+                    600, cmd=opt, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        finally:
+            self.comm_unlock()
+
+    def _wait_for_snap_status(self, pvol, svol, status, timeout, start):
+        if (self.get_snap_pvol_status(pvol, svol) in status and
+                self.get_snap_svol_status(svol) in status):
+            raise loopingcall.LoopingCallDone()
+
+        if time.time() - start >= timeout:
+            msg = basic_lib.output_err(
+                637, method='_wait_for_snap_status', timuout=timeout)
+            raise exception.HBSDError(message=msg)
+
+    def wait_snap(self, pvol, svol, status, timeout, interval):
+        """Block until the snapshot pair reaches one of *status* or
+        _wait_for_snap_status raises on timeout."""
+        loop = loopingcall.FixedIntervalLoopingCall(
+            self._wait_for_snap_status, pvol,
+            svol, status, timeout, time.time())
+
+        loop.start(interval=interval).wait()
+
+    def comm_get_snapshot(self, ldev):
+        """Return raw 'raidcom get snapshot' output for *ldev*.
+
+        Raises HBSDCmdError on command failure.
+        """
+        args = 'get snapshot -ldev_id %d' % ldev
+        ret, stdout, stderr = self.exec_raidcom('raidcom', args,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return stdout
+
+    def check_snap_count(self, ldev):
+        """Raise HBSDBusy when *ldev* already has MAX_SNAPSHOT_COUNT
+        snapshots (output has a header line, hence the +1)."""
+        stdout = self.comm_get_snapshot(ldev)
+        if not stdout:
+            return
+        lines = stdout.splitlines()
+        if len(lines) >= MAX_SNAPSHOT_COUNT + 1:
+            msg = basic_lib.output_err(
+                615, copy_method=basic_lib.THIN, pvol=ldev)
+            raise exception.HBSDBusy(message=msg)
+
+    def get_snap_pvol_status(self, pvol, svol):
+        """Return the pair status of the pvol entry matching *svol*,
+        or SMPL when no snapshot row references it."""
+        stdout = self.comm_get_snapshot(pvol)
+        if not stdout:
+            return basic_lib.SMPL
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            line = shlex.split(line)
+            # Column 6 is the partner LDEV ID; column 2 the status code.
+            if int(line[6]) == svol:
+                return STATUS_TABLE[line[2]]
+        else:
+            return basic_lib.SMPL
+
+    def get_snap_svol_status(self, ldev):
+        """Return the status of the first snapshot row for *ldev*, or
+        SMPL when there is no snapshot output at all."""
+        stdout = self.comm_get_snapshot(ldev)
+        if not stdout:
+            return basic_lib.SMPL
+        lines = stdout.splitlines()
+        line = shlex.split(lines[1])
+        return STATUS_TABLE[line[2]]
+
+    @horcm_synchronized
+    def create_horcmconf(self, inst=None):
+        """Create or amend /etc/horcm<inst>.conf with this array's CMD line.
+
+        When the file is missing a minimal HORCM_MON/HORCM_CMD skeleton is
+        generated; otherwise the existing file is scanned for the CMD entry
+        of this serial number and the entry is appended only if absent.
+        Raises HBSDError (message 632) when writing the file fails.
+        """
+        if inst is None:
+            inst = self.conf.hitachi_horcm_numbers[0]
+
+        serial = self.conf.hitachi_serial_number
+        filename = '/etc/horcm%d.conf' % inst
+
+        # Each HORCM instance listens on its own UDP port.
+        port = DEFAULT_PORT_BASE + inst
+
+        found = False
+
+        if not os.path.exists(filename):
+            file_str = """
+HORCM_MON
+#ip_address        service         poll(10ms)     timeout(10ms)
+127.0.0.1 %16d               6000              3000
+HORCM_CMD
+""" % port
+        else:
+            file_str = utils.read_file_as_root(filename)
+
+            lines = file_str.splitlines()
+            for line in lines:
+                # Matches the literal file text '\\.\CMD-<serial>:/dev/sd'.
+                if re.match(r'\\\\.\\CMD-%s:/dev/sd' % serial, line):
+                    found = True
+                    break
+
+        if not found:
+            # NOTE: insert_str's backslashes are collapsed again by the
+            # re.sub replacement-escape processing, yielding '\\.\CMD-...'.
+            insert_str = r'\\\\.\\CMD-%s:/dev/sd' % serial
+            file_str = re.sub(r'(\n\bHORCM_CMD.*|^\bHORCM_CMD.*)',
+                              r'\1\n%s\n' % insert_str, file_str)
+
+            try:
+                # 'tee' via rootwrap, since /etc requires root to write.
+                utils.execute('tee', filename, process_input=file_str,
+                              run_as_root=True)
+            except putils.ProcessExecutionError as ex:
+                msg = basic_lib.output_err(
+                    632, file=filename, ret=ex.exit_code, err=ex.stderr)
+                raise exception.HBSDError(message=msg)
+
+    def comm_get_copy_grp(self):
+        """Return raw 'raidcom get copy_grp' output; raises HBSDCmdError
+        on failure."""
+        ret, stdout, stderr = self.exec_raidcom('raidcom', 'get copy_grp',
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom get copy_grp'
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return stdout
+
+    def comm_add_copy_grp(self, copy_group, pvol_group, svol_group, mun):
+        """Create a copy group from a pvol/svol device-group pair with
+        mirror ID *mun*; raises HBSDCmdError on failure."""
+        args = ('add copy_grp -copy_grp_name %s %s %s -mirror_id %d'
+                % (copy_group, pvol_group, svol_group, mun))
+        ret, stdout, stderr = self.exec_raidcom('raidcom', args,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_delete_copy_grp(self, copy_group):
+        """Delete a copy group; raises HBSDCmdError on failure."""
+        args = 'delete copy_grp -copy_grp_name %s' % copy_group
+        ret, stdout, stderr = self.exec_raidcom('raidcom', args,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_get_device_grp(self, group_name):
+        """Return raw 'raidcom get device_grp' output for *group_name*;
+        raises HBSDCmdError on failure."""
+        args = 'get device_grp -device_grp_name %s' % group_name
+        ret, stdout, stderr = self.exec_raidcom('raidcom', args,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return stdout
+
+    def comm_add_device_grp(self, group_name, ldev_name, ldev):
+        """Add *ldev* to a device group under *ldev_name*; raises
+        HBSDCmdError on failure."""
+        args = ('add device_grp -device_grp_name %s %s -ldev_id %d'
+                % (group_name, ldev_name, ldev))
+        ret, stdout, stderr = self.exec_raidcom('raidcom', args,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_delete_device_grp(self, group_name, ldev):
+        """Remove *ldev* from a device group; raises HBSDCmdError on
+        failure."""
+        args = ('delete device_grp -device_grp_name %s -ldev_id %d'
+                % (group_name, ldev))
+        ret, stdout, stderr = self.exec_raidcom('raidcom', args,
+                                                printflag=False)
+        if ret:
+            opt = 'raidcom %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_paircreate(self, copy_group, ldev_name):
+        """Run 'paircreate' (split, quick mode) for one pair in the copy
+        group; raises HBSDCmdError on failure."""
+        args = ('-g %s -d %s -split -fq quick -c %d -vl'
+                % (copy_group, ldev_name, self.conf.hitachi_copy_speed))
+        ret, stdout, stderr = self.exec_raidcom('paircreate', args)
+        if ret:
+            opt = 'paircreate %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_pairsplit(self, copy_group, ldev_name):
+        """Run 'pairsplit -S' (simplex/dissolve) for one pair; raises
+        HBSDCmdError on failure."""
+        args = '-g %s -d %s -S' % (copy_group, ldev_name)
+        ret, stdout, stderr = self.exec_raidcom('pairsplit', args)
+        if ret:
+            opt = 'pairsplit %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_pairevtwait(self, copy_group, ldev_name, check_svol):
+        """Run 'pairevtwait -nowait[s]' and return its exit code.
+
+        The exit code encodes the pair status; values > 127 are treated
+        as errors and raise HBSDCmdError.  check_svol selects the S-VOL
+        side ('-nowaits') instead of the P-VOL side ('-nowait').
+        """
+        if not check_svol:
+            option = '-nowait'
+        else:
+            option = '-nowaits'
+        args = '-g %s -d %s %s' % (copy_group, ldev_name, option)
+        ret, stdout, stderr = self.exec_raidcom('pairevtwait', args,
+                                                printflag=False)
+        if ret > 127:
+            opt = 'pairevtwait %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return ret
+
+    def comm_pairdisplay(self, copy_group, ldev_name=None):
+        """Run 'pairdisplay -CLI' for a copy group (optionally one pair).
+
+        Returns (ret, stdout, stderr).  Exit codes in NO_SUCH_DEVICE are
+        tolerated so callers can treat a missing pair as empty output;
+        other non-zero codes raise HBSDCmdError.
+        """
+        if not ldev_name:
+            args = '-g %s -CLI' % copy_group
+        else:
+            args = '-g %s -d %s -CLI' % (copy_group, ldev_name)
+        ret, stdout, stderr = self.exec_raidcom('pairdisplay', args,
+                                                printflag=False)
+        if ret and ret not in NO_SUCH_DEVICE:
+            opt = 'pairdisplay %s' % args
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return ret, stdout, stderr
+
+    def check_copy_grp(self, copy_group):
+        """Count rows (capped at 2) naming *copy_group* in the copy-group
+        listing; a complete group has both pvol and svol entries."""
+        stdout = self.comm_get_copy_grp()
+        lines = stdout.splitlines()
+        count = 0
+        for line in lines[1:]:
+            line = shlex.split(line)
+            if line[0] == copy_group:
+                count += 1
+                if count == 2:
+                    break
+        return count
+
+    def check_device_grp(self, group_name, ldev, ldev_name=None):
+        """Return True if *ldev* belongs to the device group (and, when
+        given, is registered under *ldev_name*)."""
+        stdout = self.comm_get_device_grp(group_name)
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            line = shlex.split(line)
+            # Column 2 is the LDEV ID, column 1 the device name.
+            if int(line[2]) == ldev:
+                if not ldev_name:
+                    return True
+                else:
+                    return line[1] == ldev_name
+        else:
+            return False
+
+    def is_smpl(self, copy_group, ldev_name):
+        """Return True when the pair is in simplex state (SMPL / NOT_SET
+        in any pairdisplay row) or produces no output at all."""
+        ret, stdout, stderr = self.comm_pairdisplay(copy_group,
+                                                    ldev_name=ldev_name)
+        if not stdout:
+            return True
+
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            line = shlex.split(line)
+            # Column 9 is the pair status field.
+            if line[9] in [NOT_SET, 'SMPL']:
+                return True
+        else:
+            return False
+
+    def get_copy_groups(self):
+        """Return the subset of this driver's copy-group names that
+        currently exist on the array, without duplicates."""
+        copy_groups = []
+        stdout = self.comm_get_copy_grp()
+        lines = stdout.splitlines()
+        for line in lines[1:]:
+            line = shlex.split(line)
+            if line[0] in self.copy_groups and line[0] not in copy_groups:
+                copy_groups.append(line[0])
+        return copy_groups
+
+    def get_matched_copy_group(self, pvol, svol, ldev_name):
+        """Return the copy group whose pvol device group ('<name>P')
+        contains *pvol* under *ldev_name*, or None if no group matches."""
+        for copy_group in self.get_copy_groups():
+            pvol_group = '%sP' % copy_group
+            if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name):
+                return copy_group
+        else:
+            return None
+
+    def get_paired_info(self, ldev, only_flag=False):
+        """Collect pairing info for *ldev* across snapshots and copy groups.
+
+        Returns {'pvol': <id or None>, 'svol': [{'lun', 'status',
+        'is_vvol'}, ...]}.  Snapshot (thin) relations are gathered first;
+        unless only_flag is set or *ldev* turned out to be an S-VOL, the
+        full (ShadowImage-style) copy groups are scanned as well.
+        """
+        paired_info = {'pvol': None, 'svol': []}
+        pvol = None
+        is_svol = False
+
+        stdout = self.comm_get_snapshot(ldev)
+        if stdout:
+            lines = stdout.splitlines()
+            line = shlex.split(lines[1])
+            # Column 2 holds the status code; column 6 the partner LDEV.
+            status = STATUS_TABLE.get(line[2], basic_lib.UNKN)
+
+            if line[1] == 'P-VOL':
+                pvol = ldev
+                svol = int(line[6])
+            else:
+                is_svol = True
+                pvol = int(line[6])
+                svol = ldev
+
+                # PSUS on the svol side may lag; re-read from the pvol.
+                if status == basic_lib.PSUS:
+                    status = self.get_snap_pvol_status(pvol, svol)
+
+            svol_info = {'lun': svol, 'status': status, 'is_vvol': True}
+            paired_info['svol'].append(svol_info)
+            paired_info['pvol'] = pvol
+
+        if only_flag or is_svol:
+            return paired_info
+
+        for copy_group in self.get_copy_groups():
+            ldev_name = None
+            pvol_status = basic_lib.UNKN
+            svol_status = basic_lib.UNKN
+
+            ret, stdout, stderr = self.comm_pairdisplay(copy_group)
+            if not stdout:
+                continue
+
+            lines = stdout.splitlines()
+            for line in lines[1:]:
+                line = shlex.split(line)
+                if line[9] not in ['P-VOL', 'S-VOL']:
+                    continue
+
+                # Columns 8 and 12 are the local/remote LDEV IDs.
+                ldev0 = int(line[8])
+                ldev1 = int(line[12])
+                if ldev not in [ldev0, ldev1]:
+                    continue
+
+                ldev_name = line[1]
+
+                if line[9] == 'P-VOL':
+                    pvol = ldev0
+                    svol = ldev1
+                    pvol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN)
+                else:
+                    svol = ldev0
+                    pvol = ldev1
+                    svol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN)
+
+                if svol == ldev:
+                    is_svol = True
+
+            if not ldev_name:
+                continue
+
+            # Only trust the pair if both device groups really contain
+            # the LDEVs under the same pair name.
+            pvol_group = '%sP' % copy_group
+            pvol_ok = self.check_device_grp(pvol_group, pvol,
+                                            ldev_name=ldev_name)
+
+            svol_group = '%sS' % copy_group
+            svol_ok = self.check_device_grp(svol_group, svol,
+                                            ldev_name=ldev_name)
+
+            if pvol_ok and svol_ok:
+                if pvol_status == basic_lib.PSUS:
+                    status = svol_status
+                else:
+                    status = pvol_status
+
+                svol_info = {'lun': svol, 'status': status, 'is_vvol': False}
+                paired_info['svol'].append(svol_info)
+
+                if is_svol:
+                    break
+
+        # When 'pvol' is 0, it should be true.
+        # So, it cannot remove 'is not None'.
+        if pvol is not None and paired_info['pvol'] is None:
+            paired_info['pvol'] = pvol
+
+        return paired_info
+
+    @storage_synchronized
+    def add_pair_config(self, pvol, svol, copy_group, ldev_name, mun):
+        """Register pvol/svol in the '<group>P'/'<group>S' device groups
+        and (re)create the copy group with mirror ID *mun*.
+
+        A copy group counting exactly 1 entry is half-configured and is
+        deleted before being recreated.
+        """
+        pvol_group = '%sP' % copy_group
+        svol_group = '%sS' % copy_group
+        try:
+            self.comm_lock()
+            self.comm_add_device_grp(pvol_group, ldev_name, pvol)
+            self.comm_add_device_grp(svol_group, ldev_name, svol)
+            nr_copy_groups = self.check_copy_grp(copy_group)
+            if nr_copy_groups == 1:
+                self.comm_delete_copy_grp(copy_group)
+            if nr_copy_groups != 2:
+                self.comm_add_copy_grp(copy_group, pvol_group,
+                                       svol_group, mun)
+        finally:
+            self.comm_unlock()
+
+    @storage_synchronized
+    def delete_pair_config(self, pvol, svol, copy_group, ldev_name):
+        """Remove pvol/svol from the copy group's device groups, checking
+        membership first so the call is idempotent."""
+        pvol_group = '%sP' % copy_group
+        svol_group = '%sS' % copy_group
+        try:
+            self.comm_lock()
+            if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name):
+                self.comm_delete_device_grp(pvol_group, pvol)
+            if self.check_device_grp(svol_group, svol, ldev_name=ldev_name):
+                self.comm_delete_device_grp(svol_group, svol)
+        finally:
+            self.comm_unlock()
+
+    def _wait_for_pair_status(self, copy_group, ldev_name,
+                              status, timeout, check_svol, start):
+        if self.comm_pairevtwait(copy_group, ldev_name,
+                                 check_svol) in status:
+            raise loopingcall.LoopingCallDone()
+
+        if time.time() - start >= timeout:
+            msg = basic_lib.output_err(
+                637, method='_wait_for_pair_status', timout=timeout)
+            raise exception.HBSDError(message=msg)
+
+    def wait_pair(self, copy_group, ldev_name, status, timeout,
+                  interval, check_svol=False):
+        """Block until the pair reaches one of *status* or
+        _wait_for_pair_status raises on timeout."""
+        loop = loopingcall.FixedIntervalLoopingCall(
+            self._wait_for_pair_status, copy_group, ldev_name,
+            status, timeout, check_svol, time.time())
+
+        loop.start(interval=interval).wait()
+
+    def comm_create_pair(self, pvol, svol, is_vvol):
+        """Create a full copy (is_vvol=False) or thin snapshot pair.
+
+        Full copies pick the first free mirror unit (MUN), set up the
+        device/copy groups, run paircreate, and wait for PSUS; any failure
+        triggers a best-effort rollback (split, deconfigure, HORCM
+        restart) before re-raising.  Thin copies use raidcom snapshot
+        commands with an analogous rollback.  Raises HBSDBusy when every
+        MUN (or the snapshot quota) is exhausted.
+        """
+        timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
+        interval = self.conf.hitachi_copy_check_interval
+        if not is_vvol:
+            restart = False
+            create = False
+            ldev_name = LDEV_NAME % (pvol, svol)
+            mun = 0
+            # Find a mirror unit whose pvol group does not yet hold pvol;
+            # for/else fires when all MUNs are busy.
+            for mun in range(MAX_MUNS):
+                copy_group = self.copy_groups[mun]
+                pvol_group = '%sP' % copy_group
+
+                if not self.check_device_grp(pvol_group, pvol):
+                    break
+            else:
+                msg = basic_lib.output_err(
+                    615, copy_method=basic_lib.FULL, pvol=pvol)
+                raise exception.HBSDBusy(message=msg)
+            try:
+                self.add_pair_config(pvol, svol, copy_group, ldev_name, mun)
+                self.restart_pair_horcm()
+                restart = True
+                self.comm_paircreate(copy_group, ldev_name)
+                create = True
+                self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS],
+                               timeout, interval)
+                self.wait_pair(copy_group, ldev_name,
+                               [basic_lib.PSUS, basic_lib.COPY],
+                               timeout, interval, check_svol=True)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    # Rollback: each step is best-effort and only logged,
+                    # so the original exception is what propagates.
+                    if create:
+                        try:
+                            self.wait_pair(copy_group, ldev_name,
+                                           [basic_lib.PSUS], timeout,
+                                           interval)
+                            self.wait_pair(copy_group, ldev_name,
+                                           [basic_lib.PSUS], timeout,
+                                           interval, check_svol=True)
+                        except Exception as ex:
+                            LOG.warning(_('Failed to create pair: %s') %
+                                        six.text_type(ex))
+
+                        try:
+                            self.comm_pairsplit(copy_group, ldev_name)
+                            self.wait_pair(
+                                copy_group, ldev_name,
+                                [basic_lib.SMPL], timeout,
+                                self.conf.hitachi_async_copy_check_interval)
+                        except Exception as ex:
+                            LOG.warning(_('Failed to create pair: %s') %
+                                        six.text_type(ex))
+
+                    if self.is_smpl(copy_group, ldev_name):
+                        try:
+                            self.delete_pair_config(pvol, svol, copy_group,
+                                                    ldev_name)
+                        except Exception as ex:
+                            LOG.warning(_('Failed to create pair: %s') %
+                                        six.text_type(ex))
+
+                    if restart:
+                        try:
+                            self.restart_pair_horcm()
+                        except Exception as ex:
+                            LOG.warning(_('Failed to restart horcm: %s') %
+                                        six.text_type(ex))
+
+        else:
+            self.check_snap_count(pvol)
+            self.comm_add_snapshot(pvol, svol)
+
+            try:
+                self.wait_snap(pvol, svol, [basic_lib.PAIR], timeout, interval)
+                self.comm_modify_snapshot(svol, 'create')
+                self.wait_snap(pvol, svol, [basic_lib.PSUS], timeout, interval)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    try:
+                        self.comm_delete_snapshot(svol)
+                        self.wait_snap(
+                            pvol, svol, [basic_lib.SMPL], timeout,
+                            self.conf.hitachi_async_copy_check_interval)
+                    except Exception as ex:
+                        LOG.warning(_('Failed to create pair: %s') %
+                                    six.text_type(ex))
+
+    def delete_pair(self, pvol, svol, is_vvol):
+        """Dissolve a full-copy pair (split, wait for SMPL, deconfigure)
+        or delete a thin snapshot.
+
+        A full-copy pair whose copy group no longer exists is treated as
+        already deleted and silently ignored.
+        """
+        timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
+        interval = self.conf.hitachi_async_copy_check_interval
+        if not is_vvol:
+            ldev_name = LDEV_NAME % (pvol, svol)
+            copy_group = self.get_matched_copy_group(pvol, svol, ldev_name)
+            if not copy_group:
+                return
+            try:
+                self.comm_pairsplit(copy_group, ldev_name)
+                self.wait_pair(copy_group, ldev_name, [basic_lib.SMPL],
+                               timeout, interval)
+            finally:
+                # Deconfigure even when the split/wait failed, as long as
+                # the pair really is simplex by now.
+                if self.is_smpl(copy_group, ldev_name):
+                    self.delete_pair_config(pvol, svol, copy_group, ldev_name)
+        else:
+            self.comm_delete_snapshot(svol)
+            self.wait_snap(pvol, svol, [basic_lib.SMPL], timeout, interval)
+
+    def comm_raidqry(self):
+        """Return 'raidqry -h' output; raises HBSDCmdError on failure."""
+        ret, stdout, stderr = self.exec_command('raidqry', '-h')
+        if ret:
+            opt = 'raidqry -h'
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return stdout
+
+    def get_comm_version(self):
+        """Return the CCI version string (second token of raidqry's
+        second output line)."""
+        stdout = self.comm_raidqry()
+        lines = stdout.splitlines()
+        return shlex.split(lines[1])[1]
+
+    def output_param_to_log(self, conf):
+        """Log every non-secret driver option and its configured value."""
+        for opt in volume_opts:
+            if not opt.secret:
+                value = getattr(conf, opt.name)
+                LOG.info('\t%-35s%s' % (opt.name + ': ',
+                         six.text_type(value)))
+
+    def create_lock_file(self):
+        """Create the empty files used as inter-process lock targets for
+        the raidcom, pair-raidcom, horcmgr and per-array resource locks."""
+        inst = self.conf.hitachi_horcm_numbers[0]
+        pair_inst = self.conf.hitachi_horcm_numbers[1]
+        serial = self.conf.hitachi_serial_number
+        raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
+        raidcom_pair_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, pair_inst)
+        horcmgr_lock_file = '%s%d' % (HORCMGR_LOCK_FILE, pair_inst)
+        resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
+
+        basic_lib.create_empty_file(raidcom_lock_file)
+        basic_lib.create_empty_file(raidcom_pair_lock_file)
+        basic_lib.create_empty_file(horcmgr_lock_file)
+        basic_lib.create_empty_file(resource_lock_file)
+
+    def connect_storage(self):
+        """Set up the HORCM manager using this host's connector IP."""
+        properties = utils.brick_get_connector_properties()
+        self.setup_horcmgr(properties['ip'])
+
+    def get_max_hostgroups(self):
+        """Return the maximum value of hostgroup id."""
+        return MAX_HOSTGROUPS
+
+    def get_hostgroup_luns(self, port, gid):
+        list = []
+        self.add_used_hlun(port, gid, list)
+
+        return list
diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py
new file mode 100644 (file)
index 0000000..8f2d3cc
--- /dev/null
@@ -0,0 +1,420 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+iSCSI Cinder volume driver for Hitachi storage.
+
+"""
+
+from contextlib import nested
+import os
+import threading
+
+from oslo.config import cfg
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder import utils
+import cinder.volume.driver
+from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
+from cinder.volume.drivers.hitachi import hbsd_common as common
+
+LOG = logging.getLogger(__name__)
+
+CHAP_METHOD = ('None', 'CHAP None', 'CHAP')
+
+volume_opts = [
+    cfg.BoolOpt('hitachi_add_chap_user',
+                default=False,
+                help='Add CHAP user'),
+    cfg.StrOpt('hitachi_auth_method',
+               default=None,
+               help='iSCSI authentication method'),
+    cfg.StrOpt('hitachi_auth_user',
+               default='%sCHAP-user' % basic_lib.NAME_PREFIX,
+               help='iSCSI authentication username'),
+    cfg.StrOpt('hitachi_auth_password',
+               default='%sCHAP-password' % basic_lib.NAME_PREFIX,
+               help='iSCSI authentication password'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+
+
+class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
+    VERSION = common.VERSION
+
+    def __init__(self, *args, **kwargs):
+        """Initialize driver state; backend setup is deferred to do_setup()."""
+        # C locale so CLI command output is not localized before parsing.
+        os.environ['LANG'] = 'C'
+        super(HBSDISCSIDriver, self).__init__(*args, **kwargs)
+        self.db = kwargs.get('db')
+        self.common = None  # HBSDCommon instance, created in do_setup()
+        self.configuration.append_config_values(common.volume_opts)
+        self._stats = {}  # cached stats returned by get_volume_stats()
+        self.context = None
+        self.do_setup_status = threading.Event()  # set when do_setup() ends
+
+    def _check_param(self):
+        """Validate the iSCSI-specific configuration options.
+
+        :raises: exception.HBSDError if hitachi_auth_method is set but is
+                 not one of CHAP_METHOD
+        """
+        self.configuration.append_config_values(volume_opts)
+        if (self.configuration.hitachi_auth_method and
+                self.configuration.hitachi_auth_method not in CHAP_METHOD):
+            msg = basic_lib.output_err(601, param='hitachi_auth_method')
+            raise exception.HBSDError(message=msg)
+        # The literal string 'None' means "no authentication".
+        if self.configuration.hitachi_auth_method == 'None':
+            self.configuration.hitachi_auth_method = None
+        for opt in volume_opts:
+            # Touch every option so bad values fail here, not mid-operation.
+            getattr(self.configuration, opt.name)
+
+    def check_param(self):
+        """Validate common and iSCSI configuration.
+
+        Unexpected exceptions are wrapped in HBSDError; HBSDError itself
+        is re-raised unchanged.
+        """
+        try:
+            self.common.check_param()
+            self._check_param()
+        except exception.HBSDError:
+            raise
+        except Exception as ex:
+            msg = basic_lib.output_err(601, param=six.text_type(ex))
+            raise exception.HBSDError(message=msg)
+
+    def output_param_to_log(self):
+        """Log all non-secret driver options while holding the system lock."""
+        lock = basic_lib.get_process_lock(self.common.system_lock_file)
+
+        with lock:
+            self.common.output_param_to_log('iSCSI')
+            for opt in volume_opts:
+                if not opt.secret:  # never log passwords
+                    value = getattr(self.configuration, opt.name)
+                    LOG.info('\t%-35s%s' % (opt.name + ': ',
+                             six.text_type(value)))
+
+    def _delete_lun_iscsi(self, hostgroups, ldev):
+        """Unmap ldev from the given host groups.
+
+        A missing mapping (HBSDNotFound) is only warned about, not raised.
+        """
+        try:
+            self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
+        except exception.HBSDNotFound:
+            msg = basic_lib.set_msg(301, ldev=ldev)
+            LOG.warning(msg)
+
+    def _add_target(self, hostgroups, ldev):
+        """Map ldev to every host group via the 'autargetmap' command."""
+        self.common.add_lun('autargetmap', hostgroups, ldev)
+
+    def _add_initiator(self, hgs, port, gid, host_iqn):
+        """Register host_iqn on (port, gid) and record the group in hgs."""
+        self.common.command.comm_add_initiator(port, gid, host_iqn)
+        hgs.append({'port': port, 'gid': int(gid), 'detected': True})
+        LOG.debug("Create iSCSI target for %s" % hgs)
+
+    def _get_unused_gid_iscsi(self, port):
+        """Return a free target GID on port within the configured range.
+
+        Falls back to DEFAULT_GROUP_RANGE when hitachi_group_range is unset.
+        """
+        group_range = self.configuration.hitachi_group_range
+        if not group_range:
+            group_range = basic_lib.DEFAULT_GROUP_RANGE
+        return self.common.command.get_unused_gid_iscsi(group_range, port)
+
+    def _delete_iscsi_target(self, port, target_no, target_alias):
+        """Best-effort removal of an iSCSI target; failure only logs a warning."""
+        ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
+            port, target_no, target_alias)
+        if ret:
+            msg = basic_lib.set_msg(
+                307, port=port, tno=target_no, alias=target_alias)
+            LOG.warning(msg)
+
+    def _delete_chap_user(self, port):
+        """Best-effort removal of the configured CHAP user on port."""
+        ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
+        if ret:
+            msg = basic_lib.set_msg(
+                303, user=self.configuration.hitachi_auth_user)
+            LOG.warning(msg)
+
+    def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
+        """Look up host groups for host_iqn on the configured target ports.
+
+        Matching groups are appended to hgs; the helper's return value (a
+        list of ports) is passed through unchanged.
+        """
+        return self.common.command.comm_get_hostgroup_info_iscsi(
+            hgs, host_iqn, self.configuration.hitachi_target_ports)
+
+    def _discovery_iscsi_target(self, hostgroups):
+        """Resolve the portal IP/port and target IQN for each host group.
+
+        Results are stored back into each host group dict under the keys
+        'ip_addr', 'ip_port' and 'target_iqn'.
+        """
+        for hostgroup in hostgroups:
+            ip_addr, ip_port = self.common.command.comm_get_iscsi_ip(
+                hostgroup['port'])
+            target_iqn = self.common.command.comm_get_target_iqn(
+                hostgroup['port'], hostgroup['gid'])
+            hostgroup['ip_addr'] = ip_addr
+            hostgroup['ip_port'] = ip_port
+            hostgroup['target_iqn'] = target_iqn
+            LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s"
+                      % {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
+
+    def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
+        """Create (or reuse) an iSCSI target on each port and register
+        add_iqn as an initiator.
+
+        A new target is created only when no target with target_iqn
+        exists; GID allocation is retried a bounded number of times.  If
+        initiator registration later fails, any target/CHAP user created
+        here is rolled back.  Ports where no GID could be obtained are
+        logged and skipped rather than aborting the whole operation.
+        """
+        for port in ports:
+            added_hostgroup = False
+            added_user = False
+            LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
+                      'target_iqn: %(tiqn)s target_alias: %(alias)s '
+                      'add_iqn: %(aiqn)s)' %
+                      {'hgs': hgs, 'port': port, 'tiqn': target_iqn,
+                       'alias': target_alias, 'aiqn': add_iqn})
+            # Reuse an existing target for this IQN/alias when present.
+            gid = self.common.command.get_gid_from_targetiqn(
+                target_iqn, target_alias, port)
+            if gid is None:
+                for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
+                    gid = None
+                    try:
+                        gid = self._get_unused_gid_iscsi(port)
+                        self.common.command.comm_add_hostgrp_iscsi(
+                            port, gid, target_alias, target_iqn)
+                        added_hostgroup = True
+                    except exception.HBSDNotFound:
+                        # Could not claim a GID this round; warn and retry.
+                        msg = basic_lib.set_msg(312, resource='GID')
+                        LOG.warning(msg)
+                        continue
+                    except Exception as ex:
+                        # Any other failure is not retried for this port.
+                        msg = basic_lib.set_msg(
+                            309, port=port, alias=target_alias,
+                            reason=six.text_type(ex))
+                        LOG.warning(msg)
+                        break
+                    else:
+                        LOG.debug('Completed to add target'
+                                  '(port: %(port)s gid: %(gid)d)'
+                                  % {'port': port, 'gid': gid})
+                        break
+            if gid is None:
+                LOG.error(_('Failed to add target(port: %s)') % port)
+                continue
+            try:
+                if added_hostgroup:
+                    if self.configuration.hitachi_auth_method:
+                        added_user = self.common.command.set_chap_authention(
+                            port, gid)
+                    self.common.command.comm_set_hostgrp_reportportal(
+                        port, target_alias)
+                self._add_initiator(hgs, port, gid, add_iqn)
+            except Exception as ex:
+                msg = basic_lib.set_msg(
+                    316, port=port, reason=six.text_type(ex))
+                LOG.warning(msg)
+                # Roll back only what this call created.
+                if added_hostgroup:
+                    if added_user:
+                        self._delete_chap_user(port)
+                    self._delete_iscsi_target(port, gid, target_alias)
+
+    def add_hostgroup_core(self, hgs, ports, target_iqn,
+                           target_alias, add_iqn):
+        """Create targets on the given ports; no-op when ports is empty."""
+        if ports:
+            self._fill_groups(hgs, ports, target_iqn, target_alias, add_iqn)
+
+    def add_hostgroup_master(self, hgs, master_iqn, host_ip, security_ports):
+        """Ensure targets exist for this host on all given security ports.
+
+        Only ports with no host group already present in hgs get a new
+        target; creation is skipped entirely unless both target ports and
+        group requests are configured.
+
+        :raises: exception.HBSDError if hgs is still empty afterwards
+        """
+        target_ports = self.configuration.hitachi_target_ports
+        group_request = self.configuration.hitachi_group_request
+        target_alias = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
+        if target_ports and group_request:
+            target_iqn = '%s.target' % master_iqn
+
+            # Ports in security_ports that have no host group in hgs yet.
+            diff_ports = []
+            for port in security_ports:
+                for hostgroup in hgs:
+                    if hostgroup['port'] == port:
+                        break
+                else:  # no existing group on this port
+                    diff_ports.append(port)
+
+            self.add_hostgroup_core(hgs, diff_ports, target_iqn,
+                                    target_alias, master_iqn)
+        if not hgs:
+            msg = basic_lib.output_err(649)
+            raise exception.HBSDError(message=msg)
+
+    def add_hostgroup(self):
+        """Register this node's initiator IQN with the storage.
+
+        :raises: exception.HBSDError if the local connector properties do
+                 not include an initiator IQN
+        """
+        properties = utils.brick_get_connector_properties()
+        if 'initiator' not in properties:
+            msg = basic_lib.output_err(650, resource='HBA')
+            raise exception.HBSDError(message=msg)
+        LOG.debug("initiator: %s" % properties['initiator'])
+        hostgroups = []
+        security_ports = self._get_hostgroup_info_iscsi(
+            hostgroups, properties['initiator'])
+        self.add_hostgroup_master(hostgroups, properties['initiator'],
+                                  properties['ip'], security_ports)
+
+    def _get_properties(self, volume, hostgroups):
+        """Build the connection_info 'data' dict for the attach reply.
+
+        Includes portal address, target IQN and LUN, plus CHAP
+        credentials when an auth method is configured.
+        """
+        conf = self.configuration
+        properties = {}
+        self._discovery_iscsi_target(hostgroups)
+        # Only the first host group's mapping is returned to the initiator.
+        hostgroup = hostgroups[0]
+
+        properties['target_discovered'] = True
+        properties['target_portal'] = "%s:%s" % (hostgroup['ip_addr'],
+                                                 hostgroup['ip_port'])
+        properties['target_iqn'] = hostgroup['target_iqn']
+        properties['target_lun'] = hostgroup['lun']
+
+        if conf.hitachi_auth_method:
+            properties['auth_method'] = 'CHAP'
+            properties['auth_username'] = conf.hitachi_auth_user
+            properties['auth_password'] = conf.hitachi_auth_password
+
+        return properties
+
+    def do_setup(self, context):
+        """One-time driver initialization.
+
+        Validates configuration, creates lock files, connects to the
+        storage, registers this host's initiator, then sets
+        do_setup_status so volume operations may proceed.
+        """
+        self.context = context
+        self.common = common.HBSDCommon(self.configuration, self,
+                                        context, self.db)
+
+        self.check_param()
+
+        self.common.create_lock_file()
+
+        self.common.command.connect_storage()
+
+        # Host group registration must not race with other processes.
+        lock = basic_lib.get_process_lock(self.common.service_lock_file)
+        with lock:
+            self.add_hostgroup()
+
+        self.output_param_to_log()
+        self.do_setup_status.set()  # unblock waiting volume operations
+
+    def check_for_setup_error(self):
+        """No extra checks; do_setup() already validates the configuration."""
+        pass
+
+    def extend_volume(self, volume, new_size):
+        """Grow volume to new_size, waiting for do_setup() to finish first."""
+        self.do_setup_status.wait()
+        self.common.extend_volume(volume, new_size)
+
+    def get_volume_stats(self, refresh=False):
+        if refresh:
+            if self.do_setup_status.isSet():
+                self.common.output_backend_available_once()
+                _stats = self.common.update_volume_stats("iSCSI")
+                if _stats:
+                    self._stats = _stats
+        return self._stats
+
+    def create_volume(self, volume):
+        """Create a backend LDEV for volume; return its metadata."""
+        self.do_setup_status.wait()
+        metadata = self.common.create_volume(volume)
+        return metadata
+
+    def delete_volume(self, volume):
+        """Delete the backend LDEV backing volume."""
+        self.do_setup_status.wait()
+        self.common.delete_volume(volume)
+
+    def create_snapshot(self, snapshot):
+        """Create a snapshot on the backend; return its metadata."""
+        self.do_setup_status.wait()
+        metadata = self.common.create_snapshot(snapshot)
+        return metadata
+
+    def delete_snapshot(self, snapshot):
+        """Delete a backend snapshot."""
+        self.do_setup_status.wait()
+        self.common.delete_snapshot(snapshot)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Clone src_vref into volume; return the new volume's metadata."""
+        self.do_setup_status.wait()
+        metadata = self.common.create_cloned_volume(volume, src_vref)
+        return metadata
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create volume from snapshot; return the new volume's metadata."""
+        self.do_setup_status.wait()
+        metadata = self.common.create_volume_from_snapshot(volume, snapshot)
+        return metadata
+
+    def _initialize_connection(self, ldev, connector, src_hgs=None):
+        """Map ldev for connector and return the host groups used.
+
+        When src_hgs is given, its host groups are reused (copied);
+        otherwise host groups are looked up (and created if necessary)
+        for the connector's initiator IQN.
+        """
+        LOG.debug("Call _initialize_connection "
+                  "(config_group: %(group)s ldev: %(ldev)d)"
+                  % {'group': self.configuration.config_group, 'ldev': ldev})
+        if src_hgs:
+            hostgroups = src_hgs[:]
+        else:
+            hostgroups = []
+            security_ports = self._get_hostgroup_info_iscsi(
+                hostgroups, connector['initiator'])
+            self.add_hostgroup_master(hostgroups, connector['initiator'],
+                                      connector['ip'], security_ports)
+
+        self._add_target(hostgroups, ldev)
+
+        return hostgroups
+
+    def initialize_connection(self, volume, connector):
+        """Attach volume for connector and return iSCSI connection info.
+
+        :raises: exception.HBSDError if the volume has no backing LDEV
+        """
+        self.do_setup_status.wait()
+        ldev = self.common.get_ldev(volume)
+        if ldev is None:
+            msg = basic_lib.output_err(619, volume_id=volume['id'])
+            raise exception.HBSDError(message=msg)
+        self.common.add_volinfo(ldev, volume['id'])
+        # Serialize mapping operations on this LDEV.
+        with nested(self.common.volume_info[ldev]['lock'],
+                    self.common.volume_info[ldev]['in_use']):
+            hostgroups = self._initialize_connection(ldev, connector)
+            protocol = 'iscsi'
+            properties = self._get_properties(volume, hostgroups)
+            LOG.debug('Initialize volume_info: %s'
+                      % self.common.volume_info)
+
+        LOG.debug('HFCDrv: properties=%s' % properties)
+        return {
+            'driver_volume_type': protocol,
+            'data': properties
+        }
+
+    def _terminate_connection(self, ldev, connector, src_hgs):
+        """Unmap ldev from the host groups listed in src_hgs."""
+        LOG.debug("Call _terminate_connection(config_group: %s)"
+                  % self.configuration.config_group)
+        hostgroups = src_hgs[:]
+        self._delete_lun_iscsi(hostgroups, ldev)
+
+        LOG.debug("*** _terminate_ ***")
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Detach volume from connector.
+
+        A volume without an LDEV only logs a warning and returns; a
+        connector without an initiator, or an initiator with no host
+        groups, raises HBSDError.
+        """
+        self.do_setup_status.wait()
+        ldev = self.common.get_ldev(volume)
+        if ldev is None:
+            msg = basic_lib.set_msg(302, volume_id=volume['id'])
+            LOG.warning(msg)
+            return
+
+        if 'initiator' not in connector:
+            msg = basic_lib.output_err(650, resource='HBA')
+            raise exception.HBSDError(message=msg)
+
+        hostgroups = []
+        self._get_hostgroup_info_iscsi(hostgroups,
+                                       connector['initiator'])
+        if not hostgroups:
+            msg = basic_lib.output_err(649)
+            raise exception.HBSDError(message=msg)
+
+        self.common.add_volinfo(ldev, volume['id'])
+        # Serialize unmapping with any concurrent mapping of this LDEV.
+        with nested(self.common.volume_info[ldev]['lock'],
+                    self.common.volume_info[ldev]['in_use']):
+            self._terminate_connection(ldev, connector, hostgroups)
+
+    def create_export(self, context, volume):
+        """No-op: target mapping happens in initialize_connection()."""
+        pass
+
+    def ensure_export(self, context, volume):
+        """No-op: nothing to re-establish on service restart."""
+        pass
+
+    def remove_export(self, context, volume):
+        """No-op: unmapping happens in terminate_connection()."""
+        pass
+
+    def pair_initialize_connection(self, unused_ldev):
+        """No-op for iSCSI; copy pairs need no extra connection setup."""
+        pass
+
+    def pair_terminate_connection(self, unused_ldev):
+        """No-op for iSCSI; copy pairs need no extra connection teardown."""
+        pass
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        """Upload volume's data to the image service.
+
+        :raises: exception.HBSDError if the volume is currently attached
+                 (instance_uuid or attached_host is set)
+        """
+        self.do_setup_status.wait()
+        if (volume['instance_uuid'] or volume['attached_host']):
+            desc = 'volume %s' % volume['id']
+            msg = basic_lib.output_err(660, desc=desc)
+            raise exception.HBSDError(message=msg)
+        super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
+                                                          image_service,
+                                                          image_meta)
diff --git a/cinder/volume/drivers/hitachi/hbsd_snm2.py b/cinder/volume/drivers/hitachi/hbsd_snm2.py
new file mode 100644 (file)
index 0000000..4179182
--- /dev/null
@@ -0,0 +1,1086 @@
+# Copyright (C) 2014, Hitachi, Ltd.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from contextlib import nested
+import re
+import shlex
+import threading
+import time
+
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder import utils
+from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
+
+LOG = logging.getLogger(__name__)
+
+SNM2_ENV = ('LANG=C STONAVM_HOME=/usr/stonavm '
+            'LD_LIBRARY_PATH=/usr/stonavm/lib '
+            'STONAVM_RSP_PASS=on STONAVM_ACT=on')
+
+MAX_HOSTGROUPS = 127
+MAX_HOSTGROUPS_ISCSI = 254
+MAX_HLUN = 2047
+EXEC_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'hsnm_'
+EXEC_TIMEOUT = 10
+EXEC_INTERVAL = 1
+
+CHAP_TIMEOUT = 5
+PAIRED = 12
+DUMMY_LU = -1
+
+
+class HBSDSNM2(basic_lib.HBSDBasicLib):
+
+    def __init__(self, conf):
+        """Initialize SNM2 command state from the driver configuration.
+
+        hitachi_copy_speed maps to the copy pace string used by SNM2:
+        <= 2 -> 'slow', 3 -> 'normal', otherwise 'prior'.
+        """
+        super(HBSDSNM2, self).__init__(conf=conf)
+
+        self.unit_name = conf.hitachi_unit_name
+        # Thread lock plus a per-unit lock file for cross-process exclusion.
+        self.hsnm_lock = threading.Lock()
+        self.hsnm_lock_file = ('%s%s'
+                               % (EXEC_LOCK_PATH_BASE, self.unit_name))
+        copy_speed = conf.hitachi_copy_speed
+        if copy_speed <= 2:
+            self.pace = 'slow'
+        elif copy_speed == 3:
+            self.pace = 'normal'
+        else:
+            self.pace = 'prior'
+
+    def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start):
+        """One retry iteration of exec_hsnm, driven by a looping call.
+
+        Stops the loop (LoopingCallDone) on success (ret == 0), when
+        retries are disabled, on timeout, or when stderr carries one of
+        the listed DME* error codes; otherwise returning lets the
+        looping call retry.
+        """
+        lock = basic_lib.get_process_lock(self.hsnm_lock_file)
+        # Hold both the in-process lock and the inter-process file lock
+        # while the SNM2 command runs.
+        with nested(self.hsnm_lock, lock):
+            ret, stdout, stderr = self.exec_command('env', args=args,
+                                                    printflag=printflag)
+
+        if not ret or noretry:
+            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
+
+        if time.time() - start >= timeout:
+            LOG.error(_("snm2 command timeout."))
+            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
+
+        # These SNM2 error codes stop the retry loop immediately.
+        if (re.search('DMEC002047', stderr)
+                or re.search('DMEC002048', stderr)
+                or re.search('DMED09000A', stderr)
+                or re.search('DMED090026', stderr)
+                or re.search('DMED0E002B', stderr)
+                or re.search('DMER03006A', stderr)
+                or re.search('DMER030080', stderr)
+                or re.search('DMER0300B8', stderr)
+                or re.search('DMER0800CF', stderr)
+                or re.search('DMER0800D[0-6D]', stderr)
+                or re.search('DMES052602', stderr)):
+            LOG.error(_("Unexpected error occurs in snm2."))
+            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
+
+    def exec_hsnm(self, command, args, printflag=True, noretry=False,
+                  timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL):
+        """Run an SNM2 CLI command with the SNM2 environment and retries.
+
+        :returns: the (ret, stdout, stderr) tuple of the final attempt
+                  made by _wait_for_exec_hsnm
+        """
+        args = '%s %s %s' % (SNM2_ENV, command, args)
+
+        loop = loopingcall.FixedIntervalLoopingCall(
+            self._wait_for_exec_hsnm, args, printflag,
+            noretry, timeout, time.time())
+
+        return loop.start(interval=interval).wait()
+
+    def get_comm_version(self):
+        ret, stdout, stderr = self.exec_hsnm('auman', '-help')
+        m = re.search('Version (\d+).(\d+)', stdout)
+        if not m:
+            msg = basic_lib.output_err(
+                600, cmd='auman', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return '%s.%s' % (m.group(1), m.group(2))
+
+    def add_used_hlun(self, command, port, gid, used_list, ldev):
+        """Collect H-LUNs already used on (port, gid) into used_list.
+
+        Output rows are parsed as: port, gid (first 3 digits of the
+        second column), hlun, ldev.  Returns the H-LUN number if ldev is
+        already mapped in this host group, otherwise None.
+        """
+        unit = self.unit_name
+        ret, stdout, stderr = self.exec_hsnm(command,
+                                             '-unit %s -refer' % unit)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd=command, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        lines = stdout.splitlines()
+        for line in lines[2:]:  # skip the two header lines
+            line = shlex.split(line)
+            if not line:
+                continue
+            if line[0] == port and int(line[1][0:3]) == gid:
+                if int(line[2]) not in used_list:
+                    used_list.append(int(line[2]))
+                if int(line[3]) == ldev:
+                    hlu = int(line[2])
+                    LOG.warning(_('ldev(%(ldev)d) is already mapped '
+                                  '(hlun: %(hlu)d)')
+                                % {'ldev': ldev, 'hlu': hlu})
+                    return hlu
+        return None
+
+    def get_unused_ldev(self, ldev_range):
+        """Return the first unused LDEV number within [start, end].
+
+        Walks the 'auluref' listing (ordered scan): each existing LDEV
+        equal to the candidate bumps the candidate by one; the first gap
+        wins.  The for/else marks the candidate found when the listing is
+        exhausted without a break.
+
+        :raises: exception.HBSDError if the range has no free LDEV
+        """
+        start = ldev_range[0]
+        end = ldev_range[1]
+        unit = self.unit_name
+
+        ret, stdout, stderr = self.exec_hsnm('auluref', '-unit %s' % unit)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='auluref', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        free_ldev = start
+        lines = stdout.splitlines()
+        found = False
+        for line in lines[2:]:  # skip the two header lines
+            line = shlex.split(line)
+            if not line:
+                continue
+            ldev_num = int(line[0])
+            if free_ldev > ldev_num:
+                continue
+            if free_ldev == ldev_num:
+                free_ldev += 1
+            else:
+                found = True  # gap before this existing LDEV
+                break
+            if free_ldev > end:
+                break  # range exhausted; found stays False
+        else:
+            found = True  # listing exhausted; candidate is free
+
+        if not found:
+            msg = basic_lib.output_err(648, resource='LDEV')
+            raise exception.HBSDError(message=msg)
+
+        return free_ldev
+
+    def get_hgname_gid(self, port, host_grp_name):
+        """Return the GID of host group host_grp_name on port, or None.
+
+        Scans 'auhgdef -refer' output, which is sectioned per port by
+        lines starting with 'Port'; only numeric-GID rows of the matching
+        port section are considered.
+        """
+        unit = self.unit_name
+        ret, stdout, stderr = self.exec_hsnm('auhgdef',
+                                             '-unit %s -refer' % unit)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        lines = stdout.splitlines()
+        is_target_port = False
+        for line in lines:
+            line = shlex.split(line)
+            if not line:
+                continue
+            if line[0] == 'Port' and line[1] == port:
+                is_target_port = True
+                continue
+            if is_target_port:
+                if line[0] == 'Port':
+                    break  # next port section begins; stop scanning
+                if not line[0].isdigit():
+                    continue
+                gid = int(line[0])
+                if line[1] == host_grp_name:
+                    return gid
+        return None
+
+    def get_unused_gid(self, group_range, port):
+        """Return the first unused GID on port within [start, end].
+
+        Same ordered gap-scan as get_unused_ldev, restricted to the
+        matching port section of 'auhgdef -refer' and capped at
+        MAX_HOSTGROUPS.
+
+        :raises: exception.HBSDError if no free GID exists in range
+        """
+        start = group_range[0]
+        end = group_range[1]
+        unit = self.unit_name
+
+        ret, stdout, stderr = self.exec_hsnm('auhgdef',
+                                             '-unit %s -refer' % unit)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+        is_target_port = False
+        free_gid = start
+        found = False
+        for line in lines:
+            line = shlex.split(line)
+            if not line:
+                continue
+            if line[0] == 'Port' and line[1] == port:
+                is_target_port = True
+                continue
+            if is_target_port:
+                if line[0] == 'Port':
+                    found = True  # port section ended; candidate is free
+                    break
+                if not line[0].isdigit():
+                    continue
+
+                gid = int(line[0])
+                if free_gid > gid:
+                    continue
+                if free_gid == gid:
+                    free_gid += 1
+                else:
+                    found = True  # gap before this existing GID
+                    break
+                if free_gid > end or free_gid > MAX_HOSTGROUPS:
+                    break  # range exhausted; found stays False
+        else:
+            found = True  # listing exhausted; candidate is free
+
+        if not found:
+            msg = basic_lib.output_err(648, resource='GID')
+            raise exception.HBSDError(message=msg)
+
+        return free_gid
+
+    def comm_set_target_wwns(self, target_ports):
+        """Return {port_name: wwn} parsed from 'aufibre1 -refer' output.
+
+        Port names are controller + port (columns 0 and 1); the WWN is
+        column 3.  Parsing stops at the 'Transfer' section.  When
+        target_ports is non-empty only those ports are included.
+        """
+        unit = self.unit_name
+        ret, stdout, stderr = self.exec_hsnm('aufibre1',
+                                             '-unit %s -refer' % unit)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='aufibre1', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+        target_wwns = {}
+        for line in lines[3:]:  # skip the three header lines
+            if re.match('Transfer', line):
+                break
+
+            line = shlex.split(line)
+            if len(line) < 4:
+                continue
+
+            port = '%s%s' % (line[0], line[1])
+            if target_ports:
+                if port in target_ports:
+                    target_wwns[port] = line[3]
+            else:
+                target_wwns[port] = line[3]
+
+        LOG.debug('target wwns: %s' % target_wwns)
+        return target_wwns
+
+    def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login):
+        for pt in wwns:
+            for line in buf[port]['assigned']:
+                hgname = shlex.split(line[38:])[1][4:]
+                if not re.match(basic_lib.NAME_PREFIX, hgname):
+                    continue
+                if pt.search(line[38:54]):
+                    wwn = line[38:54]
+                    gid = int(shlex.split(line[38:])[1][0:3])
+                    is_detected = None
+                    if login:
+                        for line in buf[port]['detected']:
+                            if pt.search(line[38:54]):
+                                is_detected = True
+                                break
+                        else:
+                            is_detected = False
+                    hostgroups.append({'port': six.text_type(port), 'gid': gid,
+                                       'initiator_wwn': wwn,
+                                       'detected': is_detected})
+
+    def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True):
+        """Fill hgs with host groups whose assigned WWN matches wwns.
+
+        Parses 'auhgwwn -refer' output, which is sectioned per port; only
+        ports with security 'ON' (column 5 of the Port line) are
+        considered and collected into the returned security_ports list.
+        Within a security-enabled port section, lines are bucketed by the
+        most recent 'Detected WWN' / 'Assigned WWN' / 'Assignable WWN'
+        heading.
+
+        :returns: list of ports that have security enabled
+        """
+        unit = self.unit_name
+        ret, stdout, stderr = self.exec_hsnm('auhgwwn',
+                                             '-unit %s -refer' % unit)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        security_ports = []
+        patterns = []
+        for wwn in wwns:
+            # WWNs compare case-insensitively.
+            pt = re.compile(wwn, re.IGNORECASE)
+            patterns.append(pt)
+
+        lines = stdout.splitlines()
+        buf = {}
+        _buffer = []
+        port = None
+        security = None
+        for line in lines:
+            if re.match('Port', line):
+                port = shlex.split(line)[1]
+                if target_ports and port not in target_ports:
+                    port = None  # skip this whole port section
+                else:
+                    security = True if shlex.split(line)[5] == 'ON' else False
+                    buf[port] = {'detected': [], 'assigned': [],
+                                 'assignable': []}
+                    if security:
+                        security_ports.append(port)
+                continue
+            if port and security:
+                if re.search('Detected WWN', line):
+                    _buffer = buf[port]['detected']
+                    continue
+                elif re.search('Assigned WWN', line):
+                    _buffer = buf[port]['assigned']
+                    continue
+                elif re.search('Assignable WWN', line):
+                    _buffer = buf[port]['assignable']
+                    continue
+                _buffer.append(line)
+
+        hostgroups = []
+        for port in buf.keys():
+            self.get_hostgroup_from_wwns(
+                hostgroups, port, patterns, buf, login)
+
+        for hostgroup in hostgroups:
+            hgs.append(hostgroup)
+
+        return security_ports
+
+    def comm_delete_lun_core(self, command, hostgroups, lun):
+        """Remove the mapping of lun from each host group.
+
+        Each (port, gid) pair is processed once even if listed several
+        times.  The current mapping table is re-read per host group to
+        find the H-LUN to remove.
+
+        :raises: exception.HBSDNotFound if lun is mapped in none of the
+                 host groups
+        :raises: exception.HBSDCmdError on any SNM2 command failure
+        """
+        unit = self.unit_name
+
+        no_lun_cnt = 0
+        deleted_hostgroups = []
+        for hostgroup in hostgroups:
+            LOG.debug('comm_delete_lun: hostgroup is %s' % hostgroup)
+            port = hostgroup['port']
+            gid = hostgroup['gid']
+            ctl_no = port[0]
+            port_no = port[1]
+
+            # Skip (port, gid) pairs already handled in this call.
+            is_deleted = False
+            for deleted in deleted_hostgroups:
+                if port == deleted['port'] and gid == deleted['gid']:
+                    is_deleted = True
+            if is_deleted:
+                continue
+            ret, stdout, stderr = self.exec_hsnm(command,
+                                                 '-unit %s -refer' % unit)
+            if ret:
+                msg = basic_lib.output_err(
+                    600, cmd=command, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+            lines = stdout.splitlines()
+            for line in lines[2:]:  # skip the two header lines
+                line = shlex.split(line)
+                if not line:
+                    continue
+                if (line[0] == port and int(line[1][0:3]) == gid
+                        and int(line[3]) == lun):
+                    hlu = int(line[2])
+                    break
+            else:
+                # lun not mapped in this host group; error only if it is
+                # mapped in none of them.
+                no_lun_cnt += 1
+                if no_lun_cnt == len(hostgroups):
+                    raise exception.HBSDNotFound
+                else:
+                    continue
+
+            opt = '-unit %s -rm %s %s %d %d %d' % (unit, ctl_no, port_no,
+                                                   gid, hlu, lun)
+            ret, stdout, stderr = self.exec_hsnm(command, opt)
+            if ret:
+                msg = basic_lib.output_err(
+                    600, cmd=command, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+            deleted_hostgroups.append({'port': port, 'gid': gid})
+            LOG.debug('comm_delete_lun is over (%d)' % lun)
+
+    def comm_delete_lun(self, hostgroups, ldev):
+        self.comm_delete_lun_core('auhgmap', hostgroups, ldev)
+
+    def comm_delete_lun_iscsi(self, hostgroups, ldev):
+        self.comm_delete_lun_core('autargetmap', hostgroups, ldev)
+
+    def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol):
+        unit = self.unit_name
+
+        if is_vvol:
+            command = 'aureplicationvvol'
+            opt = ('-unit %s -add -lu %d -size %dg'
+                   % (unit, ldev, capacity))
+        else:
+            command = 'auluadd'
+            opt = ('-unit %s -lu %d -dppoolno %d -size %dg'
+                   % (unit, ldev, pool_id, capacity))
+
+        ret, stdout, stderr = self.exec_hsnm(command, opt)
+        if ret:
+            if (re.search('DMEC002047', stderr)
+                    or re.search('DMES052602', stderr)
+                    or re.search('DMED09000A', stderr)):
+                raise exception.HBSDNotFound
+            else:
+                msg = basic_lib.output_err(
+                    600, cmd=command, ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_add_hostgrp(self, port, gid, host_grp_name):
+        unit = self.unit_name
+        ctl_no = port[0]
+        port_no = port[1]
+
+        opt = '-unit %s -add %s %s -gno %d -gname %s' % (unit, ctl_no,
+                                                         port_no, gid,
+                                                         host_grp_name)
+        ret, stdout, stderr = self.exec_hsnm('auhgdef', opt)
+        if ret:
+            raise exception.HBSDNotFound
+
+    def comm_del_hostgrp(self, port, gid, host_grp_name):
+        unit = self.unit_name
+        ctl_no = port[0]
+        port_no = port[1]
+        opt = '-unit %s -rm %s %s -gname %s' % (unit, ctl_no, port_no,
+                                                host_grp_name)
+        ret, stdout, stderr = self.exec_hsnm('auhgdef', opt)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_add_hbawwn(self, port, gid, wwn):
+        unit = self.unit_name
+        ctl_no = port[0]
+        port_no = port[1]
+        opt = '-unit %s -set -permhg %s %s %s -gno %d' % (unit, ctl_no,
+                                                          port_no, wwn, gid)
+        ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt)
+        if ret:
+            opt = '-unit %s -assign -permhg %s %s %s -gno %d' % (unit, ctl_no,
+                                                                 port_no, wwn,
+                                                                 gid)
+            ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt)
+            if ret:
+                msg = basic_lib.output_err(
+                    600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr)
+                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
    def comm_add_lun(self, command, hostgroups, ldev, is_once=False):
        """Map *ldev* to the host groups in *hostgroups*, all on one HLUN.

        :param command: SNM2 mapping command ('auhgmap' or 'autargetmap').
        :param hostgroups: list of dicts with 'port', 'gid' and 'detected'
                           keys; 'lun' is filled in on success.
        :param ldev: LDEV number to map.
        :param is_once: when True, stop after the first successful mapping.
        :raises exception.HBSDError: when existing mappings disagree on
            the HLUN, or when no mapping could be created.
        :raises exception.HBSDCmdError: when the last command failed and
            nothing was mapped.
        :raises exception.HBSDNotFound: when no free HLUN is available.
        """
        unit = self.unit_name
        tmp_hostgroups = hostgroups[:]
        used_list = []
        is_ok = False
        hlu = None
        old_hlu = None
        # First pass: collect HLUNs in use and detect host groups where
        # *ldev* appears to be mapped already (add_used_hlun presumably
        # returns that HLUN — TODO confirm); all groups must agree on it.
        for hostgroup in hostgroups:
            port = hostgroup['port']
            gid = hostgroup['gid']
            hlu = self.add_used_hlun(command, port, gid, used_list, ldev)
            # When 'hlu' or 'old_hlu' is 0, it should be true.
            # So, it cannot remove 'is not None'.
            if hlu is not None:
                if old_hlu is not None and old_hlu != hlu:
                    msg = basic_lib.output_err(648, resource='LUN (HLUN)')
                    raise exception.HBSDError(message=msg)
                is_ok = True
                hostgroup['lun'] = hlu
                tmp_hostgroups.remove(hostgroup)
                old_hlu = hlu
            else:
                hlu = old_hlu

        # Choose the HLUN: 0 when nothing is in use, otherwise the
        # smallest number (up to MAX_HLUN) not already used.
        if not used_list:
            hlu = 0
        elif hlu is None:
            for i in range(MAX_HLUN + 1):
                if i not in used_list:
                    hlu = i
                    break
            else:
                raise exception.HBSDNotFound

        ret = 0
        stdout = None
        stderr = None
        invalid_hgs_str = None
        # Second pass: create the missing mappings with the chosen HLUN.
        for hostgroup in tmp_hostgroups:
            port = hostgroup['port']
            gid = hostgroup['gid']
            ctl_no = port[0]
            port_no = port[1]
            if not hostgroup['detected']:
                # Accumulate undetected host groups for the final error.
                if invalid_hgs_str:
                    invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str,
                                                     port, gid)
                else:
                    invalid_hgs_str = '%s:%d' % (port, gid)
                continue
            opt = '-unit %s -add %s %s %d %d %d' % (unit, ctl_no, port_no,
                                                    gid, hlu, ldev)
            ret, stdout, stderr = self.exec_hsnm(command, opt)
            if ret == 0:
                is_ok = True
                hostgroup['lun'] = hlu
                if is_once:
                    break
            else:
                # Per-group failures are only warnings; the method fails
                # when no mapping at all succeeded.
                msg = basic_lib.set_msg(
                    314, ldev=ldev, lun=hlu, port=port, id=gid)
                LOG.warning(msg)

        if not is_ok:
            if stderr:
                msg = basic_lib.output_err(
                    600, cmd=command, ret=ret, out=stdout, err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
            else:
                msg = basic_lib.output_err(659, gid=invalid_hgs_str)
                raise exception.HBSDError(message=msg)
+
+    def comm_delete_ldev(self, ldev, is_vvol):
+        unit = self.unit_name
+
+        if is_vvol:
+            command = 'aureplicationvvol'
+            opt = '-unit %s -rm -lu %d' % (unit, ldev)
+        else:
+            command = 'auludel'
+            opt = '-unit %s -lu %d -f' % (unit, ldev)
+
+        ret, stdout, stderr = self.exec_hsnm(command, opt,
+                                             timeout=30, interval=3)
+        if ret:
+            if (re.search('DMEC002048', stderr)
+                    or re.search('DMED090026', stderr)):
+                raise exception.HBSDNotFound
+            msg = basic_lib.output_err(
+                600, cmd=command, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+        return ret
+
+    def comm_extend_ldev(self, ldev, old_size, new_size):
+        unit = self.unit_name
+        command = 'auluchgsize'
+        options = '-unit %s -lu %d -size %dg' % (unit, ldev, new_size)
+
+        ret, stdout, stderr = self.exec_hsnm(command, options)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd=command, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def delete_chap_user(self, port):
+        unit = self.unit_name
+        ctl_no = port[0]
+        port_no = port[1]
+        auth_username = self.conf.hitachi_auth_user
+
+        opt = '-unit %s -rm %s %s -user %s' % (unit, ctl_no, port_no,
+                                               auth_username)
+        return self.exec_hsnm('auchapuser', opt)
+
    def _wait_for_add_chap_user(self, cmd, auth_username,
                                auth_password, start):
        """Drive the interactive 'auchapuser -add' dialogue with pexpect.

        Intended as the body of a FixedIntervalLoopingCall: answers the
        two 'Secret' prompts with *auth_password*.  On success the loop
        is ended with LoopingCallDone(True); on any failure the call
        silently retries until EXEC_TIMEOUT seconds have passed since
        *start*, then raises HBSDError.
        """
        # Keep 'import pexpect' local so importing this module does not
        # require pexpect to be installed (e.g. when running tempest).
        import pexpect

        # Serialize SNM2 access across threads and processes.
        lock = basic_lib.get_process_lock(self.hsnm_lock_file)
        with nested(self.hsnm_lock, lock):
            try:
                child = pexpect.spawn(cmd)
                child.expect('Secret: ', timeout=CHAP_TIMEOUT)
                child.sendline(auth_password)
                child.expect('Re-enter Secret: ',
                             timeout=CHAP_TIMEOUT)
                child.sendline(auth_password)
                child.expect('The CHAP user information has '
                             'been added successfully.',
                             timeout=CHAP_TIMEOUT)
            except Exception:
                # Swallow the error and let the looping call retry until
                # the overall timeout expires.
                if time.time() - start >= EXEC_TIMEOUT:
                    msg = basic_lib.output_err(642, user=auth_username)
                    raise exception.HBSDError(message=msg)
            else:
                raise loopingcall.LoopingCallDone(True)
+
    def set_chap_authention(self, port, gid):
        """Ensure the configured CHAP user exists and is assigned to
        iSCSI target *gid* on *port*.

        When the user is missing and hitachi_add_chap_user is enabled,
        it is created interactively via _wait_for_add_chap_user.
        Returns True when the user was newly added, False otherwise.
        (The method name keeps its historical misspelling of
        'authentication' for interface compatibility.)
        """
        ctl_no = port[0]
        port_no = port[1]
        unit = self.unit_name
        auth_username = self.conf.hitachi_auth_user
        auth_password = self.conf.hitachi_auth_password
        add_chap_user = self.conf.hitachi_add_chap_user
        assign_flag = True
        added_flag = False
        opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no,
                                                  auth_username)
        ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True)

        if ret:
            # The CHAP user does not exist; create it if allowed.
            if not add_chap_user:
                msg = basic_lib.output_err(643, user=auth_username)
                raise exception.HBSDError(message=msg)

            root_helper = utils.get_root_helper()
            cmd = ('%s env %s auchapuser -unit %s -add %s %s '
                   '-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no,
                                         port_no, gid, auth_username))

            LOG.debug('Add CHAP user')
            loop = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_add_chap_user, cmd,
                auth_username, auth_password, time.time())

            added_flag = loop.start(interval=EXEC_INTERVAL).wait()

        else:
            # The user exists; skip assignment if it is already assigned
            # to this target number (first 3 digits of each entry).
            lines = stdout.splitlines()[4:]
            for line in lines:
                if int(shlex.split(line)[0][0:3]) == gid:
                    assign_flag = False
                    break

        if assign_flag:
            opt = '-unit %s -assign %s %s -tno %d -user %s' % (unit, ctl_no,
                                                               port_no, gid,
                                                               auth_username)
            ret, stdout, stderr = self.exec_hsnm('auchapuser', opt)
            if ret:
                # Roll back a just-created user when assignment fails.
                if added_flag:
                    _ret, _stdout, _stderr = self.delete_chap_user(port)
                    if _ret:
                        msg = basic_lib.set_msg(303, user=auth_username)
                        LOG.warning(msg)

                msg = basic_lib.output_err(
                    600, cmd='auchapuser', ret=ret, out=stdout, err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)

        return added_flag
+
+    def comm_add_hostgrp_iscsi(self, port, gid, target_alias, target_iqn):
+        auth_method = self.conf.hitachi_auth_method
+        unit = self.unit_name
+        ctl_no = port[0]
+        port_no = port[1]
+        if auth_method:
+            auth_arg = '-authmethod %s -mutual disable' % auth_method
+        else:
+            auth_arg = '-authmethod None'
+
+        opt = '-unit %s -add %s %s -tno %d' % (unit, ctl_no, port_no, gid)
+        opt = '%s -talias %s -iname %s %s' % (opt, target_alias, target_iqn,
+                                              auth_arg)
+        ret, stdout, stderr = self.exec_hsnm('autargetdef', opt)
+
+        if ret:
+            raise exception.HBSDNotFound
+
+    def delete_iscsi_target(self, port, _target_no, target_alias):
+        unit = self.unit_name
+        ctl_no = port[0]
+        port_no = port[1]
+        opt = '-unit %s -rm %s %s -talias %s' % (unit, ctl_no, port_no,
+                                                 target_alias)
+        return self.exec_hsnm('autargetdef', opt)
+
+    def comm_set_hostgrp_reportportal(self, port, target_alias):
+        unit = self.unit_name
+        ctl_no = port[0]
+        port_no = port[1]
+        opt = '-unit %s -set %s %s -talias %s' % (unit, ctl_no, port_no,
+                                                  target_alias)
+        opt = '%s -ReportFullPortalList enable' % opt
+        ret, stdout, stderr = self.exec_hsnm('autargetopt', opt)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='autargetopt', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_add_initiator(self, port, gid, host_iqn):
+        unit = self.unit_name
+        ctl_no = port[0]
+        port_no = port[1]
+        opt = '-unit %s -add %s %s -tno %d -iname %s' % (unit, ctl_no,
+                                                         port_no, gid,
+                                                         host_iqn)
+        ret, stdout, stderr = self.exec_hsnm('autargetini', opt)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='autargetini', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_get_hostgroup_info_iscsi(self, hgs, host_iqn, target_ports):
+        unit = self.unit_name
+        ret, stdout, stderr = self.exec_hsnm('autargetini',
+                                             '-unit %s -refer' % unit)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='autargetini', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        security_ports = []
+        lines = stdout.splitlines()
+        hostgroups = []
+        security = True
+        for line in lines:
+            if not shlex.split(line):
+                continue
+            if re.match('Port', line):
+                line = shlex.split(line)
+                port = line[1]
+                security = True if line[4] == 'ON' else False
+                continue
+
+            if target_ports and port not in target_ports:
+                continue
+
+            if security:
+                if (host_iqn in shlex.split(line[72:]) and
+                        re.match(basic_lib.NAME_PREFIX,
+                                 shlex.split(line)[0][4:])):
+                    gid = int(shlex.split(line)[0][0:3])
+                    hostgroups.append(
+                        {'port': port, 'gid': gid, 'detected': True})
+                    LOG.debug('Find port=%(port)s gid=%(gid)d'
+                              % {'port': port, 'gid': gid})
+                if port not in security_ports:
+                    security_ports.append(port)
+
+        for hostgroup in hostgroups:
+            hgs.append(hostgroup)
+
+        return security_ports
+
    def comm_get_iscsi_ip(self, port):
        """Return the (IPv4 address, TCP port) of iSCSI port *port*.

        Parses 'auiscsi -refer' output.  Raises HBSDError(651) when the
        port's address is not found, and HBSDCmdError when the command
        itself fails.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('auiscsi',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='auiscsi', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)

        lines = stdout.splitlines()
        is_target_port = False
        for line in lines:
            line_array = shlex.split(line)
            if not line_array:
                continue
            # 'Port <name>' lines open a new per-port section; 'Port
            # Number' is a data field, not a section header.
            if line_array[0] == 'Port' and line_array[1] != 'Number':
                if line_array[1] == port:
                    is_target_port = True
                else:
                    is_target_port = False
                continue
            if is_target_port and re.search('IPv4 Address', line):
                ip_addr = shlex.split(line)[3]
                break
            if is_target_port and re.search('Port Number', line):
                ip_port = shlex.split(line)[3]
        else:
            msg = basic_lib.output_err(651)
            raise exception.HBSDError(message=msg)

        # NOTE(review): this relies on the 'Port Number' line preceding
        # the 'IPv4 Address' line in the command output; otherwise
        # ip_port would be unbound here — confirm the auiscsi format.
        return ip_addr, ip_port
+
    def comm_get_target_iqn(self, port, gid):
        """Return the IQN of iSCSI target *gid* on *port*.

        Parses 'autargetdef -refer' output.  Raises HBSDError(650) when
        the IQN is not found, and HBSDCmdError on command failure.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('autargetdef',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)

        is_target_host = False
        tmp_port = None
        lines = stdout.splitlines()
        for line in lines:
            line = shlex.split(line)
            if not line:
                continue

            # 'Port <name>' opens a new per-port section.
            if line[0] == "Port":
                tmp_port = line[1]
                continue

            if port != tmp_port:
                continue

            # Target entries begin with a 3-digit target number.
            gid_tmp = line[0][0:3]
            if gid_tmp.isdigit() and int(gid_tmp) == gid:
                is_target_host = True
                continue
            # The 'iSCSI Name' line of the matched target carries the
            # IQN in its fourth field.
            if is_target_host and line[0] == "iSCSI":
                target_iqn = line[3]
                break
        else:
            msg = basic_lib.output_err(650, resource='IQN')
            raise exception.HBSDError(message=msg)

        return target_iqn
+
+    def get_unused_gid_iscsi(self, group_range, port):
+        start = group_range[0]
+        end = min(group_range[1], MAX_HOSTGROUPS_ISCSI)
+        unit = self.unit_name
+
+        ret, stdout, stderr = self.exec_hsnm('autargetdef',
+                                             '-unit %s -refer' % unit)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        used_list = []
+        tmp_port = None
+        lines = stdout.splitlines()
+        for line in lines:
+            line = shlex.split(line)
+            if not line:
+                continue
+
+            if line[0] == "Port":
+                tmp_port = line[1]
+                continue
+
+            if port != tmp_port:
+                continue
+
+            if line[0][0:3].isdigit():
+                gid = int(line[0][0:3])
+                if start <= gid <= end:
+                    used_list.append(gid)
+        if not used_list:
+            return start
+
+        for gid in range(start, end + 1):
+            if gid not in used_list:
+                break
+        else:
+            msg = basic_lib.output_err(648, resource='GID')
+            raise exception.HBSDError(message=msg)
+
+        return gid
+
    def get_gid_from_targetiqn(self, target_iqn, target_alias, port):
        """Return the target number (gid) on *port* whose IQN matches
        *target_iqn*, or None when no such target exists.

        Raises HBSDError(641) on alias/IQN mismatch conditions (see the
        two checks at the bottom), and HBSDCmdError on command failure.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('autargetdef',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)

        gid = None
        tmp_port = None
        found_alias_full = False
        found_alias_part = False
        lines = stdout.splitlines()
        for line in lines:
            line = shlex.split(line)
            if not line:
                continue

            # 'Port <name>' opens a new per-port section.
            if line[0] == "Port":
                tmp_port = line[1]
                continue

            if port != tmp_port:
                continue

            # Target header: '<3-digit number>:<alias>' in field 0.
            if line[0][0:3].isdigit():
                tmp_gid = int(line[0][0:3])
                if re.match(basic_lib.NAME_PREFIX, line[0][4:]):
                    found_alias_part = True
                if line[0][4:] == target_alias:
                    found_alias_full = True
                continue

            # 'iSCSI Name' line of the current target.
            if line[0] == "iSCSI":
                if line[3] == target_iqn:
                    gid = tmp_gid
                    break
                else:
                    found_alias_part = False

        # The expected alias exists but with a different IQN.
        if found_alias_full and gid is None:
            msg = basic_lib.output_err(641)
            raise exception.HBSDError(message=msg)

        # When 'gid' is 0, it should be true.
        # So, it cannot remove 'is not None'.
        if not found_alias_part and gid is not None:
            msg = basic_lib.output_err(641)
            raise exception.HBSDError(message=msg)

        return gid
+
+    def comm_get_dp_pool(self, pool_id):
+        unit = self.unit_name
+        ret, stdout, stderr = self.exec_hsnm('audppool',
+                                             '-unit %s -refer -g' % unit,
+                                             printflag=False)
+        if ret:
+            msg = basic_lib.output_err(
+                600, cmd='audppool', ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+        lines = stdout.splitlines()
+        for line in lines[2:]:
+            tc_cc = re.search('\s(\d+\.\d) GB\s+(\d+\.\d) GB\s', line)
+            pool_tmp = re.match('\s*\d+', line)
+            if (pool_tmp and tc_cc
+                    and int(pool_tmp.group(0)) == pool_id):
+                total_gb = int(float(tc_cc.group(1)))
+                free_gb = total_gb - int(float(tc_cc.group(2)))
+                return total_gb, free_gb
+
+        msg = basic_lib.output_err(640, pool_id=pool_id)
+        raise exception.HBSDError(message=msg)
+
+    def is_detected(self, port, wwn):
+        hgs = []
+        self.comm_get_hostgroup_info(hgs, [wwn], [port], login=True)
+        return hgs[0]['detected']
+
+    def pairoperate(self, opr, pvol, svol, is_vvol, args=None):
+        unit = self.unit_name
+        method = '-ss' if is_vvol else '-si'
+        opt = '-unit %s -%s %s -pvol %d -svol %d' % (unit, opr, method,
+                                                     pvol, svol)
+        if args:
+            opt = '%s %s' % (opt, args)
+        ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt)
+        if ret:
+            opt = '%s %s' % ('aureplicationlocal', opt)
+            msg = basic_lib.output_err(
+                600, cmd=opt, ret=ret, out=stdout, err=stderr)
+            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
+
+    def comm_create_pair(self, pvol, svol, is_vvol):
+        if not is_vvol:
+            args = '-compsplit -pace %s' % self.pace
+            method = basic_lib.FULL
+        else:
+            pool = self.conf.hitachi_thin_pool_id
+            args = ('-localrepdppoolno %d -localmngdppoolno %d '
+                    '-compsplit -pace %s' % (pool, pool, self.pace))
+            method = basic_lib.THIN
+        try:
+            self.pairoperate('create', pvol, svol, is_vvol, args=args)
+        except exception.HBSDCmdError as ex:
+            if (re.search('DMER0300B8', ex.stderr)
+                    or re.search('DMER0800CF', ex.stderr)
+                    or re.search('DMER0800D[0-6D]', ex.stderr)
+                    or re.search('DMER03006A', ex.stderr)
+                    or re.search('DMER030080', ex.stderr)):
+                msg = basic_lib.output_err(615, copy_method=method, pvol=pvol)
+                raise exception.HBSDBusy(message=msg)
+            else:
+                raise
+
+    def _comm_pairevtwait(self, pvol, svol, is_vvol):
+        unit = self.unit_name
+        if not is_vvol:
+            pairname = 'SI_LU%04d_LU%04d' % (pvol, svol)
+            method = '-si'
+        else:
+            pairname = 'SS_LU%04d_LU%04d' % (pvol, svol)
+            method = '-ss'
+        opt = ('-unit %s -evwait %s -pairname %s -gname Ungrouped -nowait' %
+               (unit, method, pairname))
+        ret, stdout, stderr = self.exec_hsnm('aureplicationmon',
+                                             opt, noretry=True)
+
+        return ret
+
+    def _wait_for_pair_status(self, pvol, svol, is_vvol,
+                              status, timeout, start):
+        if self._comm_pairevtwait(pvol, svol, is_vvol) in status:
+            raise loopingcall.LoopingCallDone()
+
+        if time.time() - start >= timeout:
+            msg = basic_lib.output_err(
+                637, method='_wait_for_pair_status', timeout=timeout)
+            raise exception.HBSDError(message=msg)
+
+    def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval):
+        loop = loopingcall.FixedIntervalLoopingCall(
+            self._wait_for_pair_status, pvol, svol, is_vvol,
+            status, timeout, time.time())
+
+        loop.start(interval=interval).wait()
+
+    def delete_pair(self, pvol, svol, is_vvol):
+        self.pairoperate('simplex', pvol, svol, is_vvol)
+
+    def trans_status_hsnm2raid(self, str):
+        status = None
+        obj = re.search('Split\((.*)%\)', str)
+        if obj:
+            status = basic_lib.PSUS
+        obj = re.search('Paired\((.*)%\)', str)
+        if obj:
+            status = basic_lib.PAIR
+        return status
+
    def get_paired_info(self, ldev, only_flag=False):
        """Return local-copy pair information for *ldev*.

        First queries *ldev* as a P-VOL; on success returns
        {'pvol': ldev, 'svol': [{'lun', 'status', 'is_vvol'}, ...]}.
        Otherwise queries it as an S-VOL and returns a single-entry
        structure, or {'pvol': None, 'svol': []} when unpaired.  With
        *only_flag* the query is restricted to SnapShot (-ss) pairs.
        """
        opt_base = '-unit %s -refer' % self.unit_name
        if only_flag:
            opt_base = '%s -ss' % opt_base

        opt = '%s -pvol %d' % (opt_base, ldev)
        ret, stdout, stderr = self.exec_hsnm('aureplicationlocal',
                                             opt, noretry=True)
        if ret == 0:
            lines = stdout.splitlines()
            pair_info = {'pvol': ldev, 'svol': []}
            for line in lines[1:]:
                status = self.trans_status_hsnm2raid(line)
                # Copy-kind column starts at fixed offset 100.
                if re.search('SnapShot', line[100:]):
                    is_vvol = True
                else:
                    is_vvol = False
                line = shlex.split(line)
                if not line:
                    break
                svol = int(line[2])
                pair_info['svol'].append({'lun': svol,
                                          'status': status,
                                          'is_vvol': is_vvol})
            return pair_info

        opt = '%s -svol %d' % (opt_base, ldev)
        ret, stdout, stderr = self.exec_hsnm('aureplicationlocal',
                                             opt, noretry=True)
        # NOTE(review): ret == 1 is treated as 'no pair found'; any other
        # nonzero status falls through and is parsed like success —
        # confirm against aureplicationlocal exit codes.
        if ret == 1:
            return {'pvol': None, 'svol': []}
        lines = stdout.splitlines()
        status = self.trans_status_hsnm2raid(lines[1])
        if re.search('SnapShot', lines[1][100:]):
            is_vvol = True
        else:
            is_vvol = False
        line = shlex.split(lines[1])
        pvol = int(line[1])

        return {'pvol': pvol, 'svol': [{'lun': ldev,
                                        'status': status,
                                        'is_vvol': is_vvol}]}
+
+    def create_lock_file(self):
+        basic_lib.create_empty_file(self.hsnm_lock_file)
+
+    def get_hostgroup_luns(self, port, gid):
+        list = []
+        self.add_used_hlun('auhgmap', port, gid, list, DUMMY_LU)
+
+        return list
index 663bbfffdcda43d11118bfede073fffe67975690..b42cacf8cfdf3bb9c25c7db4fdd5381046f9d4de 100644 (file)
 #hds_hnas_nfs_config_file=/opt/hds/hnas/cinder_nfs_conf.xml
 
 
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_common
+#
+
+# Serial number of storage system (string value)
+#hitachi_serial_number=<None>
+
+# Name of an array unit (string value)
+#hitachi_unit_name=<None>
+
+# Pool ID of storage system (integer value)
+#hitachi_pool_id=<None>
+
+# Thin pool ID of storage system (integer value)
+#hitachi_thin_pool_id=<None>
+
+# Range of logical device of storage system (string value)
+#hitachi_ldev_range=<None>
+
+# Default copy method of storage system (string value)
+#hitachi_default_copy_method=FULL
+
+# Copy speed of storage system (integer value)
+#hitachi_copy_speed=3
+
+# Interval to check copy (integer value)
+#hitachi_copy_check_interval=3
+
+# Interval to check copy asynchronously (integer value)
+#hitachi_async_copy_check_interval=10
+
+# Control port names for HostGroup or iSCSI Target (string
+# value)
+#hitachi_target_ports=<None>
+
+# Range of group number (string value)
+#hitachi_group_range=<None>
+
+# Request for creating HostGroup or iSCSI Target (boolean
+# value)
+#hitachi_group_request=false
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_fc
+#
+
+# Request for FC Zone creating HostGroup (boolean value)
+#hitachi_zoning_request=false
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_horcm
+#
+
+# Instance numbers for HORCM (string value)
+#hitachi_horcm_numbers=200,201
+
+# Username of storage system for HORCM (string value)
+#hitachi_horcm_user=<None>
+
+# Password of storage system for HORCM (string value)
+#hitachi_horcm_password=<None>
+
+# Add to HORCM configuration (boolean value)
+#hitachi_horcm_add_conf=true
+
+
+#
+# Options defined in cinder.volume.drivers.hitachi.hbsd_iscsi
+#
+
+# Add CHAP user (boolean value)
+#hitachi_add_chap_user=false
+
+# iSCSI authentication method (string value)
+#hitachi_auth_method=<None>
+
+# iSCSI authentication username (string value)
+#hitachi_auth_user=HBSD-CHAP-user
+
+# iSCSI authentication password (string value)
+#hitachi_auth_password=HBSD-CHAP-password
+
+
 #
 # Options defined in cinder.volume.drivers.huawei
 #
index 3136e9de318fc331c639c3e5d84f8cf66511cd78..2d23743f3a97027f08d3615d745e760b2d74bf6c 100644 (file)
@@ -123,3 +123,35 @@ sg_scan: CommandFilter, sg_scan, root
 
 #cinder/backup/services/tsm.py
 dsmc:CommandFilter,/usr/bin/dsmc,root
+
+# cinder/volume/drivers/hitachi/hbsd_horcm.py
+raidqry: CommandFilter, raidqry, root
+raidcom: CommandFilter, raidcom, root
+pairsplit: CommandFilter, pairsplit, root
+paircreate: CommandFilter, paircreate, root
+pairdisplay: CommandFilter, pairdisplay, root
+pairevtwait: CommandFilter, pairevtwait, root
+horcmstart.sh: CommandFilter, horcmstart.sh, root
+horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root
+horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr
+
+# cinder/volume/drivers/hitachi/hbsd_snm2.py
+auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman
+auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref
+auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef
+aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1
+auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn
+auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap
+autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap
+aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol
+auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd
+auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel
+auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize
+auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser
+autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef
+autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt
+autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini
+auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi
+audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool
+aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal
+aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon