]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Implement Huawei SDSHypervisor driver
authorzhangni <zhangni@huawei.com>
Fri, 5 Dec 2014 02:57:45 +0000 (10:57 +0800)
committerzhangni <zhangni@huawei.com>
Wed, 17 Dec 2014 12:03:07 +0000 (20:03 +0800)
It uses a socket connection and a CLI to communicate with the SDSHypervisor
to perform the following:
* Create/Delete Volume
* Extend Volume
* Create/Delete Snapshot
* Create Volume from Snapshot
* Delete Volume Snapshot
* Attach/Detach Volume
* Get Volume Stats
* Clone Volume

Certification test result for Huawei SDSHypervisor:
https://bugs.launchpad.net/cinder/+bug/1368064

Implements: blueprint huawei-sdshypervisor-driver
Change-Id: Ied72c5568875eae2387cf6271f31ddc5eebcc4bb

cinder/tests/test_huaweistorac.py [new file with mode: 0644]
cinder/volume/drivers/huaweistorhyper/__init__.py [new file with mode: 0644]
cinder/volume/drivers/huaweistorhyper/cinder_huawei_storac_conf.xml [new file with mode: 0644]
cinder/volume/drivers/huaweistorhyper/huaweistorac.py [new file with mode: 0644]
cinder/volume/drivers/huaweistorhyper/utils.py [new file with mode: 0644]
cinder/volume/drivers/huaweistorhyper/vbs_client.py [new file with mode: 0644]

diff --git a/cinder/tests/test_huaweistorac.py b/cinder/tests/test_huaweistorac.py
new file mode 100644 (file)
index 0000000..ba34c05
--- /dev/null
@@ -0,0 +1,780 @@
+# Copyright (c) 2014 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Unit Tests for Huawei SDS hypervisor volume drivers.
+"""
+
+import mock
+
+import os
+import re
+import tempfile
+from xml.dom.minidom import Document
+
+from oslo.utils import units
+
+from cinder.brick.initiator import connector as brick_connector
+from cinder import exception
+from cinder import test
+from cinder.volume import driver as base_driver
+from cinder.volume.drivers.huaweistorhyper import huaweistorac
+from cinder.volume.drivers.huaweistorhyper import utils
+from cinder.volume.drivers.huaweistorhyper import vbs_client
+from cinder.volume import volume_types
+
+
+# Fixture: a plain volume with no volume type attached.
+test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
+               'size': 2,
+               'volume_name': 'vol1',
+               'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
+               'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
+               'provider_auth': None,
+               'project_id': 'project',
+               'display_name': 'vol1',
+               'display_description': 'test volume',
+               'volume_type_id': None,
+               'host': '',
+               'status': 'available',
+               'provider_location':
+               'volume-21ec7341-9256-497b-97d9-ef48edcf0635'}
+
+# Fixture: a volume carrying the 'gold' volume type (extra specs, no QoS).
+# NOTE(review): 'id' and 'provider_location' end in ...0635 while 'name'
+# and 'volume_id' end in ...0666 - confirm the mismatch is intentional.
+test_volume_with_type = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0666',
+                         'size': 2,
+                         'volume_name': 'vol1',
+                         'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
+                         'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0666',
+                         'provider_auth': None,
+                         'project_id': 'project',
+                         'display_name': 'vol1',
+                         'display_description': 'test volume',
+                         'volume_type_id': 'gold',
+                         'volume_type': {'name': 'gold',
+                                         'extra_specs': [{'key': 'raid_level',
+                                                          'value': '2'},
+                                                         {'key': 'iops',
+                                                          'value': '1000'}],
+                                         'qos_specs': {}},
+                         'host': '',
+                         'status': 'available',
+                         'provider_location':
+                         'volume-21ec7341-9256-497b-97d9-ef48edcf0635'}
+# Volume-type fixture using the dict form of extra_specs.
+volume_type = {'name': 'gold',
+               'deleted': False,
+               'updated_at': None,
+               'extra_specs': {'raid_level': '2',
+                               'iops': '1000'},
+               'deleted_at': None,
+               'id': 'gold'}
+# Volume-type fixture with QoS specs. NOTE(review): extra_specs here is a
+# list of {'key': ..., 'value': ...} dicts, while volume_type above uses a
+# plain dict - both formats are exercised by the tests below.
+volume_type_qos = {'name': 'white',
+                   'deleted': False,
+                   'updated_at': None,
+                   'extra_specs': [{'key':
+                                    'raid_level',
+                                    'value': '3'},
+                                   {'key': 'iops',
+                                    'value':
+                                    '2000'}],
+                   'deleted_at': None,
+                   'qos_specs': {'id': 1,
+                                 'name': 'qos_specs',
+                                 'consumer': 'Consumer',
+                                 'specs': {'Qos-high': '10'}},
+                   'id': 'white'}
+# Fixture: a volume whose 'white' type carries both extra specs and QoS.
+test_volume_with_type_qos = {'name':
+                             'volume-21ec7341-9256-497b-97d9-ef48edcf8888',
+                             'size': 2,
+                             'volume_name': 'vol2',
+                             'id': '21ec7341-9256-497b-97d9-ef48edcf8888',
+                             'volume_id':
+                             '21ec7341-9256-497b-97d9-ef48edcf8888',
+                             'provider_auth': None,
+                             'project_id': 'project2',
+                             'display_name': 'vol2',
+                             'display_description': 'test volume',
+                             'volume_type_id': 'white',
+                             'volume_type': {'name': 'white',
+                                             'extra_specs': [{'key':
+                                                              'raid_level',
+                                                              'value': '3'},
+                                                             {'key': 'iops',
+                                                              'value':
+                                                              '2000'}],
+                                             'qos_specs': {'id': 1,
+                                                           'name': 'qos_specs',
+                                                           'consumer':
+                                                           'Consumer',
+                                                           'specs':
+                                                           {'Qos-high':
+                                                            '10'}}},
+                             'host': '',
+                             'status': 'available',
+                             'provider_location':
+                             'volume-21ec7341-9256-497b-97d9-ef48edcf8888'}
+# Fixture: clone/copy target volume.
+test_volume_tgt = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0636',
+                   'size': 2,
+                   'volume_name': 'vol1',
+                   'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
+                   'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
+                   'provider_auth': None,
+                   'project_id': 'project',
+                   'display_name': 'vol2',
+                   'display_description': 'test volume',
+                   'volume_type_id': None,
+                   'host': '',
+                   'status': 'available',
+                   'provider_location':
+                   'volume-21ec7341-9256-497b-97d9-ef48edcf0636'}
+
+# Fixture: snapshot record passed to create/delete snapshot tests.
+test_snap = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
+             'size': 1,
+             'volume_name': 'vol1',
+             'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
+             'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
+             'provider_auth': None,
+             'project_id': 'project',
+             'display_name': 'vol1',
+             'display_description': 'test volume',
+             'volume_type_id': None}
+
+# Request titles for which the fake client simply echoes its retcode.
+test_volume_orders = ['CREATE_VOLUME_REQ',
+                      'DELETE_VOLUME_REQ',
+                      'CREATE_VOLUME_FROM_SNAPSHOT_REQ',
+                      'CLONE_VOLUME_REQ',
+                      'EXTEND_VOLUME_REQ',
+                      'CREATE_SNAPSHOT_REQ',
+                      'DELETE_SNAPSHOT_REQ',
+                      'CREATE_FULLVOLUME_FROM_SNAPSHOT_REQ',
+                      'CREATE_LUN_MAPPING_REQ',
+                      'DELETE_LUN_MAPPING_REQ'
+                      ]
+# Placeholders for arguments the mocked base-driver methods never inspect.
+test_context = None
+test_image_service = None
+test_image_meta = None
+# Fake initiator/connector info for initialize_connection tests.
+test_connector = {'ip': '173.30.0.23',
+                  'initiator': 'iqn.1993-08.org.debian:01:37b12ad7d46',
+                  'host': 'openstack'}
+
+
+class FakeVbsClient(vbs_client.VbsClient):
+    """Fake VBS client that answers driver requests with canned replies.
+
+    Requests are recorded in ``self.reqs`` (by title) so tests can assert
+    on the sequence of backend calls a driver operation produced.
+    """
+
+    # Shared across instances so tests can assert on the last return code
+    # after a driver call.
+    retcode = None
+    delete_snapshot_ret = None
+
+    def __init__(self, conf_file):
+        super(FakeVbsClient, self).__init__(conf_file)
+        # True -> simulate a healthy backend; False -> every request fails.
+        self.test_normal_case = True
+        self.reqs = []
+
+    def send_message(self, msg):
+        """Entry point used by the driver; dispatch on the request title."""
+        return self.__start_send_req(msg)
+
+    def __start_send_req(self, req):
+        # Record the request title, refresh the shared retcode, then build
+        # a canned response for the title.
+        title = self._get_title(req)
+        self.reqs.append(title)
+        self._set_ret()
+        if self.test_normal_case:
+            if title:
+                if title in test_volume_orders:
+                    return 'retcode=' + FakeVbsClient.retcode
+                elif 'QUERY_VOLUME_REQ' == title:
+                    # -900079 == object not found on the backend.
+                    return '''retcode=-900079'''
+                elif 'QUERY_SNAPSHOT_REQ' == title:
+                    return '''retcode=-900079'''
+                elif 'QUERY_SINGLE_POOL_CAPABILITY_REQ' == title:
+                    return {'total_capacity': '100',
+                            'usable_capacity': '90',
+                            'tolerance_disk_failure': '20 10',
+                            'tolerance_cache_failure': '10 5'}
+                elif 'QUERY_POOLS_CAPABILITY_REQ' == title:
+                    return 'retcode=0\npool0=[stor_id=16384,'\
+                           'total_capacity=100,usable_capacity=97,'\
+                           'raid_level=5,iosp=15000,max_iops=15000,'\
+                           'min_iops=0]\npool1=[stor_id=16385,'\
+                           'total_capacity=100,usable_capacity=97,'\
+                           'raid_level=5,iosp=25000,max_iops=25000,'\
+                           'min_iops=0]\n'
+            # NOTE(review): unknown titles fall through and return None.
+        else:
+            if title:
+                return 'retcode=' + FakeVbsClient.retcode
+
+    def _get_title(self, req):
+        # Requests are serialized as '[TITLE]\nkey=value...'; strip the
+        # surrounding brackets from the first line.
+        lines = re.split('\n', req)
+        return lines[0][1:-1]
+
+    def _set_ret(self):
+        # '0' == success, '1' == generic failure, mirroring the real CLI.
+        if self.test_normal_case:
+            FakeVbsClient.retcode = '0'
+        else:
+            FakeVbsClient.retcode = '1'
+
+
+class FakeStorACStorage(huaweistorac.StorACDriver):
+    """StorAC driver wired to the fake VBS client (no real backend I/O)."""
+
+    def __init__(self, configuration):
+        super(FakeStorACStorage, self).__init__(configuration=configuration)
+        self.configuration = configuration
+
+    def do_setup(self, conf_file):
+        # Swap in the fake client before loading default stats so that no
+        # real socket communication takes place during the tests.
+        self._vbs_client = FakeVbsClient(conf_file)
+        self._get_default_volume_stats()
+
+
+class HuaweistoracUtilsTestCase(test.TestCase):
+    """Tests for the request (de)serialization helpers in utils."""
+
+    def setUp(self):
+        super(HuaweistoracUtilsTestCase, self).setUp()
+        self.request_info = {'vol_name':
+                             'volume-3f'}
+        self.request_type = 'QUERY_VOLUME_REQ'
+        # Expected wire format: '[TITLE]' header plus key=value lines.
+        self.serialize_out_fake = '[QUERY_VOLUME_REQ]\nvol_name=volume-3f\n'
+
+    def test_serialize(self):
+        serialize_out = utils.serialize(self.request_type,
+                                        self.request_info)
+        self.assertEqual(self.serialize_out_fake,
+                         serialize_out)
+
+    def test_deserialize(self):
+        # Splits on the given delimiter and keeps bracketed values intact.
+        deserialize_out = utils.deserialize('retcode=0\npool0=[stor_id=1638]',
+                                            '\n')
+        self.assertEqual({'retcode': '0',
+                          'pool0': '[stor_id=1638]'},
+                         deserialize_out)
+
+    def test_get_valid_ip_list(self):
+        # Empty strings and malformed addresses are filtered out.
+        iplist = utils.get_valid_ip_list(['', '127.0.0.1', '33', ''])
+        self.assertEqual(['127.0.0.1'],
+                         iplist)
+
+    def test_generate_dict_from_result(self):
+        result = utils.generate_dict_from_result("[stor_id=1638,iops=25]")
+        self.assertEqual({'stor_id': '1638',
+                          'iops': '25'},
+                         result)
+
+
+class StorACDriverTestCase(test.TestCase):
+
+    def setUp(self):
+        """Build a throwaway XML config file and a driver backed by fakes."""
+        super(StorACDriverTestCase, self).setUp()
+        # NOTE(review): tempfile.mktemp() is race-prone and deprecated;
+        # tempfile.mkstemp()/NamedTemporaryFile would be safer here.
+        self.fake_conf_file = tempfile.mktemp(suffix='.xml')
+        self.addCleanup(os.remove, self.fake_conf_file)
+
+        self.create_fake_conf_file()
+        self.configuration = mock.Mock()
+        self.configuration.use_multipath_for_image_xfer = False
+        self.configuration.num_volume_device_scan_tries = 3
+        self.configuration.cinder_huawei_sds_conf_file = self.fake_conf_file
+        self.driver = FakeStorACStorage(configuration=self.configuration)
+        self.driver.do_setup(self.fake_conf_file)
+        self.driver._vbs_client.test_normal_case = True
+
+    def create_fake_conf_file(self):
+        """Write a minimal driver XML config to self.fake_conf_file.
+
+        Mirrors cinder_huawei_storac_conf.xml: controller connection info,
+        default provisioning policy, capability flags and a pool list.
+        """
+        doc = Document()
+
+        config = doc.createElement('config')
+        doc.appendChild(config)
+
+        controller = doc.createElement('controller')
+        config.appendChild(controller)
+
+        # Trailing comma in vbs_url exercises the empty-entry filtering in
+        # utils.get_valid_ip_list.
+        self._xml_append_child(doc, controller, 'vbs_url', '127.0.0.1,')
+        self._xml_append_child(doc, controller, 'vbs_port', '10599')
+        self._xml_append_child(doc, controller, 'UserName', 'aa')
+        self._xml_append_child(doc, controller, 'UserPassword', 'bb')
+
+        policy = doc.createElement('policy')
+        config.appendChild(policy)
+
+        self._xml_append_child(doc, policy, 'force_provision_size', '2')
+        self._xml_append_child(doc, policy, 'iops', '200')
+        self._xml_append_child(doc, policy, 'cache_size', '2')
+        self._xml_append_child(doc, policy, 'repicate_num', '2')
+        self._xml_append_child(doc, policy, 'repicate_tolerant_num', '0')
+        self._xml_append_child(doc, policy, 'encrypt_algorithm', '0')
+        self._xml_append_child(doc, policy, 'consistency', '1')
+        self._xml_append_child(doc, policy, 'compress_algorithm', '0')
+        self._xml_append_child(doc, policy, 'backup_cycle', '0')
+        self._xml_append_child(doc, policy, 'stor_space_level', '1')
+        self._xml_append_child(doc, policy, 'QoS_support', '1')
+        self._xml_append_child(doc, policy, 'tolerance_disk_failure', '0')
+        self._xml_append_child(doc, policy, 'tolerance_cache_failure', '1')
+
+        capability = doc.createElement('capability')
+        config.appendChild(capability)
+
+        self._xml_append_child(doc, capability, 'reserved_percentage', '0')
+        self._xml_append_child(doc, capability, 'deduplication', '0')
+        self._xml_append_child(doc, capability, 'snapshot', '1')
+        self._xml_append_child(doc, capability, 'backup', '0')
+
+        pools = doc.createElement('pools')
+        config.appendChild(pools)
+        pool1 = doc.createElement('pool')
+        pool2 = doc.createElement('pool')
+        pools.appendChild(pool1)
+        pools.appendChild(pool2)
+
+        # NOTE(review): pool2 is deliberately(?) left without children -
+        # confirm an empty <pool> element is a case the parser must accept.
+        self._xml_append_child(doc, pool1, 'pool_id', 'xxx1')
+        self._xml_append_child(doc, pool1, 'iops', '200')
+
+        newefile = open(self.fake_conf_file, 'w')
+        newefile.write(doc.toprettyxml(indent=''))
+        newefile.close()
+
+    def _xml_append_child(self, doc, parent, child_name, child_text):
+        """Append <child_name>child_text</child_name> under parent."""
+        child = doc.createElement(child_name)
+        child_node_text = doc.createTextNode(child_text)
+        child.appendChild(child_node_text)
+        parent.appendChild(child)
+
+    # --- Success-path tests: the fake client always answers retcode=0. ---
+
+    def test_create_volume_success(self):
+        retval = self.driver.create_volume(test_volume)
+        self.assertEqual("volume-21ec7341-9256-497b-97d9-ef48edcf0635",
+                         retval['provider_location'])
+
+    def test_create_volume_with_volume_type(self):
+        retval = self.driver.create_volume(test_volume_with_type)
+        self.assertEqual("volume-21ec7341-9256-497b-97d9-ef48edcf0666",
+                         retval['provider_location'])
+
+    def test_create_volume_from_snapshot_success(self):
+        retval = self.driver. \
+            create_volume_from_snapshot(test_volume, test_snap)
+        self.assertEqual("volume-21ec7341-9256-497b-97d9-ef48edcf0635",
+                         retval['provider_location'])
+
+    @mock.patch.object(base_driver.VolumeDriver,
+                       'copy_volume_data')
+    def test_create_cloned_volume_success(self, mock_copy_volume_data):
+        # The data copy is mocked out; only the control flow is verified.
+        mock_copy_volume_data.return_value = None
+        retval = self.driver.\
+            create_cloned_volume(test_volume_tgt, test_volume)
+        self.assertEqual("volume-21ec7341-9256-497b-97d9-ef48edcf0636",
+                         retval['provider_location'])
+
+    def test_delete_volume_success(self):
+        self.driver.delete_volume(test_volume)
+        self.assertEqual('0', FakeVbsClient.retcode)
+
+    def test_extend_volume_success(self):
+        new_size = 4
+        self.driver.extend_volume(test_volume, new_size)
+        self.assertEqual('0', FakeVbsClient.retcode)
+
+    def test_migrate_volume_success(self):
+        # Placeholder: migration is not implemented by the driver.
+        pass
+
+    def test_get_volume_stats(self):
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual(0, stats['free_capacity_gb'])
+        self.assertEqual(0, stats['total_capacity_gb'])
+        self.assertEqual('0', stats['reserved_percentage'])
+
+    def test_create_snapshot_success(self):
+        retval = self.driver.create_snapshot(test_snap)
+        self.assertEqual('volume-21ec7341-9256-497b-97d9-ef48edcf0635',
+                         retval['provider_location'])
+
+    # NOTE(review): stacked @mock.patch.object decorators inject mocks
+    # bottom-up, so the first mock parameter corresponds to the LAST
+    # decorator ('_detach_volume'), not 'disconnect_volume'. The five
+    # parameter names below are therefore bound to the wrong patches, and
+    # the configured return values land on different mocks than the names
+    # suggest (e.g. is_volume_connected ends up returning a plain Mock).
+    # Confirm which paths this test actually exercises before relying on
+    # it; reordering the parameters may change the outcome.
+    @mock.patch.object(brick_connector.HuaweiStorHyperConnector,
+                       'is_volume_connected')
+    @mock.patch.object(brick_connector.HuaweiStorHyperConnector,
+                       'connect_volume')
+    @mock.patch.object(brick_connector.HuaweiStorHyperConnector,
+                       'disconnect_volume')
+    @mock.patch.object(base_driver.VolumeDriver,
+                       '_attach_volume')
+    @mock.patch.object(base_driver.VolumeDriver,
+                       '_detach_volume')
+    def test_delete_snapshot_success(self, mock_disconnect_volume,
+                                     mock_connect_volume,
+                                     mock_is_volume_connected,
+                                     mock__attach_volume,
+                                     mock__detach_volume):
+            mock_disconnect_volume.return_value = None
+            mock_connect_volume.return_value = {'type': 'block',
+                                                'path': '/dev/null'}
+
+            mock_is_volume_connected.return_value = True
+            mock__attach_volume.return_value = None
+            mock__detach_volume.return_value = None
+
+            self.driver.delete_snapshot(test_snap)
+            self.assertEqual('0', FakeVbsClient.retcode)
+
+            mock_is_volume_connected.return_value = False
+            self.driver.delete_snapshot(test_snap)
+            self.assertEqual('0', FakeVbsClient.retcode)
+
+    @mock.patch.object(base_driver.VolumeDriver,
+                       'copy_volume_to_image')
+    def test_copy_volume_to_image_success(self,
+                                          mock_copy_volume_to_image):
+        mock_copy_volume_to_image.return_value = None
+        self.driver.copy_volume_to_image(test_context,
+                                         test_volume,
+                                         test_image_service,
+                                         test_image_meta)
+
+        # The driver is expected to snapshot the volume, build a temporary
+        # linked volume from it, then clean both up again.
+        expected_reqs = ['CREATE_SNAPSHOT_REQ',
+                         'CREATE_VOLUME_FROM_SNAPSHOT_REQ',
+                         'DELETE_VOLUME_REQ',
+                         'QUERY_VOLUME_REQ']
+        self.assertEqual(expected_reqs, self.driver._vbs_client.reqs)
+
+    @mock.patch.object(base_driver.VolumeDriver,
+                       'copy_volume_data')
+    def test_copy_volume_data_success(self,
+                                      mock_copy_volume_data):
+        mock_copy_volume_data.return_value = None
+
+        self.driver.copy_volume_data(test_context,
+                                     test_volume,
+                                     test_volume_tgt,
+                                     remote=None)
+
+        # Same snapshot/linked-volume/cleanup sequence as the image copy.
+        expected_reqs = ['CREATE_SNAPSHOT_REQ',
+                         'CREATE_VOLUME_FROM_SNAPSHOT_REQ',
+                         'DELETE_VOLUME_REQ',
+                         'QUERY_VOLUME_REQ']
+        self.assertEqual(expected_reqs, self.driver._vbs_client.reqs)
+
+    def test_initialize_connection_success(self):
+        retval = self.driver.initialize_connection(test_volume, test_connector)
+        self.assertEqual('HUAWEISDSHYPERVISOR', retval['driver_volume_type'])
+
+    def test_terminate_connection_success(self):
+        # Placeholder: terminate_connection is a no-op for this driver.
+        pass
+
+    # --- Error-path tests: the fake client answers retcode=1 and every
+    # --- driver operation is expected to raise.
+
+    def test_create_volume_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_volume, test_volume)
+
+    def test_create_volume_from_snapshot_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_volume_from_snapshot,
+                          test_volume, test_snap)
+
+    def test_create_cloned_volume_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        # Dropping provider_location forces the clone path to fail early.
+        test_volume_tmp = dict(test_volume)
+        test_volume_tmp.pop('provider_location')
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_cloned_volume,
+                          test_volume_tgt, test_volume_tmp)
+
+    def test_delete_volume_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.delete_volume, test_volume)
+
+    def test_extend_volume_fail(self):
+        new_size = 4
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.extend_volume, test_volume, new_size)
+
+    def create_snapshot_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_snapshot, test_snap)
+
+    def delete_snapshot_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.delete_snapshot, test_snap)
+
+    def test_copy_volume_to_image_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.copy_volume_to_image,
+                          test_context,
+                          test_volume,
+                          test_image_service,
+                          test_image_meta)
+
+    def test_copy_volume_data_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.copy_volume_data,
+                          test_context,
+                          test_volume,
+                          test_volume_tgt,
+                          remote=None)
+
+    def test_terminate_connection_fail(self):
+        # Placeholder: terminate_connection is a no-op for this driver.
+        pass
+
+    # NOTE(review): the body below is indented one level deeper than
+    # necessary (12 spaces); harmless but worth normalizing.
+    @mock.patch.object(brick_connector.HuaweiStorHyperConnector,
+                       'is_volume_connected')
+    def test__is_volume_attached(self, mock_is_volume_connected):
+            mock_is_volume_connected.return_value = True
+            ret = self.driver._is_volume_attached('21ec7341')
+            self.assertEqual(True, ret)
+
+            mock_is_volume_connected.return_value = False
+            ret = self.driver._is_volume_attached('21ec7341')
+            self.assertEqual(False, ret)
+
+    # --- White-box tests of the driver's private helpers. ---
+
+    def test__create_target_volume_success(self):
+        test_volume_name_tgt = test_volume_tgt['name']
+        retval = self.driver._create_target_volume(test_volume,
+                                                   test_volume_name_tgt,
+                                                   test_volume_tgt)
+        self.assertEqual("volume-21ec7341-9256-497b-97d9-ef48edcf0636",
+                         retval['vol_name'])
+
+    def test__create_linked_volume_from_snap_success(self):
+        tgt_vol_name = test_volume['name']
+        src_snapshot_name = test_snap['name']
+        self.driver._create_linked_volume_from_snap(src_snapshot_name,
+                                                    tgt_vol_name,
+                                                    test_volume['size'])
+        expected_reqs = ['CREATE_VOLUME_FROM_SNAPSHOT_REQ']
+        self.assertEqual(expected_reqs, self.driver._vbs_client.reqs)
+
+    def test__get_all_pool_capacity_success(self):
+        # Pool stats are parsed from the canned QUERY_POOLS_CAPABILITY_REQ
+        # reply in FakeVbsClient, keyed by stor_id.
+        retval = self.driver._get_all_pool_capacity()
+        stats = retval['16384']
+        self.assertEqual(97, stats['free_capacity_gb'])
+        self.assertEqual(100, stats['total_capacity_gb'])
+        self.assertEqual(0, stats['reserved_percentage'])
+
+    def test__delete_snapshot_success(self):
+        self.driver._delete_snapshot(test_snap)
+        expected_reqs = ['DELETE_SNAPSHOT_REQ',
+                         'QUERY_SNAPSHOT_REQ']
+        self.assertEqual(expected_reqs, self.driver._vbs_client.reqs)
+
+    def test__create_default_volume_stats_success(self):
+        retval = self.driver._create_default_volume_stats()
+        self.assertEqual('Huawei', retval['vendor_name'])
+
+    def test__get_default_volume_stats_success(self):
+        # Values come from the <capability> section of the fake config.
+        retval = self.driver._get_default_volume_stats()
+        self.assertEqual('0', retval['reserved_percentage'])
+        self.assertEqual('0', retval['deduplication'])
+        self.assertEqual('1', retval['snapshot'])
+        self.assertEqual('0', retval['backup'])
+
+    def test__query_volume_success(self):
+        # -900079 is the fake backend's "not found" return code.
+        retval = self.driver._query_volume(test_volume['name'])
+        self.assertEqual('-900079', retval['retcode'])
+
+    def test__is_volume_exist_success(self):
+        volume_name = test_volume['name']
+        retval = self.driver._is_volume_exist(volume_name)
+        self.assertEqual(False, retval)
+
+    def test__create_target_volume_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver._create_target_volume,
+                          test_volume,
+                          test_volume_tgt['volume_name'],
+                          test_volume_tgt)
+
+    def test__create_linked_volume_from_snap_fail(self):
+        tgt_vol_name = test_volume['name']
+        src_snapshot_name = test_snap['name']
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver._create_linked_volume_from_snap,
+                          src_snapshot_name,
+                          tgt_vol_name,
+                          test_volume['size'])
+
+    def test__get_volume_stats_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver._get_volume_stats)
+
+    def test__get_all_pool_capacity_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver._get_all_pool_capacity)
+
+    def test__delete_snapshot_fail(self):
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.CinderException,
+                          self.driver._delete_snapshot,
+                          test_snap)
+
+    def test__query_volume_fail(self):
+        # Query itself does not raise; the error code is passed through.
+        self.driver._vbs_client.test_normal_case = False
+        retval = self.driver._query_volume(test_volume['name'])
+        self.assertEqual('1', retval['retcode'])
+
+    def test__is_volume_exist_fail(self):
+        volume_name = test_volume['name']
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver._is_volume_exist,
+                          volume_name)
+
+    def test_size_translate_success(self):
+        # Sizes are translated from GiB to a MiB string for the backend.
+        exp_size = '%s' % (2 * units.Ki)
+        vol_size = self.driver._size_translate(2)
+        self.assertEqual(exp_size, vol_size)
+
+    def test_update_volume_info_from_volume_extra_specs_success(self):
+        # extra_specs here is the list-of-{'key','value'} format.
+        volume_info = self.driver._create_storage_info('volume_info')
+        extra_specs = volume_type_qos.get('extra_specs')
+        self.driver._update_volume_info_from_volume_extra_specs(volume_info,
+                                                                extra_specs)
+        self.assertEqual('2000', volume_info['iops'])
+
+    def test_update_volume_info_from_volume_success(self):
+        volume_info = self.driver._create_storage_info('volume_info')
+        self.driver._update_volume_info_from_volume(volume_info,
+                                                    test_volume_with_type_qos)
+        self.assertEqual('2000', volume_info['iops'])
+        self.assertEqual("3", volume_info['IOPRIORITY'])
+
+    def test_update_volume_info_from_qos_specs_success(self):
+        volume_info = self.driver._create_storage_info('volume_info')
+        self.driver._update_volume_info_from_qos_specs(volume_info,
+                                                       volume_type_qos)
+        self.assertEqual("3", volume_info['IOPRIORITY'])
+
+    # NOTE(review): stacked @mock.patch.object decorators inject mocks
+    # bottom-up, so the first mock parameter is actually the
+    # get_volume_type_qos_specs mock and the second is get_volume_type -
+    # the parameter names (and hence the configured return values) are
+    # swapped relative to the decorators. The assertions pass against this
+    # accidental wiring; confirm the intended setup with the author before
+    # reordering.
+    @mock.patch.object(volume_types, 'get_volume_type')
+    @mock.patch.object(volume_types, 'get_volume_type_qos_specs')
+    def test_update_volinfo_from_type_success(self,
+                                              _mock_get_volume_types,
+                                              _mock_get_volume_type_qos_specs):
+        volume_info = self.driver._create_storage_info('volume_info')
+        _mock_get_volume_types.return_value = volume_type_qos
+        _mock_get_volume_type_qos_specs.return_value = {'qos_specs':
+                                                        {'id': 1,
+                                                         'name': 'qos_specs',
+                                                         'consumer':
+                                                         'Consumer',
+                                                         'specs': {'Qos-high':
+                                                                   '10'}}}
+        self.driver._update_volume_info_from_volume_type(volume_info, 'white')
+        self.assertEqual('100', volume_info['iops'])
+        self.assertEqual("3", volume_info['IOPRIORITY'])
+
+    def test_create_storage_info_success(self):
+        """_create_storage_info('volume_info') yields documented defaults."""
+        volume_info = self.driver._create_storage_info('volume_info')
+        self.assertEqual('', volume_info['vol_name'])
+        self.assertEqual('', volume_info['vol_size'])
+        self.assertEqual('0', volume_info['pool_id'])
+        self.assertEqual('0', volume_info['thin_flag'])
+        self.assertEqual('0', volume_info['reserved'])
+        self.assertEqual('0', volume_info['volume_space_reserved'])
+        self.assertEqual('0', volume_info['force_provision_size'])
+        self.assertEqual('100', volume_info['iops'])
+        self.assertEqual('100', volume_info['max_iops'])
+        self.assertEqual('0', volume_info['min_iops'])
+        self.assertEqual('0', volume_info['cache_size'])
+        self.assertEqual('1', volume_info['repicate_num'])
+        self.assertEqual('1', volume_info['repicate_tolerant_num'])
+        self.assertEqual('0', volume_info['encrypt_algorithm'])
+        self.assertEqual('0', volume_info['consistency'])
+        self.assertEqual('1', volume_info['stor_space_level'])
+        self.assertEqual('0', volume_info['compress_algorithm'])
+        self.assertEqual('0', volume_info['deduplication'])
+        self.assertEqual('0', volume_info['snapshot'])
+        self.assertEqual('0', volume_info['backup_cycle'])
+        self.assertEqual('0', volume_info['tolerance_disk_failure'])
+        self.assertEqual('1', volume_info['tolerance_cache_failure'])
+
+    def test_is_snapshot_exist_success(self):
+        result = self.driver._is_snapshot_exist('snap-21ec7341')
+        self.assertEqual(False, result)
+
+    def test_get_volume_pool_id_success(self):
+        """The pool id is the part of the host string after '#'."""
+        result = self.driver._get_volume_pool_id('host#cloud')
+        self.assertEqual('cloud', result)
+
+    def test_send_request_success(self):
+        """A successful backend request returns retcode '0'."""
+        volume_info = self.driver._create_storage_info('volume_info')
+        volume_info['vol_name'] = 'test_vol'
+        volume_info['vol_size'] = 2
+        result = self.driver._send_request('CREATE_VOLUME_REQ',
+                                           volume_info,
+                                           'create volume error.')
+        self.assertEqual('0', result['retcode'])
+
+    def test_update_default_volume_stats_from_config_success(self):
+        default_stats = {'pools_id': []}
+        self.driver.\
+            _update_default_volume_stats_from_config(default_stats,
+                                                     self.fake_conf_file)
+        self.assertEqual(True, default_stats['QoS_support'])
+        self.assertEqual('200', default_stats['iops'])
+        self.assertEqual('2', default_stats['cache_size'])
+        self.assertEqual('2', default_stats['repicate_num'])
+        self.assertEqual('0', default_stats['repicate_tolerant_num'])
+        self.assertEqual('0', default_stats['encrypt_algorithm'])
+        self.assertEqual('1', default_stats['consistency'])
+        self.assertEqual('0', default_stats['compress_algorithm'])
+        self.assertEqual('0', default_stats['backup_cycle'])
+        self.assertEqual('1', default_stats['stor_space_level'])
+        self.assertEqual('0', default_stats['tolerance_disk_failure'])
+        self.assertEqual('1', default_stats['tolerance_cache_failure'])
+        self.assertEqual('0', default_stats['reserved_percentage'])
+        self.assertEqual('0', default_stats['deduplication'])
+        self.assertEqual('1', default_stats['snapshot'])
+        self.assertEqual('0', default_stats['backup'])
+
+    def test_update_all_pool_capacity_from_policy_success(self):
+        """Policy values override the queried per-pool capacity values."""
+        all_pool_policy = {'xxx1': {'total_capacity_gb': 100,
+                                    'free_capacity_gb': 80,
+                                    'iops': 2000}}
+        all_pool_capacity = {'xxx1': {'total_capacity_gb': 80,
+                                      'free_capacity_gb': 60,
+                                      'iops': 1000}}
+        self.driver._update_all_pool_capacity_from_policy(all_pool_capacity,
+                                                          all_pool_policy)
+        self.assertEqual(100, all_pool_capacity['xxx1']['total_capacity_gb'])
+        self.assertEqual(80, all_pool_capacity['xxx1']['free_capacity_gb'])
+        self.assertEqual(2000, all_pool_capacity['xxx1']['iops'])
+
+    def test_extract_pool_policy_mapping_from_config_success(self):
+        """Per-pool policy is read from the fake XML config file."""
+        pools = self.driver.\
+            _extract_pool_policy_mapping_from_config(self.fake_conf_file)
+        self.assertEqual('200', pools['xxx1']['iops'])
+
+    def test_is_snapshot_exist_fail(self):
+        """A failing backend reply makes _is_snapshot_exist raise."""
+        # The fake client returns an error response in this mode.
+        self.driver._vbs_client.test_normal_case = False
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver._is_snapshot_exist,
+                          'test_snap_not_exist')
+
+    def test_get_volume_pool_id_default(self):
+        """Without a '#pool' suffix the single configured pool is used."""
+        pool_info = self.driver._get_volume_pool_id('host_test')
+        self.assertEqual('xxx1', pool_info)
+
+    def test_create_storage_info_fail1(self):
+        volume_info = self.driver._create_storage_info('')
+        self.assertEqual(None, volume_info)
+
+    def test_create_storage_info_fail2(self):
+        volume_info = self.driver._create_storage_info('volume')
+        self.assertEqual(None, volume_info)
+
+    def test__query_volume_notexist(self):
+        retval = self.driver._query_volume('volume-2b73118c')
+        self.assertEqual(retval['retcode'], '-900079')
+
+    def test__is_volume_exist_notexist(self):
+        volume_name = 'volume-2b73118c-2c6d-4f2c-a00a-9c27791ee814'
+        retval = self.driver._is_volume_exist(volume_name)
+        self.assertEqual(False, retval)
diff --git a/cinder/volume/drivers/huaweistorhyper/__init__.py b/cinder/volume/drivers/huaweistorhyper/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cinder/volume/drivers/huaweistorhyper/cinder_huawei_storac_conf.xml b/cinder/volume/drivers/huaweistorhyper/cinder_huawei_storac_conf.xml
new file mode 100644 (file)
index 0000000..98c54b1
--- /dev/null
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<config>
+    <controller>
+        <vbs_url>,127.0.0.1,</vbs_url>
+        <vbs_port>10599</vbs_port>
+        <UserName>aa</UserName>
+        <UserPassword>bb</UserPassword>
+    </controller>
+    <pools>
+    <!--<pool>
+      <pool_id>xxx1</pool_id>
+      <iops>200</iops>
+     </pool>
+     <pool>
+      <pool_id>xxx2</pool_id>
+      <iops>200</iops>
+     </pool>-->
+    </pools>
+    <policy>
+        <iops>200</iops>
+        <cache_size>2</cache_size>
+        <repicate_num>2</repicate_num>
+        <repicate_tolerant_num>0</repicate_tolerant_num>
+        <encrypt_algorithm>0</encrypt_algorithm>
+        <consistency>1</consistency>
+        <compress_algorithm>0</compress_algorithm>
+        <backup_cycle>0</backup_cycle>
+        <stor_space_level>1</stor_space_level>
+        <QoS_support>1</QoS_support>
+        <tolerance_disk_failure>0</tolerance_disk_failure>
+        <tolerance_cache_failure>1</tolerance_cache_failure>
+     </policy>
+     <capability>
+        <reserved_percentage>0</reserved_percentage>
+        <deduplication>0</deduplication>
+        <snapshot>1</snapshot>
+        <backup>0</backup>
+     </capability>
+</config>
diff --git a/cinder/volume/drivers/huaweistorhyper/huaweistorac.py b/cinder/volume/drivers/huaweistorhyper/huaweistorac.py
new file mode 100644 (file)
index 0000000..3ba3b32
--- /dev/null
@@ -0,0 +1,750 @@
+# Copyright (c) 2014 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+ Volume api for Huawei SDSHypervisor systems.
+"""
+
+import uuid
+
+
+from oslo.config import cfg
+from oslo.utils import units
+import six
+
+from cinder import context
+from cinder import exception
+from cinder.i18n import _, _LE
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder import utils
+from cinder.volume import driver
+from cinder.volume.drivers.huaweistorhyper import utils as storhyper_utils
+from cinder.volume.drivers.huaweistorhyper import vbs_client
+from cinder.volume import volume_types
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+# QoS spec keys recognized by the driver when reading volume-type specs.
+QOS_KEY = ["Qos-high", "Qos-normal", "Qos-low"]
+
+LINKED_CLONE_TYPE = 'linked'
+FULL_CLONE_TYPE = 'full'
+
+# Polling intervals (seconds) for the FixedIntervalLoopingCall waiters.
+CHECK_VOLUME_DATA_FINISHED_INTERVAL = 10
+CHECK_VOLUME_DELETE_FINISHED_INTERVAL = 2
+CHECK_SNAPSHOT_DELETE_FINISHED_INTERVAL = 2
+
+huawei_storhyper_opts = [
+    cfg.StrOpt('cinder_huawei_sds_conf_file',
+               default='/etc/cinder/cinder_huawei_storac_conf.xml',
+               help='huawei storagehyper driver config file path'),
+]
+
+CONF.register_opts(huawei_storhyper_opts)
+
+
+class StorACDriver(driver.VolumeDriver):
+    """Cinder volume driver for Huawei SDSHypervisor (StorAC) backends."""
+
+    VERSION = '1.0.0'
+    # Backend retcode meaning the queried object is already deleted /
+    # does not exist (see _is_volume_exist / _is_snapshot_exist).
+    del_complete_code = '-900079'
+
+    def __init__(self, *args, **kwargs):
+        """Load driver options, create the VBS client and seed stats."""
+        super(StorACDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(huawei_storhyper_opts)
+        self._conf_file = self.configuration.cinder_huawei_sds_conf_file
+        LOG.debug('Conf_file is: ' + self._conf_file)
+        self._vbs_client = vbs_client.VbsClient(self._conf_file)
+        # Stats start from config-file defaults; refreshed on demand by
+        # get_volume_stats(refresh=True).
+        self._volume_stats = self._get_default_volume_stats()
+
+    def check_for_setup_error(self):
+        """No backend setup validation is performed for this driver."""
+        pass
+
+    def initialize_connection(self, volume, connector):
+        LOG.debug('Initialize connection.')
+        properties = {}
+        properties['volume_id'] = volume['name']
+        return {'driver_volume_type': 'HUAWEISDSHYPERVISOR',
+                'data': properties}
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Terminate the map. No backend-side action is required."""
+        pass
+
+    def create_volume(self, volume):
+        """Create a new volume.
+
+        Builds the request from default storage info, the pool taken from
+        volume['host'] and volume-type/QoS overrides, then sends
+        CREATE_VOLUME_REQ to the backend.
+        """
+        volume_name = volume['name']
+        LOG.debug('Create volume, volume name: %s.' % volume_name)
+        volume_size = self._size_translate(volume['size'])
+
+        volume_info = self._create_storage_info('volume_info')
+        volume_info['vol_name'] = volume_name
+        volume_info['vol_size'] = volume_size
+        volume_info['pool_id'] = self._get_volume_pool_id(volume['host'])
+        self._update_volume_info_from_volume(volume_info, volume)
+        self._send_request('CREATE_VOLUME_REQ',
+                           volume_info,
+                           'create volume error.')
+        return {'provider_location': volume['name']}
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create a volume from a snapshot.
+
+        The new volume is a linked clone of the snapshot; size translation
+        happens inside _create_linked_volume_from_snap.
+        """
+        tgt_vol_name = volume['name']
+        src_snapshot_name = snapshot['name']
+        LOG.debug('Create volume from snapshot: '
+                  'tgt_vol_name: %(tgt_vol_name)s, '
+                  'src_snapshot_name: %(src_snapshot_name)s, '
+                  'vol_size: %(vol_size)s.'
+                  % {'tgt_vol_name': tgt_vol_name,
+                     'src_snapshot_name': src_snapshot_name,
+                     'vol_size': volume['size']})
+        self._create_linked_volume_from_snap(src_snapshot_name,
+                                             tgt_vol_name,
+                                             volume['size'])
+
+        return {'provider_location': volume['name']}
+
+    def create_cloned_volume(self, tgt_volume, src_volume):
+        """Create a clone volume.
+
+        Creates the target volume first, then copies the source data via
+        copy_volume_data (temporary snapshot + linked clone).  Raises
+        VolumeNotFound when the source has no provider_location.
+        """
+        src_vol_name = src_volume['name']
+        tgt_vol_name = tgt_volume['name']
+        LOG.debug('Create cloned volume: src volume: %(src)s, '
+                  'tgt volume: %(tgt)s.' % {'src': src_vol_name,
+                                            'tgt': tgt_vol_name})
+
+        src_vol_id = src_volume.get('provider_location')
+        if not src_vol_id:
+            err_msg = (_LE('Source volume %(name)s does not exist.')
+                       % {'name': src_vol_name})
+            LOG.error(err_msg)
+            raise exception.VolumeNotFound(volume_id=src_vol_name)
+
+        volume_info = self._create_target_volume(src_volume,
+                                                 tgt_vol_name,
+                                                 tgt_volume)
+        tgt_vol_id = volume_info['vol_name']
+
+        self.copy_volume_data(context.get_admin_context(), src_volume,
+                              tgt_volume, remote=None)
+
+        return {'provider_location': tgt_vol_id}
+
+    def delete_volume(self, volume):
+        """Delete a volume.
+
+        Sends DELETE_VOLUME_REQ and then polls the backend until the
+        volume is reported gone; raises if deletion cannot be confirmed.
+        """
+        req_paras = {}
+        req_paras['vol_name'] = volume['name']
+        self._send_request('DELETE_VOLUME_REQ',
+                           req_paras,
+                           'Delete volume failed.')
+        self._wait_for_volume_delete(volume['name'])
+
+    def extend_volume(self, volume, new_size):
+        """Extend the size of an existing volume."""
+        LOG.debug('Extend volume: %s.' % volume['name'])
+        volume_name = volume['name']
+        new_volume_size = self._size_translate(new_size)
+        volume_info = {"vol_name": volume_name,
+                       "vol_size": new_volume_size}
+
+        self._send_request('EXTEND_VOLUME_REQ',
+                           volume_info,
+                           'extend volume failed.')
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        On refresh failure the cached stats are reset to the config-file
+        defaults instead of propagating the error.
+        """
+        if refresh:
+            try:
+                self._get_volume_stats()
+            except Exception as ex:
+                self._volume_stats = self._get_default_volume_stats()
+                msg = (_LE('Error from get volume stats: '
+                           '%s, using default stats.') % ex)
+                LOG.error(msg)
+        return self._volume_stats
+
+    def create_snapshot(self, snapshot):
+        create_snapshot_req = {}
+        create_snapshot_req['snap_name'] = snapshot['name']
+        create_snapshot_req['vol_name'] = snapshot['volume_name']
+        create_snapshot_req['smartflag'] = '1'
+
+        self._send_request('CREATE_SNAPSHOT_REQ',
+                           create_snapshot_req,
+                           'create snapshot failed.')
+
+        return {'provider_location': snapshot['name']}
+
+    def delete_snapshot(self, snapshot):
+        """Delete a snapshot."""
+        """Delete SDS snapshot,ensure source volume is attached """
+        source_volume_id = snapshot['volume_id']
+        if not source_volume_id:
+            self._delete_snapshot(snapshot)
+            return
+
+        is_volume_attached = self._is_volume_attached(source_volume_id)
+        if is_volume_attached:
+            LOG.debug('Volume is attached')
+            self._delete_snapshot(snapshot)
+        else:
+            LOG.debug('Volume is not attached')
+            source_volume = {'name': 'volume-' + source_volume_id,
+                             'id': source_volume_id}
+            properties = utils.brick_get_connector_properties()
+            source_volume_attach_info = self._attach_volume(
+                None, source_volume, properties, False)
+            try:
+                self._delete_snapshot(snapshot)
+            except Exception as ex:
+                err_msg = (_LE('Delete snapshot failed: '
+                           '%s.') % ex)
+                LOG.error(err_msg)
+            self._detach_volume(
+                None, source_volume_attach_info, source_volume,
+                properties, False, False)
+
+    def create_export(self, context, volume):
+        """Export the volume. No backend-side export step is needed."""
+        pass
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreate an export for a volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Remove an export for a volume."""
+        pass
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        err_msg = ''
+        temp_snapshot, temp_volume = self._create_temp_snap_and_volume(volume)
+        try:
+            self.create_snapshot(temp_snapshot)
+            self._create_linked_volume_from_snap(temp_snapshot['name'],
+                                                 temp_volume['name'],
+                                                 temp_volume['size'])
+            temp_volume['status'] = volume['status']
+            super(StorACDriver, self).copy_volume_to_image(context,
+                                                           temp_volume,
+                                                           image_service,
+                                                           image_meta)
+        except Exception as ex:
+            err_msg = (_LE('Copy volume to image failed: %s.') % ex)
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+        finally:
+            self._clean_copy_volume_data(temp_volume,
+                                         temp_snapshot,
+                                         'copy_volume_to_image')
+
+    def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
+        err_msg = ''
+        temp_snapshot, temp_volume = self._create_temp_snap_and_volume(src_vol)
+        try:
+            self.create_snapshot(temp_snapshot)
+            self._create_linked_volume_from_snap(temp_snapshot['name'],
+                                                 temp_volume['name'],
+                                                 temp_volume['size'])
+            temp_volume['status'] = src_vol['status']
+            super(StorACDriver, self).copy_volume_data(context,
+                                                       temp_volume,
+                                                       dest_vol,
+                                                       remote)
+        except Exception as ex:
+            err_msg = (_LE('Copy volume data failed: %s.') % ex)
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+        finally:
+            self._clean_copy_volume_data(temp_volume,
+                                         temp_snapshot,
+                                         'copy_volume_data')
+
+    def _create_temp_snap_and_volume(self, src_vol):
+        temp_snapshot = {'name': 'snapshot-' + six.text_type(uuid.uuid1()),
+                         'volume_name': src_vol['name'],
+                         'smartflag': '1',
+                         'volume_id': src_vol['id']}
+        temp_volume_id = six.text_type(uuid.uuid1())
+        temp_volume = {'id': temp_volume_id,
+                       'name': 'volume-' + temp_volume_id,
+                       'size': src_vol['size']}
+        return temp_snapshot, temp_volume
+
+    def _clean_copy_volume_data(self, temp_volume, temp_snapshot, method):
+        """Best-effort cleanup of the temp volume/snapshot pair.
+
+        Failures are logged (tagged with the calling method name) but
+        never raised, so cleanup of one object cannot block the other.
+        """
+        try:
+            self.delete_volume(temp_volume)
+        except Exception as ex:
+            err_msg = (_LE('Delete temp volume failed '
+                       'after %(method)s: %(ex)s.')
+                       % {'ex': ex, 'method': method})
+            LOG.error(err_msg)
+        try:
+            self.delete_snapshot(temp_snapshot)
+        except Exception as ex:
+            err_msg = (_LE('Delete temp snapshot failed '
+                       'after %(method)s: %(ex)s.')
+                       % {'ex': ex, 'method': method})
+            LOG.error(err_msg)
+
+    def _is_volume_attached(self, volume_id):
+        """Return True if 'volume-<volume_id>' is connected on this host.
+
+        Uses a brick connector for the HUAWEISDSHYPERVISOR protocol to
+        check the connection state; an empty volume_id is never attached.
+        """
+        if not volume_id:
+            return False
+        conn = {'driver_volume_type': 'HUAWEISDSHYPERVISOR',
+                'data': {'volume_id': 'volume-' + volume_id}}
+        use_multipath = self.configuration.use_multipath_for_image_xfer
+        device_scan_attempts = self.configuration.num_volume_device_scan_tries
+        protocol = conn['driver_volume_type']
+        connector = utils.brick_get_connector(protocol,
+                                              use_multipath=use_multipath,
+                                              device_scan_attempts=
+                                              device_scan_attempts,
+                                              conn=conn)
+        is_volume_attached = connector.is_volume_connected(
+            conn['data']['volume_id'])
+        return is_volume_attached
+
+    def _create_target_volume(self, src_volume, tgt_vol_name, tgt_volume):
+        """Create the target volume of a clone operation.
+
+        Falls back to the source volume's size when the target size is 0,
+        and applies the target's volume-type settings before creation.
+        Returns the volume_info dict sent to the backend.
+        """
+        if int(tgt_volume['size']) == 0:
+            tgt_vol_size = self._size_translate(src_volume['size'])
+        else:
+            tgt_vol_size = self._size_translate(tgt_volume['size'])
+
+        volume_info = self._create_storage_info('volume_info')
+        volume_info['vol_name'] = tgt_vol_name
+        volume_info['vol_size'] = tgt_vol_size
+        volume_info['pool_id'] = self._get_volume_pool_id(tgt_volume['host'])
+
+        self._update_volume_info_from_volume_type(volume_info,
+                                                  tgt_volume['volume_type_id'])
+        self._send_request('CREATE_VOLUME_REQ',
+                           volume_info,
+                           'create volume failed.')
+        return volume_info
+
+    def _create_linked_volume_from_snap(self, src_snapshot_name,
+                                        tgt_vol_name, volume_size):
+        """Create a linked-clone volume from an existing snapshot."""
+        vol_size = self._size_translate(volume_size)
+        req_paras = {'vol_name': tgt_vol_name,
+                     'vol_size': vol_size,
+                     'snap_name_src': src_snapshot_name,
+                     'vol_num': '1'}
+
+        self._send_request('CREATE_VOLUME_FROM_SNAPSHOT_REQ',
+                           req_paras,
+                           'Create volume from snapshot failed.')
+
+    def _get_volume_stats(self):
+        """Retrieve stats info from volume group.
+
+        Stores per-pool capability dicts under 'pools'; for a single pool
+        its values are also flattened into the top level of the stats.
+        """
+        capacity = self._get_capacity()
+        self._volume_stats['pools'] = capacity
+
+        # NOTE(review): _get_capacity returns dict.values(); indexing it
+        # here only works where that is a list (Python 2) — confirm.
+        if len(capacity) == 1:
+            for key, value in capacity[0].items():
+                self._volume_stats[key] = value
+
+    def _get_all_pool_capacity(self):
+        """Query the backend for the capacity of every configured pool."""
+        pool_info = {}
+        poolnum = len(self._volume_stats['pools_id'])
+        pool_info['pool_num'] = six.text_type(poolnum)
+        pool_info['pool_id'] = self._volume_stats['pools_id']
+        result = self._send_request('QUERY_POOLS_CAPABILITY_REQ',
+                                    pool_info,
+                                    'Get storage capacity failed')
+        return self._extract_pool_capacity_mapping_from_result(result)
+
+    def _get_capacity(self):
+        storage_capacity = []
+        try:
+            all_pool_policy = self._extract_pool_policy_mapping_from_config(
+                self._conf_file)
+            all_pool_capacity = self._get_all_pool_capacity()
+            self._update_all_pool_capacity_from_policy(all_pool_capacity,
+                                                       all_pool_policy)
+            storage_capacity = all_pool_capacity.values()
+        except exception.VolumeBackendAPIException as ex:
+            msg = (_LE('Error from get block storage capacity: '
+                   '%s.') % ex)
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(msg)
+        return storage_capacity
+
+    def _delete_snapshot(self, snapshot):
+        """Send the snapshot delete request and wait for completion."""
+        req_paras = {}
+        req_paras['snap_name'] = snapshot['name']
+        self._send_request('DELETE_SNAPSHOT_REQ',
+                           req_paras,
+                           'Delete snapshot error.')
+        self._wait_for_snapshot_delete(snapshot['name'])
+
+    def _create_default_volume_stats(self):
+        """Build the static skeleton of the driver's volume stats dict."""
+        default_volume_stats = {'tolerance_disk_failure': ['1', '2', '3'],
+                                'tolerance_cache_failure': ['0', '1'],
+                                'free_capacity_gb': 0,
+                                'total_capacity_gb': 0,
+                                'reserved_percentage': 0,
+                                'vendor_name': 'Huawei',
+                                'driver_version': self.VERSION,
+                                'storage_protocol': 'StorageHypervisor',
+                                'pools_id': []}
+        # Fall back to the class name when no backend name is configured.
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        default_volume_stats['volume_backend_name'] = (
+            backend_name or self.__class__.__name__)
+        return default_volume_stats
+
+    def _get_default_volume_stats(self):
+        default_volume_stats = self._create_default_volume_stats()
+        self._update_default_volume_stats_from_config(default_volume_stats,
+                                                      self._conf_file)
+        return default_volume_stats
+
+    def _wait_for_volume_delete(self, volume_name):
+        """Wait for volume delete to complete.
+
+        Polls via FixedIntervalLoopingCall; the callback ends the loop
+        with True (deleted) or False (query failed), and False raises.
+        """
+        timer = loopingcall.FixedIntervalLoopingCall(
+            self._check_volume_delete_finished, volume_name)
+        LOG.debug('Calling _wait_for_volume_delete: volume_name %s.'
+                  % volume_name)
+        ret = timer.start(
+            interval=CHECK_VOLUME_DELETE_FINISHED_INTERVAL).wait()
+        timer.stop()
+        if not ret:
+            msg = (_LE('Delete volume failed,volume_name: %s.')
+                   % volume_name)
+            LOG.error(msg)
+            # NOTE(review): other raises in this driver pass data=;
+            # confirm message= is intended here.
+            raise exception.VolumeBackendAPIException(message=msg)
+
+        LOG.debug('Finish _wait_for_volume_delete: volume_name %s.'
+                  % volume_name)
+
+    def _wait_for_snapshot_delete(self, snapshot_name):
+        """Wait for snapshot delete to complete.
+
+        Same polling scheme as _wait_for_volume_delete, for snapshots.
+        """
+        timer = loopingcall.FixedIntervalLoopingCall(
+            self._check_snapshot_delete_finished, snapshot_name)
+        LOG.debug('Calling _wait_for_snapshot_delete: snapshot_name %s.'
+                  % snapshot_name)
+        ret = timer.start(
+            interval=CHECK_SNAPSHOT_DELETE_FINISHED_INTERVAL).wait()
+        timer.stop()
+        if not ret:
+            msg = (_LE('Delete snapshot failed,snapshot_name: %s.')
+                   % snapshot_name)
+            LOG.error(msg)
+            # NOTE(review): other raises in this driver pass data=;
+            # confirm message= is intended here.
+            raise exception.VolumeBackendAPIException(message=msg)
+
+        LOG.debug('Finish _wait_for_snapshot_delete: snapshot_name %s.'
+                  % snapshot_name)
+
+    def _check_volume_delete_finished(self, volume_name):
+        """Loop callback: end the wait with True once the volume is gone.
+
+        A query failure aborts the wait with retvalue False; while the
+        volume still exists the callback returns normally and the loop
+        fires again.
+        """
+        try:
+            is_volume_exist = self._is_volume_exist(volume_name)
+        except Exception as ex:
+            msg = (_LE('Check volume_name delete finished failed: '
+                   '%s.') % ex)
+            LOG.error(msg)
+            raise loopingcall.LoopingCallDone(retvalue=False)
+        if not is_volume_exist:
+            raise loopingcall.LoopingCallDone(retvalue=True)
+
+    def _check_snapshot_delete_finished(self, snapshot_name):
+        try:
+            is_snapshot_exist = self._is_snapshot_exist(snapshot_name)
+        except Exception as ex:
+            msg = (_LE('Check snapshot delete finished failed: '
+                   '%s.') % ex)
+            LOG.error(msg)
+            raise loopingcall.LoopingCallDone(retvalue=False)
+        if not is_snapshot_exist:
+            raise loopingcall.LoopingCallDone(retvalue=True)
+
+    def _query_volume(self, volume_name):
+        """Send QUERY_VOLUME_REQ and return the deserialized reply dict."""
+        request_info = {'vol_name': volume_name}
+        request_type = 'QUERY_VOLUME_REQ'
+        rsp_str = self._vbs_client.send_message(
+            storhyper_utils.serialize(request_type,
+                                      request_info)
+        )
+        LOG.debug('%s received:%s.' % (request_type, repr(rsp_str)))
+        result = storhyper_utils.deserialize(six.text_type(rsp_str),
+                                             delimiter='\n')
+        storhyper_utils.log_dict(result)
+        return result
+
+    def _is_volume_exist(self, volume_name):
+        """Return whether the backend knows the volume.
+
+        False when retcode is the 'deleted' code, True when retcode is '0'
+        and the returned volume has an acceptable status; any other reply
+        raises VolumeBackendAPIException.  The retcode check above limits
+        values to '0' or del_complete_code, so one branch always returns.
+        """
+        query_volume_result = self._query_volume(volume_name)
+        if ((not query_volume_result) or
+                ('retcode' not in query_volume_result) or
+                (query_volume_result['retcode']
+                 not in ('0', self.del_complete_code))):
+            msg = _('%(err)s\n') % {'err': 'Query volume failed!'
+                                           ' Invalid result code'}
+            raise exception.VolumeBackendAPIException(data=msg)
+        if query_volume_result['retcode'] == self.del_complete_code:
+            return False
+        if query_volume_result['retcode'] == '0':
+            if 'volume0' not in query_volume_result:
+                msg = _('%(err)s\n') % {'err': 'Query volume failed! '
+                                               'Volume0 not exist!'}
+                raise exception.VolumeBackendAPIException(data=msg)
+            query_volume_result['volume0'] = \
+                storhyper_utils.generate_dict_from_result(
+                    query_volume_result['volume0'])
+            # Only statuses '1', '2' and '10' count as a usable volume;
+            # anything else is treated as a failed query.
+            if (('status' not in query_volume_result['volume0']) or
+                    (query_volume_result['volume0']['status'] not in
+                        ('1', '2', '10'))):
+                msg = _('%(err)s\n') % {'err': 'Query volume failed!'
+                                               ' Invalid volume status'}
+                raise exception.VolumeBackendAPIException(data=msg)
+            return True
+
+    def _query_snapshot(self, snapshot_name):
+        """Send QUERY_SNAPSHOT_REQ and return the deserialized reply."""
+        request_info = {'snap_name': snapshot_name}
+        request_type = 'QUERY_SNAPSHOT_REQ'
+        rsp_str = self._vbs_client.send_message(
+            storhyper_utils.serialize(request_type,
+                                      request_info)
+        )
+        LOG.debug('%s received:%s.' % (request_type, repr(rsp_str)))
+        result = storhyper_utils.deserialize(six.text_type(rsp_str),
+                                             delimiter='\n')
+        storhyper_utils.log_dict(result)
+        return result
+
+    def _is_snapshot_exist(self, snapshot_name):
+        """Return whether the backend knows the snapshot.
+
+        Mirrors _is_volume_exist: False for the 'deleted' retcode, True
+        for retcode '0' with snapshot status '1' or '2'; anything else
+        raises VolumeBackendAPIException.
+        """
+        query_snapshot_result = self._query_snapshot(snapshot_name)
+        if ((not query_snapshot_result) or
+                ('retcode' not in query_snapshot_result) or
+                (query_snapshot_result['retcode']
+                 not in ('0', self.del_complete_code))):
+            msg = _('%(err)s\n') % {'err': 'Query snapshot failed!'}
+            raise exception.VolumeBackendAPIException(data=msg)
+        if query_snapshot_result['retcode'] == self.del_complete_code:
+            return False
+        if query_snapshot_result['retcode'] == '0':
+            if 'snapshot0' not in query_snapshot_result:
+                msg = _('%(err)s\n') % {'err': 'Query snapshot failed!'}
+                raise exception.VolumeBackendAPIException(data=msg)
+            query_snapshot_result['snapshot0'] =\
+                storhyper_utils.generate_dict_from_result(
+                    query_snapshot_result['snapshot0'])
+            # Only statuses '1' and '2' count as a usable snapshot.
+            if (('status' not in query_snapshot_result['snapshot0']) or
+                    (query_snapshot_result['snapshot0']['status'] not in
+                        ('1', '2'))):
+                msg = _('%(err)s\n') % {'err': 'Query snapshot failed!'}
+                raise exception.VolumeBackendAPIException(data=msg)
+            return True
+
+    def _get_volume_pool_id(self, volume_host):
+        if volume_host:
+            if len(volume_host.split('#', 1)) == 2:
+                return volume_host.split('#')[1]
+
+        if len(self._volume_stats['pools_id']) == 1:
+            return self._volume_stats['pools_id'][0]
+        else:
+            msg = (_LE("Get pool id failed, invalid pool id."))
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def _send_request(self, request_type, request_info, error_message):
+        rsp_str = self._vbs_client.send_message(
+            storhyper_utils.serialize(request_type, request_info))
+        LOG.debug('%s received:%s.' % (request_type, repr(rsp_str)))
+        result = storhyper_utils.deserialize(six.text_type(rsp_str),
+                                             delimiter='\n')
+        storhyper_utils.log_dict(result)
+        if (len(result) < 0 or 'retcode' not in result
+                or result['retcode'] != '0'):
+            msg = _('%(err)s\n') % {'err': error_message}
+            raise exception.VolumeBackendAPIException(data=msg)
+        return result
+
+    def _update_default_volume_stats_from_config(self,
+                                                 default_volume_stats,
+                                                 config_file):
+            root = storhyper_utils.parse_xml_file(config_file)
+            for child in root.find('policy').findall('*'):
+                if child.tag == 'QoS_support':
+                    if child.text.strip() == '0':
+                        default_volume_stats[child.tag] = False
+                    else:
+                        default_volume_stats[child.tag] = True
+                else:
+                    default_volume_stats[child.tag] = child.text.strip()
+            for child in root.find('capability').findall('*'):
+                default_volume_stats[child.tag] = child.text.strip()
+            pools = root.find('pools').findall('*')
+            for pool in pools:
+                for child in pool.findall('*'):
+                    childtext = child.text.strip()
+                    if child.tag == 'pool_id' and len(childtext) > 0:
+                        default_volume_stats['pools_id'].append(childtext)
+
+    def _update_all_pool_capacity_from_policy(self,
+                                              all_pool_capacity,
+                                              all_pool_policy):
+        for pool_name in all_pool_capacity.keys():
+            if pool_name in all_pool_policy:
+                for pool_key, pool_value in all_pool_policy[pool_name].items():
+                    all_pool_capacity[pool_name][pool_key] = pool_value
+
+    def _extract_pool_policy_mapping_from_config(self, conf_file):
+        pools_policy_mapping = {}
+        root = storhyper_utils.parse_xml_file(conf_file)
+        pools = root.find('pools').findall('*')
+        for pool in pools:
+            policy = {}
+            pool_id = ''
+            for child in pool.findall('*'):
+                if child.tag == 'pool_id':
+                    pool_id = child.text.strip()
+                else:
+                    policy[child.tag] = child.text.strip()
+            pools_policy_mapping[pool_id] = policy
+        return pools_policy_mapping
+
+    def _extract_pool_capacity_mapping_from_result(self, result):
+        pool_capacity_mapping = {}
+        for key, value in result.items():
+            if 'pool' in key and value:
+                pool_capacity = {}
+                pool_name = ''
+                pool_str = value.replace('[', '').replace(']', '')
+                paras = pool_str.split(',')
+                for para in paras:
+                    key = para.split('=')[0]
+                    value = para.split('=')[1]
+                    if key == 'stor_id':
+                        pool_capacity['pool_name'] = six.text_type(value)
+                        pool_name = six.text_type(value)
+                    elif key == 'total_capacity':
+                        pool_capacity['total_capacity_gb'] = int(value)
+                    elif key == 'usable_capacity':
+                        pool_capacity['free_capacity_gb'] = int(value)
+                    elif key == 'raid_level':
+                        pool_capacity['raid_level'] = int(value)
+                    elif key == 'iops':
+                        pool_capacity['iops'] = int(value)
+
+                pool_capacity['allocated_capacity_gb'] = \
+                    pool_capacity['total_capacity_gb'] \
+                    - pool_capacity['free_capacity_gb']
+                pool_capacity['reserved_percentage'] = 0
+                pool_capacity_mapping[pool_name] = pool_capacity
+
+        return pool_capacity_mapping
+
+    def _size_translate(self, size):
+            volume_size = '%s' % (size * units.Ki)
+            return volume_size
+
+    def _update_volume_info_from_volume_extra_specs(self, volume_info,
+                                                    extra_specs):
+        if not extra_specs:
+            return
+
+        for x in extra_specs:
+            key = x['key']
+            value = x['value']
+            LOG.debug('Volume type: key=%(key)s  value=%(value)s.'
+                      % {'key': key, 'value': value})
+            if key in volume_info.keys():
+                words = value.strip().split()
+                volume_info[key] = words.pop()
+
+    def _update_volume_info_from_volume(self, volume_info, volume):
+        if not volume['volume_type_id']:
+            return
+        else:
+            spec = volume['volume_type']['extra_specs']
+            self._update_volume_info_from_volume_extra_specs(volume_info,
+                                                             spec)
+            self._update_volume_info_from_qos_specs(volume_info,
+                                                    volume['volume_type'])
+
+    def _update_volume_info_from_extra_specs(self,
+                                             volume_info,
+                                             extra_specs):
+        if not extra_specs:
+            return
+        for key, value in extra_specs.items():
+            LOG.debug('key=%(key)s  value=%(value)s.'
+                      % {'key': key, 'value': value})
+            if key in volume_info.keys():
+                words = value.strip().split()
+                volume_info[key] = words.pop()
+
+    def _update_volume_info_from_qos_specs(self,
+                                           volume_info,
+                                           qos_specs):
+        if not qos_specs:
+            return
+        if qos_specs.get('qos_specs'):
+            if qos_specs['qos_specs'].get('specs'):
+                qos_spec = qos_specs['qos_specs'].get('specs')
+                for key, value in qos_spec.items():
+                    LOG.debug('key=%(key)s  value=%(value)s.'
+                              % {'key': key, 'value': value})
+                    if key in QOS_KEY:
+                        volume_info['IOClASSID'] = value.strip()
+                        qos_level = key
+                        if qos_level == 'Qos-high':
+                            volume_info['IOPRIORITY'] = "3"
+                        elif qos_level == 'Qos-normal':
+                            volume_info['IOPRIORITY'] = "2"
+                        elif qos_level == 'Qos-low':
+                            volume_info['IOPRIORITY'] = "1"
+                        else:
+                            volume_info['IOPRIORITY'] = "2"
+
+    def _update_volume_info_from_volume_type(self,
+                                             volume_info,
+                                             volume_type_id):
+        if not volume_type_id:
+            return
+        else:
+            volume_type = volume_types.get_volume_type(
+                context.get_admin_context(), volume_type_id)
+            extra_specs = volume_type.get('extra_specs')
+            self._update_volume_info_from_extra_specs(volume_info, extra_specs)
+            qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id)
+            self._update_volume_info_from_qos_specs(volume_info, qos_specs)
+
+    def _create_storage_info(self, info_type):
+        if info_type == 'volume_info':
+            volume_info = {'vol_name': '',
+                           'vol_size': '',
+                           'pool_id': '0',
+                           'thin_flag': '0',
+                           'reserved': '0',
+                           'volume_space_reserved': '0',
+                           'force_provision_size': '0',
+                           'iops': '100',
+                           'max_iops': '100',
+                           'min_iops': '0',
+                           'cache_size': '0',
+                           'repicate_num': '1',
+                           'repicate_tolerant_num': '1',
+                           'encrypt_algorithm': '0',
+                           'consistency': '0',
+                           'stor_space_level': '1',
+                           'compress_algorithm': '0',
+                           'deduplication': '0',
+                           'snapshot': '0',
+                           'backup_cycle': '0',
+                           'tolerance_disk_failure': '0',
+                           'tolerance_cache_failure': '1'}
+            return volume_info
+        else:
+            LOG.error(_LE('Invalid info type.'))
+            return None
diff --git a/cinder/volume/drivers/huaweistorhyper/utils.py b/cinder/volume/drivers/huaweistorhyper/utils.py
new file mode 100644 (file)
index 0000000..2e5487a
--- /dev/null
@@ -0,0 +1,120 @@
+# Copyright (c) 2014 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+ Utils for Huawei SDSHypervisor systems.
+"""
+
+import socket
+from xml.etree import ElementTree as ETree
+
+import six
+
+from cinder.i18n import _LW, _LE
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
def serialize(title, para):
    """Render a '[title]' header followed by 'key=value' lines.

    List values expand to one 'key=item' line per list element.
    Returns the whole message as a single string.
    """
    parts = ['[' + title + ']\n']
    for key, value in para.items():
        items = value if isinstance(value, list) else [value]
        for item in items:
            parts.append(key + "=" + six.text_type(item) + "\n")
        LOG.debug('key=%(key)s  value=%(value)s.'
                  % {'key': key, 'value': value})
    return ''.join(parts)
+
+
def deserialize(rsp_str, delimiter):
    """Parse 'key=value' lines out of rsp_str into a dict.

    Lines without '=' are ignored; values are stripped of newlines,
    NUL padding bytes and surrounding whitespace.
    """
    LOG.debug('Calling deserialize: %s.' % rsp_str)
    rsp = {}
    if rsp_str:
        for line in rsp_str.split(delimiter):
            LOG.debug('line = %s.' % line)
            if '=' in line:
                key, value = six.text_type(line).split('=', 1)
                # split('=', 1) already guarantees the key holds no '=';
                # the original stripped '=' from it again (dead code).
                value = value.replace('\n', '').replace('\x00', '')
                rsp[key] = value.strip()
    return rsp
+
+
def parse_xml_file(file_path):
    """Parse an XML file and return its root element.

    IOErrors are logged (with traceback) and re-raised.
    """
    try:
        tree = ETree.parse(file_path)
        root = tree.getroot()
        return root
    except IOError:
        # The original log call supplied no argument for the '%s'
        # placeholder, which broke log-record formatting; pass the path
        # and re-raise bare to preserve the traceback.
        LOG.error(_LE('Parse_xml_file: %s.'), file_path, exc_info=True)
        raise
+
+
def check_ipv4(ip_string):
    """Check whether ip_string is a valid dotted IPv4 address."""
    # inet_aton also accepts short forms such as '127.1'; requiring at
    # least one dot filters out plain integers and bare hostnames.
    if ip_string.find('.') == -1:
        return False
    try:
        socket.inet_aton(ip_string)
        return True
    except (socket.error, TypeError):
        # Narrowed from a bare `except Exception`: inet_aton raises
        # socket.error (OSError) for malformed addresses and TypeError
        # for non-string input.
        return False
+
+
def get_valid_ip_list(ip_list):
    """Filter ip_list down to syntactically valid IPv4 addresses.

    Entries are stripped of surrounding whitespace; invalid entries are
    logged and dropped.
    """
    valid_ip_list = []
    for ip in ip_list:
        ip = ip.strip()
        # Lazy %-style log arguments avoid formatting when debug is off.
        LOG.debug('IP=%s.', ip)
        if check_ipv4(ip):
            valid_ip_list.append(ip)
        else:
            # warn() is a deprecated alias of warning().
            LOG.warning(_LW('Invalid ip, ip address is: %s.'), ip)
    return valid_ip_list
+
+
def get_ip_and_port(config_file):
    """Read the VBS controller addresses and port from the config file.

    Returns (valid_ip_list, port), where the ip list has been filtered
    through get_valid_ip_list() and the port is an int.
    """
    root = parse_xml_file(config_file)
    url_text = root.findtext('controller/vbs_url').strip()
    LOG.debug('VbsClient   vbs_url=%s.' % url_text)
    port_text = root.findtext('controller/vbs_port').strip()
    LOG.debug('VbsClient   vbs_port=%s.' % port_text)
    return get_valid_ip_list(url_text.split(',')), int(port_text)
+
+
def log_dict(result):
    """Debug-log every key/value pair of result (no-op when empty)."""
    if not result:
        return
    for key, value in result.items():
        LOG.debug('key=%(key)s  value=%(value)s.'
                  % {'key': key, 'value': value})
+
+
def generate_dict_from_result(result):
    """Convert a '[k1=v1,k2=v2,...]' result string into a dict."""
    LOG.debug('Result from response=%s.' % result)
    stripped = result.replace('[', '').replace(']', '')
    parsed = deserialize(stripped, delimiter=',')
    log_dict(parsed)
    return parsed
\ No newline at end of file
diff --git a/cinder/volume/drivers/huaweistorhyper/vbs_client.py b/cinder/volume/drivers/huaweistorhyper/vbs_client.py
new file mode 100644 (file)
index 0000000..d6bf650
--- /dev/null
@@ -0,0 +1,82 @@
+# Copyright (c) 2014 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+VBS client for internal communication with Huawei SDSHypervisor systems.
+"""
+
+import socket
+
+from oslo.utils import units
+
+from cinder.i18n import _, _LE
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.huaweistorhyper import utils as storhyper_utils
+
+LOG = logging.getLogger(__name__)
+
+BUFFER_SIZE = 1024
+
+
class VbsClient(object):
    """TCP client for the SDSHypervisor VBS service."""

    def __init__(self, config_file):
        """Load the VBS controller ip list and port from config_file."""
        LOG.debug('Vbs client init.')
        self.config_file = config_file
        (self.ip_list, self.port) = \
            storhyper_utils.get_ip_and_port(config_file)

    def send_message(self, msg):
        """Send msg to the first reachable VBS host; return its reply."""
        return self._send_message_to_first_valid_host(msg)

    def _send_message_to_first_valid_host(self, msg):
        """Try each configured ip in order; return the first non-empty
        reply (or the last result when all hosts fail)."""
        LOG.debug('Send message to first valid host.')
        if not self.ip_list:
            # Use a distinct name: the original shadowed the `msg`
            # request parameter with the error text.
            err = _LE('No valid ip in vbs ip list.')
            LOG.error(err)
            raise AssertionError(err)

        exec_result = ''
        for ip in self.ip_list:
            exec_result = VbsClient.send_and_receive(
                ip, self.port, msg
            )
            if exec_result:
                return exec_result
        return exec_result

    @staticmethod
    def send_and_receive(ip, port, request):
        """Exchange one request/response over a fresh TCP connection.

        Returns the decoded response string, or None when the exchange
        fails (failures are logged, never raised).
        """
        rsp = None
        socket_instance = None
        try:
            socket_instance = socket.socket(socket.AF_INET,
                                            socket.SOCK_STREAM)
            socket_instance.connect((ip, port))
            LOG.debug('Start sending requests.')
            # sendall() guarantees the whole request is transmitted;
            # the original send() could deliver only a prefix.
            socket_instance.sendall(request.encode('utf-8', 'strict'))
            LOG.debug('Waiting for response.')
            # Use the module's BUFFER_SIZE constant (1024, same value as
            # units.Ki) instead of leaving it defined-but-unused.
            rsp = socket_instance.recv(BUFFER_SIZE).decode(
                'utf-8', 'strict')
            LOG.debug('Response received: %s.' % repr(rsp))
            return rsp
        except OSError as ose:
            LOG.exception(_('Send message failed,OSError. %s.'), ose)
        except Exception as e:
            LOG.exception(_('Send message failed. %s.'), e)
        finally:
            if socket_instance:
                socket_instance.close()
        return rsp