review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Driver for Fusion-io ioControl Hybrid array
author Ed Balduf <ebalduf@fusionio.com>
Wed, 7 May 2014 19:04:14 +0000 (13:04 -0600)
committer Ed Balduf <ebalduf@fusionio.com>
Sat, 30 Aug 2014 20:12:29 +0000 (14:12 -0600)
Standard Cinder driver for an iSCSI target array.
Uses the REST API of the ioControl array.
Implements Quality of Service through the five policies available on the array.
Add Apache License block
Test results: https://bugs.launchpad.net/cinder/+bug/1317248

Change-Id: I27fdaedb9f75629a6af625f4e1c7d3f89a8cbb48
Implements: blueprint fusion-io-iocontrol-driver
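For reference, a minimal cinder.conf backend stanza for this driver might look
like the sketch below. The driver path, backend name and option names are taken
from the code and tests in this change; the section name, addresses and
credentials are placeholders, and the fusionio_iocontrol_* options are shown
with the defaults the driver registers.

    [fio-iocontrol]
    volume_driver = cinder.volume.drivers.fusionio.ioControl.FIOioControlDriver
    volume_backend_name = fio-ioControl
    san_ip = 10.123.10.123
    san_login = fioTestUser
    san_password = fioTestUserPassword
    fusionio_iocontrol_targetdelay = 5
    fusionio_iocontrol_retry = 3
    fusionio_iocontrol_verify_cert = True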

cinder/tests/test_fusionio_ioControl.py [new file with mode: 0644]
cinder/volume/drivers/fusionio/__init__.py [new file with mode: 0644]
cinder/volume/drivers/fusionio/ioControl.py [new file with mode: 0644]
etc/cinder/cinder.conf.sample
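The driver picks a policy per volume from the 'fio-qos' key, which can come
from volume metadata, a volume type's QoS spec, or a scoped extra spec
(qos:fio-qos), as the unit tests below exercise. One way an operator might tie
a policy to a volume type, assuming the standard python-cinderclient CLI (IDs
are placeholders):

    cinder type-create fio-gold
    cinder qos-create fio-policy-2 fio-qos='Policy 2'
    cinder qos-associate <qos-spec-id> <volume-type-id>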

diff --git a/cinder/tests/test_fusionio_ioControl.py b/cinder/tests/test_fusionio_ioControl.py
new file mode 100644 (file)
index 0000000..373d6d9
--- /dev/null
@@ -0,0 +1,839 @@
+# Copyright (c) 2014 Fusion-io, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import json
+
+import mock
+import requests
+
+from cinder import context
+from cinder.db.sqlalchemy.models import VolumeMetadata
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
+from cinder.openstack.common import units
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.fusionio.ioControl import FIOconnection
+from cinder.volume.drivers.fusionio.ioControl import FIOioControlDriver
+from cinder.volume import qos_specs
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+
+basic_net_response = [{"IsManagementPort": True,
+                       "NetworkAddress": "10.10.1.82",
+                       "IsReplicationPort": True, "OperationalState": "up",
+                       "ControllerUID": "FakeControl1_UID",
+                       "IfIndex": 2},
+                      {"IsManagementPort": True,
+                       "NetworkAddress": "10.10.1.83",
+                       "IsReplicationPort": True, "OperationalState": "up",
+                       "ControllerUID": "FakeControl1_UID",
+                       "IfIndex": 3},
+                      {"IsManagementPort": False,
+                       "NetworkAddress": "",
+                       "IsReplicationPort": False, "OperationalState": "down",
+                       "ControllerUID": "FakeControl1_UID",
+                       "IfIndex": 4},
+                      {"IsManagementPort": True,
+                       "NetworkAddress": "10.10.2.88",
+                       "IsReplicationPort": True, "OperationalState": "up",
+                       "ControllerUID": "FakeControl2_UID",
+                       "IfIndex": 2},
+                      {"IsManagementPort": False,
+                       "NetworkAddress": "10.10.2.84",
+                       "IsReplicationPort": False, "OperationalState": "up",
+                       "ControllerUID": "FakeControl2_UID",
+                       "IfIndex": 3},
+                      {"IsManagementPort": False,
+                       "NetworkAddress": "",
+                       "IsReplicationPort": False, "OperationalState": "down",
+                       "ControllerUID": "FakeControl2_UID",
+                       "IfIndex": 4}]
+
+basic_pools_response = [{"TotalMB": 5079, "Name": "PoolOwnerA",
+                         "ExportedVolumeMB": 2049,
+                         "basetype": "StoragePool", "UsedVolumeMB": 3,
+                         "ObjectPath": "", "UsedMetaMB": 4, "UsedMB": 4,
+                         "SizeMB": 68677278, "UsedSnapMB": 0,
+                         "PagingUsedMB": 4,
+                         "CurrentOwnerUUID": "FakeControl1_UID",
+                         "TaskId": "", "PagingTotalMB": 5079, "Ready": True,
+                         "id": "FakePoolA_id",
+                         "Size": 72013345456128},
+                        {"TotalMB": 5079, "Name": "PoolOwnerB",
+                         "ExportedVolumeMB": 2049,
+                         "basetype": "StoragePool", "UsedVolumeMB": 193,
+                         "ObjectPath": "", "UsedMetaMB": 3, "UsedMB": 211,
+                         "SizeMB": 68677278, "UsedSnapMB": 0,
+                         "PagingUsedMB": 211,
+                         "CurrentOwnerUUID": "FakeControl2_UID",
+                         "TaskId": "", "PagingTotalMB": 5079, "Ready": True,
+                         "id": "FakePoolB_id",
+                         "Size": 72013345456128}
+                        ]
+
+basic_vol_response = [{"basetype": "Volume", "ObjectPath": "", "TaskId": "",
+                       "id": "FakeBasicVolID",
+                       "Name": "cinderVolumeID",
+                       "IQN": "iqn.2010-11.com.ngs:Volume:FakeBasicVolID",
+                       "Size": 1074266112, "SizeMB": 1024, "HighWaterMark": 0,
+                       "HighWaterMarkMB": 0, "MetadataSize": 262144,
+                       "MetadataSizeMB": 0, "DupedSize": 1074266112,
+                       "DupedSizeMB": 1024, "FaultTolerance": 0,
+                       "PathTolerance": 0,
+                       "AllowedTierMask": 18446744073709551615,
+                       "RequiredTierMask": 0, "NumberOfPagesPerChapter": 0,
+                       "CreateDateTime": 1390837136,
+                       "LayerId": "407115424bb9539c",
+                       "ParentLayerId": "0", "Protocol": "iscsi",
+                       "PoolUUID": "FakePoolB_id",
+                       "PolicyUUID": "00000000-00000000-0000-000000000000",
+                       "CurrentOwnerUUID": "FakeControl2_UID",
+                       "AclGroupList": ["1"], "ReplicaPeerList": [],
+                       "SnapshotRetention": 0}
+                      ]
+
+basic_policy_response = [{"id": "00000000-00000000-0000-000000000000",
+                          "Name": "Policy 5", },
+                         {"id": "00000000-00000000-0000-000000000002",
+                          "Name": "Policy 4", },
+                         {"id": "00000000-00000000-0000-000000000004",
+                          "Name": "Policy 3", },
+                         {"id": "00000000-00000000-0000-000000000008",
+                          "Name": "Policy 2", },
+                         {"id": "00000000-00000000-0000-000000000010",
+                          "Name": "Policy 1", },
+                         ]
+
+basic_snapshot_response = [{"basetype": "Snapshot", "ObjectPath": "",
+                            "TaskId": "", "id": "407115424bb9539c",
+                            "Name": "cinderSnapshotID",
+                            "VolumeUUID": "FakeBasicVolID",
+                            "PoolUUID": "FakePoolB_id",
+                            "ParentUUID": "0", "Size": 1074266112,
+                            "SizeMB": 1024, "SizeUsed": 0, "SizeUsedMB": 0,
+                            "SizeReclaimable": 0, "SizeReclaimableMB": 0,
+                            "CreateDateTime": 1390952554, "ChildCount": 1,
+                            "IsMounted": False, "IsHostConsistent": False,
+                            "ReplicaInfoList": []}
+                           ]
+
+basic_acl_group_response = [{"id": 1,
+                             "GroupName": "Deny Access",
+                             "InitiatorList": [], },
+                            {"id": 2,
+                             "GroupName": "Allow Access",
+                             "InitiatorList": ["iqn*"], },
+                            {"id": 3,
+                             "GroupName": "fake:01", "Description": "",
+                             "InitiatorList": ["fake:01"], },
+                            {"id": 4,
+                             "GroupName": "iqn.1994-05.com.redhat:fake1",
+                             "InitiatorList": ["iqn.1994-05.com.rhel:fake1"],
+                             },
+                            {"id": 5,
+                             "GroupName": "MyGroup", "Description": "",
+                             "InitiatorList": "iqn.1994-05.com.rhel:fake2", }
+                            ]
+
+
+def create_configuration():
+    configuration = conf.Configuration(None)
+    configuration.san_ip = "10.123.10.123"
+    configuration.san_login = "fioTestUser"
+    configuration.san_password = "fioTestUserPassword"
+    # we can set targetdelay to 0 for testing
+    configuration.fusionio_iocontrol_targetdelay = 0
+    configuration.fusionio_iocontrol_retry = 3
+    configuration.fusionio_iocontrol_verify_cert = True
+    return configuration
+
+
+class FIOFakeResponse(object):
+    """Fake response to requests."""
+
+    def __init__(self, code=None, text=None):
+        self.status_code = code
+        self.text = text
+
+    def json(self):
+        return json.loads(self.text)
+
+    def raise_for_status(self):
+        if self.status_code > 300:
+            raise requests.exceptions.HTTPError
+
+
+class FIOioControlConnectionTests(test.TestCase):
+
+    VERSION = '1.0.0'
+    fakeSessionID = '12345678'
+
+    def setUp(self):
+        super(FIOioControlConnectionTests, self).setUp()
+        self.configuration = create_configuration()
+        self.ctxt = context.get_admin_context()
+        return_text = json.dumps({"Version": FIOconnection.APIVERSION})
+        get_return = FIOFakeResponse(code=200,
+                                     text=return_text)
+        requests.get = mock.Mock(return_value=get_return)
+        self.conn = FIOconnection(self.configuration.san_ip,
+                                  self.configuration.san_login,
+                                  self.configuration.san_password,
+                                  self.configuration.fusionio_iocontrol_retry,
+                                  (self.configuration.
+                                   fusionio_iocontrol_verify_cert),)
+
+    def test_conn_init_success(self):
+        expected = [mock.call(url=("https://" +
+                                   self.configuration.san_ip +
+                                   "/AUTH/Version"),
+                              headers=self.conn.defhdrs,
+                              verify=True)]
+        requests.get.assert_has_calls(expected)
+
+    def test_wrong_version(self):
+        expected = json.dumps({"Version": (FIOconnection.APIVERSION + ".1")})
+        get_return = FIOFakeResponse(code=200,
+                                     text=expected)
+        requests.get = mock.Mock(return_value=get_return)
+        self.assertRaises(exception.VolumeDriverException,
+                          FIOconnection,
+                          self.configuration.san_ip,
+                          self.configuration.san_login,
+                          self.configuration.san_password,
+                          self.configuration.fusionio_iocontrol_retry,
+                          self.configuration.fusionio_iocontrol_verify_cert,)
+
+    def test_create_session_success(self):
+        expected_text = json.dumps({"id": self.fakeSessionID})
+        post_return = FIOFakeResponse(code=201,
+                                      text=expected_text)
+        put_return = FIOFakeResponse(code=201,
+                                     text=json.dumps({"Status": 1}))
+        requests.post = mock.Mock(return_value=post_return)
+        requests.put = mock.Mock(return_value=put_return)
+        result = self.conn._create_session()
+        expectedhdr = copy.deepcopy(self.conn.defhdrs)
+        expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
+        self.assertEqual(expectedhdr, result)
+
+    def test_create_session_auth_fail(self):
+        expected_text = json.dumps({"id": self.fakeSessionID})
+        post_return = FIOFakeResponse(code=201,
+                                      text=expected_text)
+        put_return = FIOFakeResponse(code=201,
+                                     text=json.dumps({"Status": (-1)}))
+        requests.post = mock.Mock(return_value=post_return)
+        requests.put = mock.Mock(return_value=put_return)
+        requests.delete = mock.Mock()
+        self.assertRaises(exception.VolumeDriverException,
+                          self.conn._create_session,)
+
+    def test_delete_session_success(self):
+        requests.delete = mock.Mock(return_value=True)
+        hdrs = copy.deepcopy(self.conn.defhdrs)
+        hdrs["Cookie"] = 'session=' + self.fakeSessionID
+        self.conn._delete_session(hdrs)
+        expected = [mock.call(url=("https://" +
+                                   self.configuration.san_ip +
+                                   "/AUTH/SESSION/" + self.fakeSessionID),
+                              headers=self.conn.defhdrs,
+                              verify=True), ]
+        requests.delete.assert_has_calls(expected)
+
+    def test_put_success(self):
+        put_return = FIOFakeResponse(code=201,
+                                     text=json.dumps({"Status": 1}))
+        requests.put = mock.Mock(return_value=put_return)
+        expectedhdr = copy.deepcopy(self.conn.defhdrs)
+        expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
+        self.conn._create_session = mock.Mock(return_value=expectedhdr)
+        self.conn._delete_session = mock.Mock()
+        testurl = '/test/url/'
+        testcontent = {'testdict': 'testvalue'}
+        self.conn.put(testurl, testcontent)
+        expected = [mock.call(), ]
+        self.conn._create_session.assert_has_calls(expected)
+        expected = [mock.call(expectedhdr), ]
+        self.conn._delete_session.assert_has_calls(expected)
+        expected = [mock.call(url=self.conn._complete_uri(testurl),
+                              data=json.dumps(testcontent, sort_keys=True),
+                              headers=expectedhdr, verify=True), ]
+        requests.put.assert_has_calls(expected)
+
+    def test_post_success(self):
+        expected_text = json.dumps({"id": self.fakeSessionID})
+        post_return = FIOFakeResponse(code=201,
+                                      text=expected_text)
+        requests.post = mock.Mock(return_value=post_return)
+        expectedhdr = copy.deepcopy(self.conn.defhdrs)
+        expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
+        self.conn._create_session = mock.Mock(return_value=expectedhdr)
+        self.conn._delete_session = mock.Mock()
+        testurl = '/test/url/'
+        testcontent = {'testdict': 'testvalue'}
+        self.conn.post(testurl, testcontent)
+        expected = [mock.call(), ]
+        self.conn._create_session.assert_has_calls(expected)
+        expected = [mock.call(expectedhdr), ]
+        self.conn._delete_session.assert_has_calls(expected)
+        expected = [mock.call(url=self.conn._complete_uri(testurl),
+                              data=json.dumps(testcontent, sort_keys=True),
+                              headers=expectedhdr, verify=True), ]
+        requests.post.assert_has_calls(expected)
+
+    def test_delete_success(self):
+        del_return = FIOFakeResponse(code=201, text=json.dumps({}))
+        requests.delete = mock.Mock(return_value=del_return)
+        expectedhdr = copy.deepcopy(self.conn.defhdrs)
+        expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
+        self.conn._create_session = mock.Mock(return_value=expectedhdr)
+        self.conn._delete_session = mock.Mock()
+        testurl = '/test/url/'
+        self.conn.delete(testurl,)
+        expected = [mock.call(), ]
+        self.conn._create_session.assert_has_calls(expected)
+        expected = [mock.call(expectedhdr), ]
+        self.conn._delete_session.assert_has_calls(expected)
+        expected = [mock.call(url=self.conn._complete_uri(testurl),
+                              headers=expectedhdr, verify=True), ]
+        requests.delete.assert_has_calls(expected)
+
+    def test_get_success(self):
+        get_return = FIOFakeResponse(code=200,
+                                     text=json.dumps(basic_acl_group_response))
+        expectedhdr = copy.deepcopy(self.conn.defhdrs)
+        expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
+        self.conn._create_session = mock.Mock(return_value=expectedhdr)
+        self.conn._delete_session = mock.Mock()
+        requests.get = mock.Mock(return_value=get_return)
+        testurl = '/test/url/'
+        result = self.conn.get(testurl,)
+        expected = [mock.call(), ]
+        self.conn._create_session.assert_has_calls(expected)
+        expected = [mock.call(expectedhdr), ]
+        self.conn._delete_session.assert_has_calls(expected)
+        expected = [mock.call(url=self.conn._complete_uri(testurl),
+                              headers=expectedhdr, verify=True), ]
+        requests.get.assert_has_calls(expected)
+        self.assertEqual(basic_acl_group_response, result)
+
+    def test_get_bad_json_once(self):
+        expectedhdr = copy.deepcopy(self.conn.defhdrs)
+        expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
+        self.conn._create_session = mock.Mock(return_value=expectedhdr)
+        self.conn._delete_session = mock.Mock()
+        expected_text = json.dumps(basic_acl_group_response)
+        jsonErrEffect = [FIOFakeResponse(code=200,
+                                         text='{"badjson":"bad",,}'),
+                         FIOFakeResponse(code=200,
+                                         text=expected_text)]
+        requests.get = mock.Mock(side_effect=jsonErrEffect)
+        testurl = '/test/url/'
+        result = self.conn.get(testurl,)
+        expected = [mock.call(), ]
+        self.conn._create_session.assert_has_calls(expected)
+        expected = [mock.call(expectedhdr), ]
+        self.conn._delete_session.assert_has_calls(expected)
+        expected = [mock.call(url=self.conn._complete_uri(testurl),
+                              headers=expectedhdr, verify=True), ]
+        requests.get.assert_has_calls(expected)
+        self.assertEqual(basic_acl_group_response, result)
+
+    def test_get_bad_json_retry_expire(self):
+        get_return = FIOFakeResponse(code=200, text='{"badjson":"bad",,}')
+        expectedhdr = copy.deepcopy(self.conn.defhdrs)
+        expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
+        self.conn._create_session = mock.Mock(return_value=expectedhdr)
+        self.conn._delete_session = mock.Mock()
+        requests.get = mock.Mock(return_value=get_return)
+        testurl = '/test/url/'
+        self.assertRaises(exception.VolumeDriverException,
+                          self.conn.get, testurl)
+        expected = [mock.call(), ]
+        self.conn._create_session.assert_has_calls(expected)
+        expected = [mock.call(expectedhdr), ]
+        self.conn._delete_session.assert_has_calls(expected)
+        expected = [mock.call(url=self.conn._complete_uri(testurl),
+                              headers=expectedhdr, verify=True),
+                    mock.call(url=self.conn._complete_uri(testurl),
+                              headers=expectedhdr, verify=True),
+                    mock.call(url=self.conn._complete_uri(testurl),
+                              headers=expectedhdr, verify=True), ]
+        requests.get.assert_has_calls(expected)
+
+    def test_get_failed_http_response(self):
+        get_return = FIOFakeResponse(code=404,
+                                     text=json.dumps(basic_acl_group_response))
+        expectedhdr = copy.deepcopy(self.conn.defhdrs)
+        expectedhdr["Cookie"] = 'session=' + self.fakeSessionID
+        self.conn._create_session = mock.Mock(return_value=expectedhdr)
+        self.conn._delete_session = mock.Mock()
+        requests.get = mock.Mock(return_value=get_return)
+        testurl = '/test/url/'
+        self.assertRaises(requests.exceptions.HTTPError,
+                          self.conn.get, testurl)
+        expected = [mock.call(), ]
+        self.conn._create_session.assert_has_calls(expected)
+        expected = [mock.call(expectedhdr), ]
+        self.conn._delete_session.assert_has_calls(expected)
+        expected = [mock.call(url=self.conn._complete_uri(testurl),
+                              headers=expectedhdr, verify=True), ]
+        requests.get.assert_has_calls(expected)
+
+
+@mock.patch('cinder.volume.drivers.fusionio.ioControl.FIOconnection',
+            autospec=True)
+class FIOioControlTestCases(test.TestCase):
+
+    VERSION = '1.0.0'
+    policyTable = {'Policy 4': '00000000-00000000-0000-000000000002',
+                   'Policy 5': '00000000-00000000-0000-000000000000',
+                   'Policy 2': '00000000-00000000-0000-000000000008',
+                   'Policy 3': '00000000-00000000-0000-000000000004',
+                   'Policy 1': '00000000-00000000-0000-000000000010'}
+
+    def setUp(self):
+        super(FIOioControlTestCases, self).setUp()
+        self.configuration = create_configuration()
+        self.ctxt = context.get_admin_context()
+        self.drv = FIOioControlDriver(configuration=self.configuration)
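+        # pre-populate the QoS policy table that do_setup() would normally
+        # build from the array's policy list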
+        self.drv.fio_qos_dict = self.policyTable
+
+    def test_do_setup_success(self, connmock):
+        # erase policy table, then make sure drv.do_setup builds it
+        self.drv.fio_qos_dict = {}
+        instance = connmock.return_value
+        instance.get.return_value = basic_policy_response
+        self.drv.do_setup(context="")
+        self.assertEqual(self.policyTable, self.drv.fio_qos_dict,
+                         "wrong policy table built")
+
+    def test_create_volume_simple_success_poolA(self, connmock):
+        self.drv.conn = connmock.return_value
+        bPoolResponse = copy.deepcopy(basic_pools_response)
+        bPoolResponse[1]['ExportedVolumeMB'] = 5009
+        self.drv.conn.get.return_value = bPoolResponse
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
+        self.drv.create_volume(testvol)
+        cmd = {"Size": int(testvol['size']) * units.Gi,
+               "PolicyUUID": '00000000-00000000-0000-000000000000',
+               "PoolUUID": "FakePoolA_id",
+               "Name": testvol['id'], }
+        expected = [mock.call.get('TierStore/Pools/by-id/'),
+                    mock.call.post('TierStore/Volumes/by-id/', cmd)]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_create_volume_simple_success_poolB(self, connmock):
+        self.drv.conn = connmock.return_value
+        bPoolResponse = copy.deepcopy(basic_pools_response)
+        bPoolResponse[0]['ExportedVolumeMB'] = 5009
+        self.drv.conn.get.return_value = bPoolResponse
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
+        self.drv.create_volume(testvol)
+        cmd = {"Size": int(testvol['size']) * units.Gi,
+               "PolicyUUID": '00000000-00000000-0000-000000000000',
+               "PoolUUID": "FakePoolB_id",
+               "Name": testvol['id'], }
+        expected = [mock.call.get('TierStore/Pools/by-id/'),
+                    mock.call.post('TierStore/Volumes/by-id/', cmd)]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_delete_volume_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
+        self.drv.conn.get.return_value = basic_vol_response
+        self.drv.delete_volume(testvol)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.delete('TierStore/Volumes/by-id/FakeBasicVolID')]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_create_snapshot_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        snapshot = {'volume_id': 'cinderVolumeID',
+                    'id': 'a720b3c0-d1f0-11e1-9b23-1234500cab39', }
+        self.drv.conn.get.return_value = basic_vol_response
+        cmd = {"VolumeUUID": "FakeBasicVolID",
+               "Name": snapshot['id'], }
+        self.drv.create_snapshot(snapshot)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.post('TierStore/Snapshots/by-id/', cmd), ]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_delete_snapshot_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        snapshot = {'volume_id': '1dead3c0-d1f0-beef-9b23-1274500cab58',
+                    'id': 'cinderSnapshotID'}
+        self.drv.conn.get.return_value = basic_snapshot_response
+        self.drv.delete_snapshot(snapshot)
+        expected = [mock.call.get('TierStore/Snapshots/by-id/'),
+                    mock.call.delete(
+                                    ('TierStore/Snapshots/by-id/' +
+                                     '407115424bb9539c')), ]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_create_volume_from_snapshot_simple_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
+        snapshot = {'volume_id': testvol['id'],
+                    'id': 'cinderSnapshotID'}
+        self.drv.conn.get.return_value = basic_snapshot_response
+        cmd = {"ParentLayerId": "407115424bb9539c",
+               "Name": testvol['id'],
+               "PolicyUUID": '00000000-00000000-0000-000000000000'}
+        self.drv.create_volume_from_snapshot(testvol, snapshot)
+        expected = [mock.call.get('TierStore/Snapshots/by-id/'),
+                    mock.call.put(
+                        'TierStore/Snapshots/functions/CloneSnapshot', cmd), ]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_initialize_connection_no_usable_Networks_fail(self, connmock):
+        self.drv.conn = connmock.return_value
+        connector = {'initiator': 'fake:01'}
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow(),
+                   'provider_auth': {}}
+        cmd = {"GroupName": "fake:01",
+               "InitiatorList": ["fake:01"]}
+        cmd2 = {"AclGroupList": ["3"], }
+        netResponse = copy.deepcopy(basic_net_response)
+        netResponse[4]['OperationalState'] = "down"
+        get_effect = [basic_vol_response,
+                      basic_acl_group_response,
+                      basic_vol_response,
+                      netResponse, ]
+        self.drv.conn.get.side_effect = get_effect
+        self.assertRaises(exception.VolumeDriverException,
+                          self.drv.initialize_connection, testvol,
+                          connector)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.post('TierStore/ACLGroup/by-id/', cmd),
+                    mock.call.get('TierStore/ACLGroup/by-id/'),
+                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
+                                  cmd2),
+                    mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.get('System/Network/by-id/'), ]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_initialize_connection_simple_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        connector = {'initiator': 'fake:01'}
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow(),
+                   'provider_auth': {}}
+        cmd = {"GroupName": "fake:01",
+               "InitiatorList": ["fake:01"]}
+        cmd2 = {"AclGroupList": ["3"], }
+        netResponse = copy.deepcopy(basic_net_response)
+        netResponse[2]['OperationalState'] = "up"
+        get_effect = [basic_vol_response,
+                      basic_acl_group_response,
+                      basic_vol_response,
+                      netResponse, ]
+        self.drv.conn.get.side_effect = get_effect
+        result = self.drv.initialize_connection(testvol, connector)
+        expected = {'driver_volume_type': 'iscsi',
+                    'data': {'target_lun': 0,
+                             'target_portal': u'10.10.2.84:3260',
+                             'target_iqn': (
+                                 'iqn.2010-11.com.ngs:Volume:FakeBasicVolID'),
+                             'target_discovered': False,
+                             'volume_id': 'cinderVolumeID'}}
+        self.assertEqual(result, expected, "wrong result from init connection")
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.post('TierStore/ACLGroup/by-id/', cmd),
+                    mock.call.get('TierStore/ACLGroup/by-id/'),
+                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
+                                  cmd2),
+                    mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.get('System/Network/by-id/'), ]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_terminate_connection_single_delete_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        connector = {'initiator': 'fake:01'}
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow(),
+                   'provider_auth': {}}
+        cmd = {"AclGroupList": ["1"], }
+        get_effect = [basic_vol_response,
+                      basic_acl_group_response,
+                      basic_acl_group_response,
+                      basic_vol_response, ]
+        self.drv.conn.get.side_effect = get_effect
+        self.drv.terminate_connection(testvol, connector)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.get('TierStore/ACLGroup/by-id/'),
+                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
+                                  cmd),
+                    mock.call.get('TierStore/ACLGroup/by-id/'),
+                    mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.delete('TierStore/ACLGroup/by-id/3')]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_terminate_connection_multiple_no_delete(self, connmock):
+        self.drv.conn = connmock.return_value
+        connector = {'initiator': 'fake:01'}
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow(),
+                   'provider_auth': {}}
+        cmd = {"AclGroupList": ["1"], }
+        return2vol = copy.deepcopy(basic_vol_response)
+        return2vol.append(copy.deepcopy(basic_vol_response[0]))
+        return2vol[1]['AclGroupList'] = ["3"]
+        get_effect = [basic_vol_response,
+                      basic_acl_group_response,
+                      basic_acl_group_response,
+                      return2vol, ]
+        self.drv.conn.get.side_effect = get_effect
+        self.drv.terminate_connection(testvol, connector)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.get('TierStore/ACLGroup/by-id/'),
+                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
+                                  cmd),
+                    mock.call.get('TierStore/ACLGroup/by-id/'),
+                    mock.call.get('TierStore/Volumes/by-id/')]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_terminate_connection_multiple_delete(self, connmock):
+        self.drv.conn = connmock.return_value
+        connector = {'initiator': 'fake:01'}
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow(),
+                   'provider_auth': {}}
+        cmd = {"AclGroupList": ["1"], }
+        return2vol = copy.deepcopy(basic_vol_response)
+        return2vol.append(copy.deepcopy(basic_vol_response[0]))
+        get_effect = [basic_vol_response,
+                      basic_acl_group_response,
+                      basic_acl_group_response,
+                      return2vol, ]
+        self.drv.conn.get.side_effect = get_effect
+        self.drv.terminate_connection(testvol, connector)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.get('TierStore/ACLGroup/by-id/'),
+                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
+                                  cmd),
+                    mock.call.get('TierStore/ACLGroup/by-id/'),
+                    mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.delete('TierStore/ACLGroup/by-id/3')]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_create_cloned_volume_simple_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        srcvol = {'id': 'cinderVolumeID'}
+        dstvol = {'project_id': 'testproject',
+                  'name': 'cinderVolumeName',
+                  'size': 1,
+                  'id': 'cinderVolumeID-dst',
+                  'volume_type_id': None,
+                  'created_at': timeutils.utcnow()}
+        cmd = {'VolumeUUID': 'FakeBasicVolID',
+               'Name': 'mockedFakeUUID'}
+        # also mock _get_snapshot_by_name because of the random snapshot name.
+        self.drv._get_snapshot_by_name = mock.MagicMock()
+        self.drv._get_snapshot_by_name.return_value = \
+            basic_snapshot_response[0]
+        cmd2 = {"ParentLayerId": "407115424bb9539c",
+                "Name": "cinderVolumeID-dst",
+                "PolicyUUID": "00000000-00000000-0000-000000000000"}
+        get_effect = [basic_vol_response, ]
+        self.drv.conn.get.side_effect = get_effect
+
+        with mock.patch('cinder.volume.drivers.fusionio.ioControl.uuid',
+                        autospec=True) as uuidmock:
+            uuidmock.uuid4.return_value = cmd['Name']
+            self.drv.create_cloned_volume(dstvol, srcvol)
+
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.post('TierStore/Snapshots/by-id/', cmd),
+                    mock.call.put(('TierStore/Snapshots/functions/' +
+                                   'CloneSnapshot'), cmd2), ]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_create_cloned_volume_snapfails(self, connmock):
+        self.drv.conn = connmock.return_value
+        # This operation is a two-part process: snapshot, then clone.
+        # This test covers the snapshot step failing.
+        srcvol = {'id': 'cinderVolumeID'}
+        dstvol = {'project_id': 'testproject',
+                  'name': 'cinderVolumeName',
+                  'size': 1,
+                  'id': 'cinderVolumeID-dst',
+                  'volume_type_id': None,
+                  'created_at': timeutils.utcnow()}
+        cmd = {'VolumeUUID': 'FakeBasicVolID',
+               'Name': 'mockedFakeUUID'}
+        get_effect = [basic_vol_response, ]
+        self.drv.conn.get.side_effect = get_effect
+        self.drv.conn.post.side_effect = requests.exceptions.HTTPError
+        with mock.patch('cinder.volume.drivers.fusionio.ioControl.uuid',
+                        autospec=True) as uuidmock:
+            uuidmock.uuid4.return_value = cmd['Name']
+            self.assertRaises(requests.exceptions.HTTPError,
+                              self.drv.create_cloned_volume,
+                              dstvol, srcvol)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.post('TierStore/Snapshots/by-id/', cmd), ]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_create_cloned_volume_clonefails(self, connmock):
+        self.drv.conn = connmock.return_value
+        srcvol = {'id': 'cinderVolumeID'}
+        dstvol = {'project_id': 'testproject',
+                  'name': 'cinderVolumeName',
+                  'size': 1,
+                  'id': 'cinderVolumeID-dst',
+                  'volume_type_id': None,
+                  'created_at': timeutils.utcnow()}
+        get_effect = [basic_vol_response,
+                      basic_snapshot_response[0], ]
+        self.drv.conn.get.side_effect = get_effect
+        # also mock _get_snapshot_by_name because of the random snapshot name.
+        self.drv._get_snapshot_by_name = mock.MagicMock()
+        self.drv._get_snapshot_by_name.return_value = \
+            basic_snapshot_response[0]
+        cmd = {'VolumeUUID': 'FakeBasicVolID',
+               'Name': 'mockedFakeUUID'}
+        cmd2 = {"ParentLayerId": "407115424bb9539c",
+                "Name": "cinderVolumeID-dst",
+                "PolicyUUID": "00000000-00000000-0000-000000000000"}
+        self.drv.conn.put.side_effect = requests.exceptions.HTTPError
+        with mock.patch('cinder.volume.drivers.fusionio.ioControl.uuid',
+                        autospec=True) as uuidmock:
+            uuidmock.uuid4.return_value = cmd['Name']
+            self.assertRaises(requests.exceptions.HTTPError,
+                              self.drv.create_cloned_volume,
+                              dstvol, srcvol)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.post('TierStore/Snapshots/by-id/', cmd),
+                    mock.call.put(('TierStore/Snapshots/functions/' +
+                                   'CloneSnapshot'), cmd2),
+                    mock.call.delete(('TierStore/Snapshots/by-id/' +
+                                      cmd2['ParentLayerId'])), ]
+        self.drv.conn.assert_has_calls(expected)
+
+    def test_get_volume_stats_simple_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        self.drv.conn.get.side_effect = [basic_pools_response, ]
+        result = self.drv.get_volume_stats(refresh=True)
+        self.assertEqual(basic_pools_response[0]['PagingTotalMB'] +
+                         basic_pools_response[1]['PagingTotalMB'],
+                         result['total_capacity_gb'],
+                         "capacity calc wrong")
+        self.assertEqual(self.VERSION, result['driver_version'],
+                         "Driver/Test version Mismatch")
+
+    def test_create_volume_QoS_by_presets(self, connmock):
+        preset_qos = VolumeMetadata(key='fio-qos', value='Policy 2')
+        testvol = {'project_id': 'testprjid',
+                   'name': 'testvol',
+                   'size': 1,
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'volume_metadata': [preset_qos],
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
+
+        expected_qos_result = '00000000-00000000-0000-000000000008'  # Policy 2
+        qos = self.drv._set_qos_presets(testvol)
+        self.assertEqual(qos, expected_qos_result)
+
+    def test_create_volume_Qos_by_volumeType_QoSspec(self, connmock):
+        qos_ref = qos_specs.create(self.ctxt,
+                                   'qos-specs-1', {'fio-qos': 'Policy 2'})
+        type_ref = volume_types.create(self.ctxt,
+                                       "type1",
+                                       {"volume_backend_name": "fio-ioControl",
+                                        "qos:fio-qos": "Policy 4"}
+                                       )
+        qos_specs.associate_qos_with_type(self.ctxt,
+                                          qos_ref['id'],
+                                          type_ref['id'])
+        expected_qos_result = '00000000-00000000-0000-000000000008'  # Policy 2
+        qos = self.drv._set_qos_by_volume_type(type_ref['id'])
+        self.assertEqual(qos, expected_qos_result)
+
+    def test_create_volume_Qos_by_volumeType_extraSpec(self, connmock):
+        type_ref = volume_types.create(self.ctxt,
+                                       "type1",
+                                       {"volume_backend_name": "fio-ioControl",
+                                        "qos:fio-qos": "Policy 4"}
+                                       )
+        expected_qos_result = '00000000-00000000-0000-000000000002'  # Policy 4
+        qos = self.drv._set_qos_by_volume_type(type_ref['id'])
+        self.assertEqual(qos, expected_qos_result)
+
+    def test_extend_volume_simple_success(self, connmock):
+        self.drv.conn = connmock.return_value
+        testvol = {'project_id': 'testproject',
+                   'name': 'cinderVolumeName',
+                   'size': 1,
+                   'id': 'cinderVolumeID',
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
+        new_size = 10
+        cmd = {"Size": int(new_size) * units.Gi}
+        self.drv.conn.get.side_effect = [basic_vol_response, ]
+        self.drv.extend_volume(testvol, new_size)
+        expected = [mock.call.get('TierStore/Volumes/by-id/'),
+                    mock.call.put('TierStore/Volumes/by-id/FakeBasicVolID',
+                                  cmd)]
+        self.drv.conn.assert_has_calls(expected)
diff --git a/cinder/volume/drivers/fusionio/__init__.py b/cinder/volume/drivers/fusionio/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cinder/volume/drivers/fusionio/ioControl.py b/cinder/volume/drivers/fusionio/ioControl.py
new file mode 100644 (file)
index 0000000..b1709da
--- /dev/null
@@ -0,0 +1,557 @@
+# Copyright (c) 2014 Fusion-io, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Fusion-io Driver for the ioControl Hybrid storage subsystem
+"""
+
+import copy
+import hashlib
+import json
+import random
+import uuid
+
+from oslo.config import cfg
+import requests
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+from cinder.openstack.common import units
+from cinder.volume.drivers.san.san import SanISCSIDriver
+from cinder.volume import qos_specs
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+fusionio_iocontrol_opts = [
+    cfg.IntOpt('fusionio_iocontrol_targetdelay',
+               default=5,
+               help='amount of time to wait for iSCSI target to come online'),
+    cfg.IntOpt('fusionio_iocontrol_retry',
+               default=3,
+               help='number of retries for GET operations'),
+    cfg.BoolOpt('fusionio_iocontrol_verify_cert',
+                default=True,
+                help='verify the array certificate on each transaction'), ]
+
+CONF = cfg.CONF
+CONF.register_opts(fusionio_iocontrol_opts)
+
+
+class FIOconnection(object):
+    """Connection class for connection to ioControl array."""
+
+    APIVERSION = '1.1'
+
+    def _complete_uri(self, suburi=None, ver='1', loc='en'):
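+        # builds e.g. https://<array_addr>/API/1/en/<suburi>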
+        uri = "https://" + self.array_addr + "/API/"
+        if ver is not None:
+            uri = uri + ver + "/"
+        if loc is not None:
+            uri = uri + loc + "/"
+        if suburi is not None:
+            uri = uri + suburi
+        return uri
+
+    def __init__(self, array_addr, array_login, array_passwd, retry, verify):
+        self.client = "client=openstack"
+        self.defhdrs = {"User-Agent": "OpenStack-agent",
+                        "Content-Type": "application/json"}
+        self.array_addr = array_addr
+        self.array_login = array_login
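+        # the array authenticates with an MD5 hex digest of the password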
+        self.hashpass = hashlib.md5()
+        self.hashpass.update(array_passwd)
+        self.login_content = ("username=" + array_login + "&hash=" +
+                              self.hashpass.hexdigest())
+        self.retry = retry
+        self.verify = verify
+        # check the version of the API on the array. We only support 1.1
+        # for now.
+        resp = requests.get(url=("https://" + array_addr + "/AUTH/Version"),
+                            headers=self.defhdrs, verify=self.verify)
+        resp.raise_for_status()
+        dictresp = resp.json()
+        if dictresp["Version"] != self.APIVERSION:
+            msg = _("FIO ioControl API version not supported")
+            raise exception.VolumeDriverException(message=msg)
+        LOG.debug('FIO Connection initialized to %s' % array_addr)
+
+    def _create_session(self):
+        # get the session id
+        res = requests.post(url=("https://" + self.array_addr +
+                                 "/AUTH/SESSION"),
+                            data=self.client,
+                            headers=self.defhdrs,
+                            verify=self.verify)
+        res.raise_for_status()
+        result = res.json()
+        session_key = result["id"]
+        hdrs = copy.deepcopy(self.defhdrs)
+        hdrs["Cookie"] = "session=" + session_key
+        # Authenticate the session
+        res = requests.put(url=("https://" + self.array_addr +
+                                "/AUTH/SESSION/" + session_key),
+                           data=self.login_content,
+                           headers=self.defhdrs,
+                           verify=self.verify)
+        try:
+            res.raise_for_status()
+        except requests.exceptions.RequestException:
+            self._delete_session(hdrs)
+            raise
+        result = res.json()
+        if result["Status"] != 1:
+            # Authentication error: delete the session ID
+            self._delete_session(hdrs)
+            msg = (_('FIO ioControl Authentication Error: %s') % (result))
+            raise exception.VolumeDriverException(message=msg)
+        return hdrs
+
+    def _delete_session(self, hdrs):
+        session = hdrs["Cookie"].split('=')[1]
+        requests.delete(url=("https://" + self.array_addr +
+                             "/AUTH/SESSION/" + session),
+                        headers=self.defhdrs,
+                        verify=self.verify)
+
+    def get(self, suburl):
+        session_hdrs = self._create_session()
+        trynum = 0
+        try:
+            while (trynum < self.retry):
+                trynum += 1
+                res = requests.get(url=self._complete_uri(suburl),
+                                   headers=session_hdrs,
+                                   verify=self.verify)
+                res.raise_for_status()
+                # work around a bug whereby bad json is returned by the array
+                try:
+                    jres = res.json()
+                    break
+                except Exception:
+                    if (trynum == self.retry):
+                        # this shouldn't happen, but check for it
+                        msg = (_('FIO ioControl persistent json Error.'))
+                        raise exception.VolumeDriverException(message=msg)
+                    pass
+        finally:
+            # always tear down the session, even on failure
+            self._delete_session(session_hdrs)
+        return jres
+
+    def put(self, suburl, content=None):
+        session_hdrs = self._create_session()
+        try:
+            result = requests.put(url=self._complete_uri(suburl),
+                                  data=json.dumps(content,
+                                                  sort_keys=True),
+                                  headers=session_hdrs,
+                                  verify=self.verify)
+            result.raise_for_status()
+        finally:
+            self._delete_session(session_hdrs)
+        return
+
+    def post(self, suburl, content=None):
+        session_hdrs = self._create_session()
+        try:
+            result = requests.post(url=self._complete_uri(suburl),
+                                   data=json.dumps(content,
+                                                   sort_keys=True),
+                                   headers=session_hdrs,
+                                   verify=self.verify)
+            result.raise_for_status()
+        finally:
+            self._delete_session(session_hdrs)
+        return
+
+    def delete(self, suburl,):
+        session_hdrs = self._create_session()
+        try:
+            result = requests.delete(url=self._complete_uri(suburl),
+                                     headers=session_hdrs,
+                                     verify=self.verify)
+            result.raise_for_status()
+        finally:
+            self._delete_session(session_hdrs)
+        return
+
+
+class FIOioControlDriver(SanISCSIDriver):
+    """Fusion-io ioControl iSCSI volume driver."""
+
+    VERSION = '1.0.0'
+
+    def __init__(self, *args, **kwargs):
+        super(FIOioControlDriver, self).__init__(*args, **kwargs)
+        LOG.debug('FIO __init__ w/ %s' % kwargs)
+        self.configuration.append_config_values(fusionio_iocontrol_opts)
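+        # QoS policy name -> UUID map for the array; filled in by do_setup()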
+        self.fio_qos_dict = {}
+
+    def _get_volume_by_name(self, name):
+        result = self.conn.get("TierStore/Volumes/by-id/")
+        vol = [x for x in result
+               if x['Name'] == name]
+        if len(vol) == 1:
+            return vol[0]
+        elif len(vol) == 0:
+            raise exception.VolumeNotFound(volume_id=name)
+        else:
+            msg = (_("FIO _get_volume_by_name Error: %(name)s, %(len)s") %
+                   {'name': name,
+                    'len': len(vol)})
+            raise exception.VolumeDriverException(message=msg)
+
+    def _get_acl_by_name(self, name):
+        result = self.conn.get("TierStore/ACLGroup/by-id/")
+        acl = [x for x in result
+               if x['GroupName'] == name]
+        if len(acl) == 1:
+            return acl[0]
+        elif len(acl) == 0:
+            return []
+        else:
+            msg = (_("FIO _get_acl_by_name Error: %(name)s, %(len)s") %
+                   {'name': name,
+                    'len': len(acl), })
+            raise exception.VolumeDriverException(message=msg)
+
+    def _get_snapshot_by_name(self, name):
+        result = self.conn.get("TierStore/Snapshots/by-id/")
+        snap = [x for x in result
+                if x['Name'] == name]
+        if len(snap) == 1:
+            return snap[0]
+        elif len(snap) == 0:
+            raise exception.SnapshotNotFound(snapshot_id=name)
+        else:
+            msg = (_("FIO _get_snapshot_by_name Error: %(name)s, %(len)s") %
+                   {'name': name,
+                    'len': len(snap), })
+            raise exception.VolumeDriverException(message=msg)
+
+    def _set_qos_presets(self, volume):
+        valid_presets = self.fio_qos_dict.keys()
+
+        presets = [i.value for i in volume.get('volume_metadata')
+                   if i.key == 'fio-qos' and i.value in valid_presets]
+        if len(presets) > 0:
+            if len(presets) > 1:
+                LOG.warning(_('More than one valid preset was '
+                              'detected, using %s') % presets[0])
+            return self.fio_qos_dict[presets[0]]
+
+    def _set_qos_by_volume_type(self, type_id):
+        valid_presets = self.fio_qos_dict.keys()
+        volume_type = volume_types.get_volume_type(ctxt=None,
+                                                   id=type_id)
+        qos_specs_id = volume_type.get('qos_specs_id')
+        specs = volume_type.get('extra_specs')
+        if qos_specs_id is not None:
+            kvs = qos_specs.get_qos_specs(ctxt=None,
+                                          id=qos_specs_id)['specs']
+        else:
+            kvs = specs
+        for key, value in kvs.iteritems():
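+            # volume-type extra specs arrive with a scope prefix
+            # (e.g. "qos:fio-qos"); strip it so the key matches below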
+            if ':' in key:
+                fields = key.split(':')
+                key = fields[1]
+            if 'fio-qos' in key:
+                if value in valid_presets:
+                    return self.fio_qos_dict[value]
+
+    def do_setup(self, context):
+        LOG.debug('FIO do_setup() called')
+        required_flags = ['san_ip',
+                          'san_login',
+                          'san_password', ]
+        for flag in required_flags:
+            if not getattr(self.configuration, flag, None):
+                raise exception.InvalidInput(reason=_('%s is not set') % flag)
+        if not (self.configuration.san_ip and
+                self.configuration.san_login and
+                self.configuration.san_password):
+            raise exception.InvalidInput(
+                reason=_('All of '
+                         'san_ip '
+                         'san_login '
+                         'san_password '
+                         'must be set'))
+        self.conn = FIOconnection(self.configuration.san_ip,
+                                  self.configuration.san_login,
+                                  self.configuration.san_password,
+                                  self.configuration.fusionio_iocontrol_retry,
+                                  (self.configuration.
+                                   fusionio_iocontrol_verify_cert))
+        result = self.conn.get("TierStore/Policies/by-id/")
+        for x in result:
+            self.fio_qos_dict[x['Name']] = x['id']
+
+    def check_for_setup_error(self):
+        pass
+
+    def create_volume(self, volume):
+        LOG.debug('FIO create_volume() called: %s' % (volume['id']))
+        # Roughly, we pick the less-full pool.
+        # Someday, make the default policy configurable.
+        qos = self.fio_qos_dict['Policy 5']
+        result = self.conn.get("TierStore/Pools/by-id/")
+        poola = result[0]['PagingTotalMB'] - result[0]['ExportedVolumeMB']
+        poolb = result[1]['PagingTotalMB'] - result[1]['ExportedVolumeMB']
+        if poola >= poolb:
+            pool = result[0]['id']
+        else:
+            pool = result[1]['id']
+        if volume.get('volume_metadata') is not None:
+            qos = self._set_qos_presets(volume)
+
+        type_id = volume['volume_type_id']
+        if type_id is not None:
+            qos = self._set_qos_by_volume_type(type_id)
+
+        cmd = {"Size": int(volume['size']) * units.Gi,
+               "PolicyUUID": qos,
+               "PoolUUID": pool,
+               "Name": volume['id'], }
+        self.conn.post("TierStore/Volumes/by-id/", cmd)
+        LOG.debug(('FIO create_vol(%(id)s) on %(pool)s vals %(poola)s '
+                   '%(poolb)s') %
+                  {'id': volume['id'],
+                   'pool': pool,
+                   'poola': poola,
+                   'poolb': poolb})
+
+    def delete_volume(self, volume):
+        LOG.debug('FIO delete_volume() volID %s' % (volume['id']))
+        vol = self._get_volume_by_name(volume['id'])
+        self.conn.delete("TierStore/Volumes/by-id/" + vol['id'])
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def create_export(self, context, volume):
+        pass
+
+    def remove_export(self, context, volume):
+        pass
+
+    def initialize_connection(self, volume, connector):
+        LOG.debug('FIO init_connection() w/ %(id)s and %(conn)s' %
+                  {'id': volume['id'],
+                   'conn': connector['initiator']})
+        # Set up the access group; each initiator goes into a unique
+        # access group.
+        # TODO(ebalduf) implement w/ CHAP
+        volumedata = self._get_volume_by_name(volume['id'])
+        cmd = {"GroupName": connector['initiator'],
+               "InitiatorList": [connector['initiator']]}
+        self.conn.post("TierStore/ACLGroup/by-id/", cmd)
+
+        acl = self._get_acl_by_name(connector['initiator'])
+        if acl:
+            cmd = {"AclGroupList": [str(acl['id'])], }
+            self.conn.put("TierStore/Volumes/by-id/" + volumedata['id'], cmd)
+        else:
+            # this should never happen, but check for it just in case
+            msg = _('FIO: ACL does not exist!')
+            raise exception.VolumeDriverException(message=msg)
+        # Handle the fact that applying the ACL to the volume is
+        # asynchronous.  In the future we'll add a callback to the API.
+
+        def _wait_routine():
+            # Unfortunately, the array API at this time doesn't have any
+            # way to poll.  In the future we will add that ability, and
+            # this routine is where we will poll for readiness.
+            if self._looping_count == 0:
+                self._looping_count += 1
+            else:
+                raise loopingcall.LoopingCallDone()
+
+        # time.sleep(self.configuration.fusionio_iocontrol_targetdelay)
+        self._looping_count = 0
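+        # The looping call below amounts to roughly one fixed delay of
+        # fusionio_iocontrol_targetdelay seconds before the volume is
+        # re-read.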
+        timer = loopingcall.FixedIntervalLoopingCall(_wait_routine)
+        timer.start(
+            interval=self.configuration.fusionio_iocontrol_targetdelay).wait()
+        volumedata = self._get_volume_by_name(volume['id'])
+
+        properties = {}
+        properties['target_discovered'] = False
+        properties['target_iqn'] = volumedata['IQN']
+        properties['target_lun'] = 0
+        properties['volume_id'] = volume['id']
+
+        result = self.conn.get("System/Network/by-id/")
+
+        # Probably way too complicated, but pick a random network interface
+        # on the controller that owns this LUN.
+        networksinfo = [x for x in result
+                        if x['OperationalState'] == 'up'
+                        if x['IsManagementPort'] is not True
+                        if x['IsReplicationPort'] is not True
+                        if x['ControllerUID'] ==
+                        volumedata['CurrentOwnerUUID']]
+        LOG.debug('NetworkInfo %s' % (networksinfo))
+        if len(networksinfo):
+            ipaddr = (networksinfo[random.randint(0, len(networksinfo) - 1)]
+                      ['NetworkAddress'])
+        else:
+            msg = _('No usable Networks found: %s') % (result)
+            raise exception.VolumeDriverException(message=msg)
+        properties['target_portal'] = unicode('%s:%s' % (ipaddr, '3260'))
+
+        auth = volume['provider_auth']
+        if auth:
+            (auth_method, auth_username, auth_secret) = auth.split()
+
+            properties['auth_method'] = auth_method
+            properties['auth_username'] = auth_username
+            properties['auth_password'] = auth_secret
+
+        LOG.debug('Result from initialize connection: %s' % properties)
+        return {
+            'driver_volume_type': 'iscsi',
+            'data': properties,
+        }
+
+    def create_snapshot(self, snapshot):
+        LOG.debug(('FIO create_snapshot() vol ID: %(volID)s snapID '
+                   '%(snapID)s') %
+                  {'volID': snapshot['volume_id'],
+                   'snapID': snapshot['id']})
+        vol = self._get_volume_by_name(snapshot['volume_id'])
+        cmd = {"VolumeUUID": vol['id'],
+               "Name": snapshot['id'], }
+        self.conn.post("TierStore/Snapshots/by-id/", cmd)
+
+    def delete_snapshot(self, snapshot):
+        LOG.debug('FIO delete_snapshot() SnapID: %s' % (snapshot['id']))
+        snap = self._get_snapshot_by_name(snapshot['id'])
+        self.conn.delete("TierStore/Snapshots/by-id/" + snap['id'])
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        LOG.debug('FIO create_volume_from_snapshot()  w/ %s' %
+                  volume['id'])
+
+        qos = self.fio_qos_dict['Policy 5']
+        if volume.get('volume_metadata') is not None:
+            qos = self._set_qos_presets(volume)
+
+        type_id = volume['volume_type_id']
+        if type_id is not None:
+            qos = self._set_qos_by_volume_type(type_id)
+        snap = self._get_snapshot_by_name(snapshot['id'])
+        cmd = {"ParentLayerId": snap['id'],
+               "Name": volume['id'],
+               "PolicyUUID": qos}
+        self.conn.put("TierStore/Snapshots/functions/CloneSnapshot", cmd)
+
+    def _delete_acl_by_name(self, name):
+        aclname = self._get_acl_by_name(name)
+        if not aclname:
+            return
+        result = self.conn.get("TierStore/Volumes/by-id/")
+        inuse = False
+        for vol in result:
+            for acl in vol['AclGroupList']:
+                if int(acl) == aclname['id']:
+                    inuse = True
+                    break
+            if inuse:
+                break
+        if not inuse:
+            result = self.conn.delete("TierStore/ACLGroup/by-id/" +
+                                      str(aclname['id']))
+
+    def terminate_connection(self, volume, connector, **kwargs):
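+        # Point the volume at the "Deny Access" ACL group, then drop the
+        # per-initiator ACL group if no other volume still references it.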
+        LOG.debug('FIO terminate_connection() w/ %(id)s %(conn)s ' %
+                  {'id': volume['id'],
+                   'conn': connector['initiator']})
+        vol = self._get_volume_by_name(volume['id'])
+        acl = self._get_acl_by_name("Deny Access")
+        if not acl:
+            msg = _('FIO: ACL does not exist!')
+            raise exception.VolumeDriverException(message=msg)
+        cmd = {"AclGroupList": [str(acl['id'])], }
+        self.conn.put("TierStore/Volumes/by-id/" + vol['id'], cmd)
+        self._delete_acl_by_name(connector['initiator'])
+
+    def create_cloned_volume(self, volume, src_vref):
+        LOG.debug('FIO create_cloned_volume() w/ %(id)s %(src)s' %
+                  {'id': volume['id'],
+                   'src': src_vref})
+        qos = self.fio_qos_dict['Policy 5']
+        # take a snapshot of the volume (use random UUID for name)
+        snapshotname = str(uuid.uuid4())
+        vol = self._get_volume_by_name(src_vref['id'])
+        cmd = {"VolumeUUID": vol['id'],
+               "Name": snapshotname, }
+        self.conn.post("TierStore/Snapshots/by-id/", cmd)
+
+        # create a volume from the snapshot with the new name.
+        # Rollback = Delete the snapshot if needed.
+        if volume.get('volume_metadata') is not None:
+            qos = self._set_qos_presets(volume)
+
+        type_id = volume['volume_type_id']
+        if type_id is not None:
+            qos = self._set_qos_by_volume_type(type_id)
+
+        snap = self._get_snapshot_by_name(snapshotname)
+        cmd = {"ParentLayerId": snap['id'],
+               "Name": volume['id'],
+               "PolicyUUID": qos, }
+        try:
+            # watch for any issues here, and if there are, clean up the
+            # snapshot and re-raise
+            self.conn.put("TierStore/Snapshots/functions/CloneSnapshot", cmd)
+        except Exception:
+            snap = self._get_snapshot_by_name(snapshotname)
+            self.conn.delete("TierStore/Snapshots/by-id/" + snap['id'])
+            raise
+
+    def get_volume_stats(self, refresh=False):
+        """Retrieve status info from volume group."""
+        LOG.debug("FIO Updating volume status")
+        if refresh:
+            result = self.conn.get("TierStore/Pools/by-id/")
+            data = {}
+            backend_name = self.configuration.safe_get('volume_backend_name')
+            data["volume_backend_name"] = (backend_name
+                                           or self.__class__.__name__)
+            data["vendor_name"] = 'Fusion-io Inc'
+            data["driver_version"] = self.VERSION
+            data["storage_protocol"] = 'iSCSI'
+            data['total_capacity_gb'] = (result[0]['PagingTotalMB'] +
+                                         result[1]['PagingTotalMB'])
+            data['free_capacity_gb'] = (max((result[0]['PagingTotalMB'] -
+                                             result[0]['ExportedVolumeMB']),
+                                            (result[1]['PagingTotalMB'] -
+                                             result[1]['ExportedVolumeMB'])))
+            data['reserved_percentage'] = 10
+            data['QoS_support'] = True
+            self._stats = data
+
+        LOG.debug('Result from status: %s' % self._stats)
+        return self._stats
+
+    def extend_volume(self, volume, new_size):
+        LOG.debug("FIO extend_volume %(id)s to %(size)s" %
+                  {'id': volume['id'],
+                   'size': new_size})
+        cmd = {"Size": int(new_size) * units.Gi}
+        vol = self._get_volume_by_name(volume['id'])
+        self.conn.put("TierStore/Volumes/by-id/" + vol['id'], cmd)
index b42cacf8cfdf3bb9c25c7db4fdd5381046f9d4de..9c94166e6b484b211c1dda99dcc58065aefc8eb1 100644 (file)
 #eqlx_pool=default
 
 
+#
+# Options defined in cinder.volume.drivers.fusionio.ioControl
+#
+
+# amount of time to wait for the iSCSI target to come online
+# (integer value)
+#fusionio_iocontrol_targetdelay=5
+
+# number of retries for GET operations (integer value)
+#fusionio_iocontrol_retry=3
+
+# verify the array certificate on each transaction (boolean
+# value)
+#fusionio_iocontrol_verify_cert=true
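+
+# Example backend section for this driver (all values below are
+# illustrative placeholders):
+#
+# [fio-iocontrol-1]
+# volume_driver=cinder.volume.drivers.fusionio.ioControl.FIOioControlDriver
+# san_ip=192.0.2.10
+# san_login=admin
+# san_password=changeme
+# fusionio_iocontrol_verify_cert=false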
+
+
 #
 # Options defined in cinder.volume.drivers.glusterfs
 #