review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Add cinder volume driver for Blockbridge EPS
author    Joshua Huber <jhuber@blockbridge.com>
          Fri, 12 Jun 2015 01:17:13 +0000 (21:17 -0400)
committer Joshua Huber <jhuber@blockbridge.com>
          Fri, 12 Jun 2015 21:45:00 +0000 (17:45 -0400)
This initial commit includes support for the minimum required volume
operations:

- Volume Create/Delete
- Volume Attach/Detach
- Snapshot Create/Delete
- Create Volume from Snapshot
- Get Volume Stats
- Copy Image to Volume
- Copy Volume to Image
- Clone Volume
- Extend Volume

For more details about our storage backend, please see the blueprint.

blueprint blockbridge-eps-driver
Change-Id: I99c95243bec708c6519cb69c4cb68085ad698c83
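
For reviewers, a minimal sketch of how this backend might be enabled in cinder.conf, assuming the standard enabled_backends mechanism; the section name and all values below are placeholders, while the blockbridge_* option names are taken from blockbridge_opts in the new driver:

    [DEFAULT]
    enabled_backends = blockbridge

    [blockbridge]
    volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver
    volume_backend_name = blockbridge
    blockbridge_api_host = bb-api.example.com
    blockbridge_auth_scheme = token
    blockbridge_auth_token = <api-token>
    blockbridge_pools = OpenStack:+openstack
    blockbridge_default_pool = OpenStack

With blockbridge_auth_scheme = password, blockbridge_auth_user and blockbridge_auth_password are required instead of the token; check_for_setup_error in the driver validates whichever scheme is selected.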

cinder/tests/unit/test_blockbridge.py [new file with mode: 0644]
cinder/volume/drivers/blockbridge.py [new file with mode: 0644]

diff --git a/cinder/tests/unit/test_blockbridge.py b/cinder/tests/unit/test_blockbridge.py
new file mode 100644 (file)
index 0000000..09745db
--- /dev/null
@@ -0,0 +1,567 @@
+# Copyright 2015 Blockbridge Networks, LLC.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Blockbridge EPS iSCSI Volume Driver Tests
+"""
+
+import base64
+import httplib
+import urllib
+
+import mock
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+from oslo_utils import units
+
+from cinder import context
+from cinder import exception
+from cinder import test
+from cinder.volume import configuration as conf
+import cinder.volume.drivers.blockbridge as bb
+
+LOG = logging.getLogger(__name__)
+
+
+DEFAULT_POOL_NAME = "OpenStack"
+DEFAULT_POOL_QUERY = "+openstack"
+
+FIXTURE_VOL_EXPORT_OK = """{
+  "target_ip":"127.0.0.1",
+  "target_port":3260,
+  "target_iqn":"iqn.2009-12.com.blockbridge:t-pjxczxh-t001",
+  "target_lun":0,
+  "initiator_login":"mock-user-abcdef123456"
+}
+"""
+
+POOL_STATS_WITHOUT_USAGE = {
+    'driver_version': '1.3.0',
+    'pools': [{
+        'filter_function': None,
+        'free_capacity_gb': 'unknown',
+        'goodness_function': None,
+        'location_info': 'BlockbridgeDriver:unknown:OpenStack',
+        'max_over_subscription_ratio': None,
+        'pool_name': 'OpenStack',
+        'thin_provisioning_support': True,
+        'reserved_percentage': 0,
+        'total_capacity_gb': 'unknown'},
+    ],
+    'storage_protocol': 'iSCSI',
+    'vendor_name': 'Blockbridge',
+    'volume_backend_name': 'BlockbridgeISCSIDriver',
+}
+
+
+def common_mocks(f):
+    """Decorator to set mocks common to all tests.
+
+    The point of doing these mocks here is so that we don't accidentally set
+    mocks that can't/don't get unset.
+    """
+    def _common_inner_inner1(inst, *args, **kwargs):
+        @mock.patch("httplib.HTTPSConnection", autospec=True)
+        def _common_inner_inner2(mock_conn):
+            inst.mock_httplib = mock_conn
+            inst.mock_conn = mock_conn.return_value
+            inst.mock_response = mock.Mock()
+
+            inst.mock_response.read.return_value = '{}'
+            inst.mock_response.status = 200
+
+            inst.mock_conn.request.return_value = True
+            inst.mock_conn.getresponse.return_value = inst.mock_response
+
+            return f(inst, *args, **kwargs)
+
+        return _common_inner_inner2()
+
+    return _common_inner_inner1
+
+
+class BlockbridgeISCSIDriverTestCase(test.TestCase):
+
+    def setUp(self):
+        super(BlockbridgeISCSIDriverTestCase, self).setUp()
+
+        self.cfg = mock.Mock(spec=conf.Configuration)
+        self.cfg.blockbridge_api_host = 'ut-api.blockbridge.com'
+        self.cfg.blockbridge_api_port = None
+        self.cfg.blockbridge_auth_scheme = 'token'
+        self.cfg.blockbridge_auth_token = '0//kPIw7Ck7PUkPSKY...'
+        self.cfg.blockbridge_pools = {DEFAULT_POOL_NAME: DEFAULT_POOL_QUERY}
+        self.cfg.blockbridge_default_pool = None
+        self.cfg.filter_function = None
+        self.cfg.goodness_function = None
+
+        def _cfg_safe_get(arg):
+            return getattr(self.cfg, arg, None)
+
+        self.cfg.safe_get.side_effect = _cfg_safe_get
+
+        mock_exec = mock.Mock()
+        mock_exec.return_value = ('', '')
+
+        self.real_client = bb.BlockbridgeAPIClient(configuration=self.cfg)
+        self.mock_client = mock.Mock(wraps=self.real_client)
+
+        self.driver = bb.BlockbridgeISCSIDriver(execute=mock_exec,
+                                                client=self.mock_client,
+                                                configuration=self.cfg)
+
+        self.user_id = '2c13bc8ef717015fda1e12e70dab24654cb6a6da'
+        self.project_id = '62110b9d37f1ff3ea1f51e75812cb92ed9a08b28'
+
+        self.volume_name = u'testvol-1'
+        self.volume_id = '6546b9e9-1980-4241-a4e9-0ad9d382c032'
+        self.volume_size = 1
+        self.volume = dict(
+            name=self.volume_name,
+            size=self.volume_size,
+            id=self.volume_id,
+            user_id=self.user_id,
+            project_id=self.project_id,
+            host='fake-host')
+
+        self.snapshot_name = u'testsnap-1'
+        self.snapshot_id = '207c12af-85a7-4da6-8d39-a7457548f965'
+        self.snapshot = dict(
+            volume_name=self.volume_name,
+            name=self.snapshot_name,
+            id=self.snapshot_id,
+            volume_id='55ff8a46-c35f-4ca3-9991-74e1697b220e',
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.connector = dict(
+            initiator='iqn.1994-05.com.redhat:6a528422b61')
+
+        self.driver.do_setup(context.get_admin_context())
+
+    @common_mocks
+    def test_http_mock_success(self):
+        self.mock_response.read.return_value = '{}'
+        self.mock_response.status = 200
+
+        conn = httplib.HTTPSConnection('whatever', None)
+        conn.request('GET', '/blah', '{}', {})
+        rsp = conn.getresponse()
+
+        self.assertEqual('{}', rsp.read())
+        self.assertEqual(200, rsp.status)
+
+    @common_mocks
+    def test_http_mock_failure(self):
+        mock_body = '{"error": "no results matching query", "status": 413}'
+
+        self.mock_response.read.return_value = mock_body
+        self.mock_response.status = 413
+
+        conn = httplib.HTTPSConnection('whatever', None)
+        conn.request('GET', '/blah', '{}', {})
+        rsp = conn.getresponse()
+
+        self.assertEqual(mock_body, rsp.read())
+        self.assertEqual(413, rsp.status)
+
+    @common_mocks
+    def test_cfg_api_host(self):
+        with mock.patch.object(self.cfg, 'blockbridge_api_host', 'test.host'):
+            self.driver.get_volume_stats(True)
+        self.mock_httplib.assert_called_once_with('test.host', None)
+
+    @common_mocks
+    def test_cfg_api_port(self):
+        with mock.patch.object(self.cfg, 'blockbridge_api_port', 1234):
+            self.driver.get_volume_stats(True)
+        self.mock_httplib.assert_called_once_with(
+            self.cfg.blockbridge_api_host, 1234)
+
+    @common_mocks
+    def test_cfg_api_auth_scheme_password(self):
+        self.cfg.blockbridge_auth_scheme = 'password'
+        self.cfg.blockbridge_auth_user = 'mock-user'
+        self.cfg.blockbridge_auth_password = 'mock-password'
+        with mock.patch.object(self.driver, 'hostname', 'mock-hostname'):
+            self.driver.get_volume_stats(True)
+
+        b64_creds = base64.encodestring("%s:%s" % (
+            self.cfg.blockbridge_auth_user,
+            self.cfg.blockbridge_auth_password)).replace("\n", "")
+
+        params = dict(
+            hostname='mock-hostname',
+            version=self.driver.VERSION,
+            backend_name='BlockbridgeISCSIDriver',
+            pool='OpenStack',
+            query='%2Bopenstack')
+
+        full_url = ("/api/cinder/status?query=%(query)s&"
+                    "hostname=%(hostname)s&backend_name=%(backend_name)s&"
+                    "version=%(version)s&pool=%(pool)s" % params)
+        headers = {
+            'Accept': 'application/vnd.blockbridge-3+json',
+            'Authorization': "Basic %s" % b64_creds,
+            'User-Agent': "cinder-volume/%s" % self.driver.VERSION,
+        }
+
+        self.mock_conn.request.assert_called_once_with(
+            'GET', full_url, None, headers)
+
+    @common_mocks
+    def test_create_volume(self):
+        self.driver.create_volume(self.volume)
+
+        url = "/volumes/%s" % self.volume_id
+        create_params = dict(
+            name=self.volume_name,
+            query=DEFAULT_POOL_QUERY,
+            capacity=self.volume_size * units.Gi)
+
+        kwargs = dict(
+            method='PUT',
+            params=create_params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+        full_url = "/api/cinder" + url
+        raw_body = jsonutils.dumps(create_params)
+        tsk_header = "ext_auth=keystone/%(project_id)s/%(user_id)s" % kwargs
+        authz_header = "Bearer %s" % self.cfg.blockbridge_auth_token
+        headers = {
+            'X-Blockbridge-Task': tsk_header,
+            'Accept': 'application/vnd.blockbridge-3+json',
+            'Content-Type': 'application/json',
+            'Authorization': authz_header,
+            'User-Agent': "cinder-volume/%s" % self.driver.VERSION,
+        }
+
+        self.mock_conn.request.assert_called_once_with(
+            'PUT', full_url, raw_body, headers)
+
+    @common_mocks
+    def test_create_volume_no_results(self):
+        mock_body = '{"message": "no results matching query", "status": 413}'
+
+        self.mock_response.read.return_value = mock_body
+        self.mock_response.status = 413
+
+        self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+                                "no results matching query",
+                                self.driver.create_volume,
+                                self.volume)
+
+        create_params = dict(
+            name=self.volume_name,
+            query=DEFAULT_POOL_QUERY,
+            capacity=self.volume_size * units.Gi)
+
+        kwargs = dict(
+            method='PUT',
+            params=create_params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(
+            "/volumes/%s" % self.volume_id, **kwargs)
+
+    @common_mocks
+    def test_create_volume_from_snapshot(self):
+        self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
+
+        vol_src = dict(
+            snapshot_id=self.snapshot_id,
+            volume_id=self.snapshot['volume_id'])
+        create_params = dict(
+            name=self.volume_name,
+            src=vol_src)
+        kwargs = dict(
+            method='PUT',
+            params=create_params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(
+            "/volumes/%s" % self.volume_id, **kwargs)
+
+    @common_mocks
+    def test_create_volume_from_snapshot_overquota(self):
+        mock_body = '{"message": "over quota", "status": 413}'
+
+        self.mock_response.read.return_value = mock_body
+        self.mock_response.status = 413
+
+        self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+                                "over quota",
+                                self.driver.create_volume_from_snapshot,
+                                self.volume,
+                                self.snapshot)
+
+        vol_src = dict(
+            snapshot_id=self.snapshot_id,
+            volume_id=self.snapshot['volume_id'])
+        create_params = dict(
+            name=self.volume_name,
+            src=vol_src)
+        kwargs = dict(
+            method='PUT',
+            params=create_params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(
+            "/volumes/%s" % self.volume_id, **kwargs)
+
+    @common_mocks
+    def test_create_cloned_volume(self):
+        src_vref = dict(
+            name='cloned_volume_source',
+            size=self.volume_size,
+            id='5d734467-5d77-461c-b5ac-5009dbeaa5d5',
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.driver.create_cloned_volume(self.volume, src_vref)
+
+        create_params = dict(
+            name=self.volume_name,
+            src=dict(volume_id=src_vref['id']))
+        kwargs = dict(
+            method='PUT',
+            params=create_params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(
+            "/volumes/%s" % self.volume_id, **kwargs)
+
+    @common_mocks
+    def test_create_cloned_volume_overquota(self):
+        mock_body = '{"message": "over quota", "status": 413}'
+
+        self.mock_response.read.return_value = mock_body
+        self.mock_response.status = 413
+
+        src_vref = dict(
+            name='cloned_volume_source',
+            size=self.volume_size,
+            id='5d734467-5d77-461c-b5ac-5009dbeaa5d5',
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+                                "over quota",
+                                self.driver.create_cloned_volume,
+                                self.volume,
+                                src_vref)
+
+        create_params = dict(
+            name=self.volume_name,
+            src=dict(volume_id=src_vref['id']))
+        kwargs = dict(
+            method='PUT',
+            params=create_params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(
+            "/volumes/%s" % self.volume_id, **kwargs)
+
+    @common_mocks
+    def test_extend_volume(self):
+        self.driver.extend_volume(self.volume, 2)
+
+        url = "/volumes/%s" % self.volume_id
+        kwargs = dict(
+            action='grow',
+            method='POST',
+            params=dict(capacity=(2 * units.Gi)),
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+    @common_mocks
+    def test_extend_volume_overquota(self):
+        mock_body = '{"message": "over quota", "status": 413}'
+        self.mock_response.read.return_value = mock_body
+        self.mock_response.status = 413
+
+        self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+                                "over quota",
+                                self.driver.extend_volume,
+                                self.volume,
+                                2)
+
+        url = "/volumes/%s" % self.volume_id
+        kwargs = dict(
+            action='grow',
+            method='POST',
+            params=dict(capacity=(2 * units.Gi)),
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+    @common_mocks
+    def test_delete_volume(self):
+        self.driver.delete_volume(self.volume)
+
+        url = "/volumes/%s" % self.volume_id
+        kwargs = dict(
+            method='DELETE',
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+    @common_mocks
+    def test_create_snapshot(self):
+        self.driver.create_snapshot(self.snapshot)
+
+        url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'],
+                                            self.snapshot['id'])
+        create_params = dict(
+            name=self.snapshot_name)
+        kwargs = dict(
+            method='PUT',
+            params=create_params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+    @common_mocks
+    def test_create_snapshot_overquota(self):
+        mock_body = '{"message": "over quota", "status": 413}'
+        self.mock_response.read.return_value = mock_body
+        self.mock_response.status = 413
+
+        self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+                                "over quota",
+                                self.driver.create_snapshot,
+                                self.snapshot)
+
+        url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'],
+                                            self.snapshot['id'])
+        create_params = dict(
+            name=self.snapshot_name)
+        kwargs = dict(
+            method='PUT',
+            params=create_params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+    @common_mocks
+    def test_delete_snapshot(self):
+        self.driver.delete_snapshot(self.snapshot)
+
+        url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'],
+                                            self.snapshot['id'])
+        kwargs = dict(
+            method='DELETE',
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+    @common_mocks
+    @mock.patch('cinder.volume.utils.generate_username')
+    @mock.patch('cinder.volume.utils.generate_password')
+    def test_initialize_connection(self,
+                                   mock_generate_password,
+                                   mock_generate_username):
+        mock_generate_username.return_value = 'mock-user-abcdef123456'
+        mock_generate_password.return_value = 'mock-password-abcdef123456'
+
+        self.mock_response.read.return_value = FIXTURE_VOL_EXPORT_OK
+        self.mock_response.status = 200
+
+        props = self.driver.initialize_connection(self.volume, self.connector)
+
+        expected_props = dict(
+            driver_volume_type="iscsi",
+            data=dict(
+                auth_method="CHAP",
+                auth_username='mock-user-abcdef123456',
+                auth_password='mock-password-abcdef123456',
+                target_discovered=False,
+                target_iqn="iqn.2009-12.com.blockbridge:t-pjxczxh-t001",
+                target_lun=0,
+                target_portal="127.0.0.1:3260",
+                volume_id=self.volume_id))
+
+        self.assertEqual(expected_props, props)
+
+        ini_name = urllib.quote(self.connector["initiator"], "")
+        url = "/volumes/%s/exports/%s" % (self.volume_id, ini_name)
+        params = dict(
+            chap_user="mock-user-abcdef123456",
+            chap_secret="mock-password-abcdef123456")
+        kwargs = dict(
+            method='PUT',
+            params=params,
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+    @common_mocks
+    def test_terminate_connection(self):
+        self.driver.terminate_connection(self.volume, self.connector)
+
+        ini_name = urllib.quote(self.connector["initiator"], "")
+        url = "/volumes/%s/exports/%s" % (self.volume_id, ini_name)
+        kwargs = dict(
+            method='DELETE',
+            user_id=self.user_id,
+            project_id=self.project_id)
+
+        self.mock_client.submit.assert_called_once_with(url, **kwargs)
+
+    @common_mocks
+    def test_get_volume_stats_without_usage(self):
+        with mock.patch.object(self.driver, 'hostname', 'mock-hostname'):
+            self.driver.get_volume_stats(True)
+
+        p = {
+            'query': '+openstack',
+            'pool': 'OpenStack',
+            'hostname': 'mock-hostname',
+            'version': '1.3.0',
+            'backend_name': 'BlockbridgeISCSIDriver',
+        }
+
+        self.mock_client.submit.assert_called_once_with('/status', params=p)
+        self.assertEqual(POOL_STATS_WITHOUT_USAGE, self.driver._stats)
+
+    @common_mocks
+    def test_get_volume_stats_forbidden(self):
+        self.mock_response.status = 403
+        self.assertRaisesRegexp(exception.NotAuthorized,
+                                "Insufficient privileges",
+                                self.driver.get_volume_stats,
+                                True)
+
+    @common_mocks
+    def test_get_volume_stats_unauthorized(self):
+        self.mock_response.status = 401
+        self.assertRaisesRegexp(exception.NotAuthorized,
+                                "Invalid credentials",
+                                self.driver.get_volume_stats,
+                                True)
diff --git a/cinder/volume/drivers/blockbridge.py b/cinder/volume/drivers/blockbridge.py
new file mode 100644 (file)
index 0000000..ff3b887
--- /dev/null
@@ -0,0 +1,592 @@
+# Copyright 2013-2015 Blockbridge Networks, LLC.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Blockbridge EPS iSCSI Volume Driver
+"""
+
+import base64
+import httplib
+import socket
+import urllib
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+from oslo_utils import units
+
+from cinder import context
+from cinder import exception
+from cinder.i18n import _
+from cinder.volume import driver
+from cinder.volume import utils as volume_utils
+
+LOG = logging.getLogger(__name__)
+
+blockbridge_opts = [
+    cfg.StrOpt("blockbridge_api_host",
+               default=None,
+               help=_("IP address/hostname of Blockbridge API.")),
+    cfg.IntOpt("blockbridge_api_port",
+               default=None,
+               help=_("Override HTTPS port to connect to Blockbridge "
+                      "API server.")),
+    cfg.StrOpt("blockbridge_auth_scheme",
+               default='token',
+               choices=['token', 'password'],
+               help=_("Blockbridge API authentication scheme (token "
+                      "or password)")),
+    cfg.StrOpt("blockbridge_auth_token",
+               default=None,
+               help=_("Blockbridge API token (for auth scheme 'token')"),
+               secret=True),
+    cfg.StrOpt("blockbridge_auth_user",
+               default=None,
+               help=_("Blockbridge API user (for auth scheme 'password')")),
+    cfg.StrOpt("blockbridge_auth_password",
+               default=None,
+               help=_("Blockbridge API password (for auth scheme 'password')"),
+               secret=True),
+    cfg.DictOpt("blockbridge_pools",
+                default={'OpenStack': '+openstack'},
+                help=_("Defines the set of exposed pools and their associated "
+                       "backend query strings")),
+    cfg.StrOpt("blockbridge_default_pool",
+               default=None,
+               help=_("Default pool name if unspecified.")),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(blockbridge_opts)
+
+
+class BlockbridgeAPIClient(object):
+    _api_cfg = None
+
+    def __init__(self, configuration=None):
+        self.configuration = configuration
+
+    def _get_api_cfg(self):
+        if self._api_cfg:
+            # return cached configuration
+            return self._api_cfg
+
+        if self.configuration.blockbridge_auth_scheme == 'password':
+            user = self.configuration.safe_get('blockbridge_auth_user')
+            pw = self.configuration.safe_get('blockbridge_auth_password')
+            b64_creds = base64.encodestring("%s:%s" % (user, pw))
+            authz = "Basic %s" % b64_creds.replace("\n", "")
+        elif self.configuration.blockbridge_auth_scheme == 'token':
+            token = self.configuration.blockbridge_auth_token or ''
+            authz = "Bearer %s" % token
+
+        # set and return cached api cfg
+        self._api_cfg = {
+            'host': self.configuration.blockbridge_api_host,
+            'port': self.configuration.blockbridge_api_port,
+            'base_url': '/api/cinder',
+            'default_headers': {
+                'User-Agent': ("cinder-volume/%s" %
+                               BlockbridgeISCSIDriver.VERSION),
+                'Accept': 'application/vnd.blockbridge-3+json',
+                'Authorization': authz,
+            },
+        }
+
+        return self._api_cfg
+
+    def submit(self, rel_url, method='GET', params=None, user_id=None,
+               project_id=None, req_id=None, action=None, **kwargs):
+        """Submit a request to the configured API endpoint."""
+
+        cfg = self._get_api_cfg()
+        if cfg is None:
+            msg = _("Failed to determine blockbridge API configuration")
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        # alter the url appropriately if an action is requested
+        if action:
+            rel_url += "/actions/%s" % action
+
+        headers = cfg['default_headers'].copy()
+        url = cfg['base_url'] + rel_url
+        body = None
+
+        # include user, project and req-id, if supplied
+        tsk_ctx = []
+        if user_id and project_id:
+            tsk_ctx.append("ext_auth=keystone/%s/%s" % (project_id, user_id))
+        if req_id:
+            tsk_ctx.append("id=%s" % req_id)
+
+        if tsk_ctx:
+            headers['X-Blockbridge-Task'] = ','.join(tsk_ctx)
+
+        # encode params based on request method
+        if method in ['GET', 'DELETE']:
+            # For GET method add parameters to the URL
+            if params:
+                url += '?' + urllib.urlencode(params)
+        elif method in ['POST', 'PUT', 'PATCH']:
+            body = jsonutils.dumps(params)
+            headers['Content-Type'] = 'application/json'
+        else:
+            raise exception.UnknownCmd(cmd=method)
+
+        # connect and execute the request
+        connection = httplib.HTTPSConnection(cfg['host'], cfg['port'])
+        connection.request(method, url, body, headers)
+        response = connection.getresponse()
+
+        # read response data
+        rsp_body = response.read()
+        rsp_data = jsonutils.loads(rsp_body)
+
+        connection.close()
+
+        code = response.status
+        if code in [200, 201, 202, 204]:
+            pass
+        elif code == 401:
+            raise exception.NotAuthorized(_("Invalid credentials"))
+        elif code == 403:
+            raise exception.NotAuthorized(_("Insufficient privileges"))
+        else:
+            raise exception.VolumeBackendAPIException(data=rsp_data['message'])
+
+        return rsp_data
+
+
+class BlockbridgeISCSIDriver(driver.ISCSIDriver):
+    """Manages volumes hosted on Blockbridge EPS."""
+
+    VERSION = '1.3.0'
+
+    def __init__(self, *args, **kwargs):
+        super(BlockbridgeISCSIDriver, self).__init__(*args, **kwargs)
+
+        self.client = kwargs.get('client', None) or (
+            BlockbridgeAPIClient(configuration=self.configuration))
+
+        self.configuration.append_config_values(blockbridge_opts)
+        self.hostname = socket.gethostname()
+
+    def do_setup(self, context):
+        """Set up the Blockbridge volume driver."""
+        pass
+
+    def check_for_setup_error(self):
+        """Verify configuration is valid."""
+
+        # ensure the host is configured
+        if self.configuration.safe_get('blockbridge_api_host') is None:
+            raise exception.InvalidInput(
+                reason=_("Blockbridge api host not configured"))
+
+        # ensure the auth scheme is valid and has the necessary configuration.
+        auth_scheme = self.configuration.safe_get("blockbridge_auth_scheme")
+
+        if auth_scheme == 'password':
+            auth_user = self.configuration.safe_get('blockbridge_auth_user')
+            auth_pw = self.configuration.safe_get('blockbridge_auth_password')
+            if auth_user is None:
+                raise exception.InvalidInput(
+                    reason=_("Blockbridge user not configured (required for "
+                             "auth scheme 'password')"))
+            if auth_pw is None:
+                raise exception.InvalidInput(
+                    reason=_("Blockbridge password not configured (required "
+                             "for auth scheme 'password')"))
+        elif auth_scheme == 'token':
+            token = self.configuration.safe_get('blockbridge_auth_token')
+            if token is None:
+                raise exception.InvalidInput(
+                    reason=_("Blockbridge token not configured (required "
+                             "for auth scheme 'token')"))
+        else:
+            raise exception.InvalidInput(
+                reason=(_("Blockbridge configured with invalid auth scheme "
+                          "'%(auth_scheme)s'") % {'auth_scheme': auth_scheme}))
+
+        # ensure at least one pool is defined
+        pools = self.configuration.safe_get('blockbridge_pools')
+        if pools is None:
+            raise exception.InvalidInput(
+                reason=_("Blockbridge pools not configured"))
+
+        default_pool = self.configuration.safe_get('blockbridge_default_pool')
+        if default_pool and default_pool not in pools:
+            raise exception.InvalidInput(
+                reason=_("Blockbridge default pool does not exist"))
+
+    def _vol_api_submit(self, vol_id, **kwargs):
+        vol_id = urllib.quote(vol_id, '')
+        rel_url = "/volumes/%s" % vol_id
+
+        return self.client.submit(rel_url, **kwargs)
+
+    def _create_volume(self, vol_id, params, **kwargs):
+        """Execute a backend volume create operation."""
+
+        self._vol_api_submit(vol_id, method='PUT', params=params, **kwargs)
+
+    def _delete_volume(self, vol_id, **kwargs):
+        """Execute a backend volume delete operation."""
+
+        self._vol_api_submit(vol_id, method='DELETE', **kwargs)
+
+    def _extend_volume(self, vol_id, capacity, **kwargs):
+        """Execute a backend volume grow operation."""
+
+        params = kwargs.pop('params', {})
+        params['capacity'] = capacity
+
+        self._vol_api_submit(vol_id, method='POST', action='grow',
+                             params=params, **kwargs)
+
+    def _snap_api_submit(self, vol_id, snap_id, **kwargs):
+        vol_id = urllib.quote(vol_id, '')
+        snap_id = urllib.quote(snap_id, '')
+        rel_url = "/volumes/%s/snapshots/%s" % (vol_id, snap_id)
+
+        return self.client.submit(rel_url, **kwargs)
+
+    def _create_snapshot(self, vol_id, snap_id, params, **kwargs):
+        """Execute a backend snapshot create operation."""
+
+        self._snap_api_submit(vol_id, snap_id, method='PUT',
+                              params=params, **kwargs)
+
+    def _delete_snapshot(self, vol_id, snap_id, **kwargs):
+        """Execute a backend snapshot delete operation."""
+
+        return self._snap_api_submit(vol_id, snap_id, method='DELETE',
+                                     **kwargs)
+
+    def _export_api_submit(self, vol_id, ini_name, **kwargs):
+        vol_id = urllib.quote(vol_id, '')
+        ini_name = urllib.quote(ini_name, '')
+        rel_url = "/volumes/%s/exports/%s" % (vol_id, ini_name)
+
+        return self.client.submit(rel_url, **kwargs)
+
+    def _create_export(self, vol_id, ini_name, params, **kwargs):
+        """Execute a backend volume export operation."""
+
+        return self._export_api_submit(vol_id, ini_name, method='PUT',
+                                       params=params, **kwargs)
+
+    def _delete_export(self, vol_id, ini_name, **kwargs):
+        """Remove a previously created volume export."""
+
+        self._export_api_submit(vol_id, ini_name, method='DELETE',
+                                **kwargs)
+
+    def _get_pool_stats(self, pool, query, **kwargs):
+        """Retrieve pool statistics and capabilities."""
+
+        pq = {
+            'pool': pool,
+            'query': query,
+        }
+        pq.update(kwargs)
+
+        return self.client.submit('/status', params=pq)
+
+    def _get_dbref_name(self, ref):
+        display_name = ref.get('display_name')
+        if not display_name:
+            return ref.get('name')
+        return display_name
+
+    def _get_query_string(self, ctxt, volume):
+        pools = self.configuration.blockbridge_pools
+        default_pool = self.configuration.blockbridge_default_pool
+        explicit_pool = volume_utils.extract_host(volume['host'], 'pool')
+
+        pool_name = explicit_pool or default_pool
+        if pool_name:
+            return pools[pool_name]
+        else:
+            # no pool specified or defaulted -- just pick whatever comes out of
+            # the dictionary first.
+            return pools.values()[0]
+
+    def create_volume(self, volume):
+        """Create a volume on a Blockbridge EPS backend.
+
+        :param volume: volume reference
+        """
+
+        ctxt = context.get_admin_context()
+        create_params = {
+            'name': self._get_dbref_name(volume),
+            'query': self._get_query_string(ctxt, volume),
+            'capacity': int(volume['size'] * units.Gi),
+        }
+
+        LOG.debug("Provisioning %(capacity)s byte volume "
+                  "with query '%(query)s'", create_params, resource=volume)
+
+        return self._create_volume(volume['id'],
+                                   create_params,
+                                   user_id=volume['user_id'],
+                                   project_id=volume['project_id'])
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+
+        create_params = {
+            'name': self._get_dbref_name(volume),
+            'src': {
+                'volume_id': src_vref['id'],
+            },
+        }
+
+        LOG.debug("Cloning source volume %(id)s", src_vref, resource=volume)
+
+        return self._create_volume(volume['id'],
+                                   create_params,
+                                   user_id=volume['user_id'],
+                                   project_id=volume['project_id'])
+
+    def delete_volume(self, volume):
+        """Remove an existing volume.
+
+        :param volume: volume reference
+        """
+
+        LOG.debug("Removing volume %(id)s", volume, resource=volume)
+
+        return self._delete_volume(volume['id'],
+                                   user_id=volume['user_id'],
+                                   project_id=volume['project_id'])
+
+    def create_snapshot(self, snapshot):
+        """Create snapshot of existing volume.
+
+        :param snapshot: snapshot reference
+        """
+
+        create_params = {
+            'name': self._get_dbref_name(snapshot),
+        }
+
+        LOG.debug("Creating snapshot of volume %(volume_id)s", snapshot,
+                  resource=snapshot)
+
+        return self._create_snapshot(snapshot['volume_id'],
+                                     snapshot['id'],
+                                     create_params,
+                                     user_id=snapshot['user_id'],
+                                     project_id=snapshot['project_id'])
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create new volume from existing snapshot.
+
+        :param volume: reference of volume to be created
+        :param snapshot: reference of source snapshot
+        """
+
+        create_params = {
+            'name': self._get_dbref_name(volume),
+            'src': {
+                'volume_id': snapshot['volume_id'],
+                'snapshot_id': snapshot['id'],
+            },
+        }
+
+        LOG.debug("Creating volume from snapshot %(id)s", snapshot,
+                  resource=volume)
+
+        return self._create_volume(volume['id'],
+                                   create_params,
+                                   user_id=volume['user_id'],
+                                   project_id=volume['project_id'])
+
+    def delete_snapshot(self, snapshot):
+        """Delete volume's snapshot.
+
+        :param snapshot: snapshot reference
+        """
+
+        LOG.debug("Deleting snapshot of volume %(volume_id)s", snapshot,
+                  resource=snapshot)
+
+        self._delete_snapshot(snapshot['volume_id'],
+                              snapshot['id'],
+                              user_id=snapshot['user_id'],
+                              project_id=snapshot['project_id'])
+
+    def create_export(self, _ctx, volume):
+        """Do nothing: target created during instance attachment."""
+        pass
+
+    def ensure_export(self, _ctx, volume):
+        """Do nothing: target created during instance attachment."""
+        pass
+
+    def remove_export(self, _ctx, volume):
+        """Do nothing: target created during instance attachment."""
+        pass
+
+    def initialize_connection(self, volume, connector, **kwargs):
+        """Attach volume to initiator/host.
+
+        Creates a profile for the initiator, and adds the new profile to the
+        target ACL.
+
+        """
+
+        # generate a CHAP secret here -- there is no way to retrieve an
+        # existing CHAP secret over the Blockbridge API, so it must be
+        # supplied by the volume driver.
+        export_params = {
+            'chap_user': (
+                kwargs.get('user', volume_utils.generate_username(16))),
+            'chap_secret': (
+                kwargs.get('password', volume_utils.generate_password(32))),
+        }
+
+        LOG.debug("Configuring export for %(initiator)s", connector,
+                  resource=volume)
+
+        rsp = self._create_export(volume['id'],
+                                  connector['initiator'],
+                                  export_params,
+                                  user_id=volume['user_id'],
+                                  project_id=volume['project_id'])
+
+        # combine locally generated chap credentials with target iqn/lun to
+        # present the attach properties.
+        target_portal = "%s:%s" % (rsp['target_ip'], rsp['target_port'])
+
+        properties = {
+            'target_discovered': False,
+            'target_portal': target_portal,
+            'target_iqn': rsp['target_iqn'],
+            'target_lun': rsp['target_lun'],
+            'volume_id': volume['id'],
+            'auth_method': 'CHAP',
+            'auth_username': rsp['initiator_login'],
+            'auth_password': export_params['chap_secret'],
+        }
+
+        LOG.debug("Attach properties: %(properties)s",
+                  {'properties': properties})
+
+        return {
+            'driver_volume_type': 'iscsi',
+            'data': properties,
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Detach volume from the initiator.
+
+        Removes initiator profile entry from target ACL.
+
+        """
+
+        LOG.debug("Unconfiguring export for %(initiator)s", connector,
+                  resource=volume)
+
+        self._delete_export(volume['id'],
+                            connector['initiator'],
+                            user_id=volume['user_id'],
+                            project_id=volume['project_id'])
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume."""
+
+        capacity = int(new_size * units.Gi)
+
+        LOG.debug("Extending volume to %(capacity)s bytes",
+                  {'capacity': capacity}, resource=volume)
+
+        self._extend_volume(volume['id'],
+                            capacity,
+                            user_id=volume['user_id'],
+                            project_id=volume['project_id'])
+
+    def get_volume_stats(self, refresh=False):
+        if refresh:
+            self._update_volume_stats()
+        return self._stats
+
+    def _update_volume_stats(self):
+        if self.configuration:
+            cfg_name = self.configuration.safe_get('volume_backend_name')
+        backend_name = cfg_name or self.__class__.__name__
+
+        driver_cfg = {
+            'hostname': self.hostname,
+            'version': self.VERSION,
+            'backend_name': backend_name,
+        }
+
+        filter_function = self.get_filter_function()
+        goodness_function = self.get_goodness_function()
+        pools = []
+
+        LOG.debug("Updating volume driver statistics",
+                  resource={'type': 'driver', 'id': backend_name})
+
+        for pool_name, query in self.configuration.blockbridge_pools.items():
+            stats = self._get_pool_stats(pool_name, query, **driver_cfg)
+
+            system_serial = stats.get('system_serial', 'unknown')
+            free_capacity = stats.get('free_capacity', None)
+            total_capacity = stats.get('total_capacity', None)
+            provisioned_capacity = stats.get('provisioned_capacity', None)
+
+            if free_capacity is None:
+                free_capacity = 'unknown'
+            else:
+                free_capacity = int(free_capacity / units.Gi)
+
+            if total_capacity is None:
+                total_capacity = 'unknown'
+            else:
+                total_capacity = int(total_capacity / units.Gi)
+
+            pool = {
+                'pool_name': pool_name,
+                'location_info': ('BlockbridgeDriver:%(sys_id)s:%(pool)s' %
+                                  {'sys_id': system_serial,
+                                   'pool': pool_name}),
+                'max_over_subscription_ratio': (
+                    self.configuration.safe_get('max_over_subscription_ratio')
+                ),
+                'free_capacity_gb': free_capacity,
+                'total_capacity_gb': total_capacity,
+                'reserved_percentage': 0,
+                'thin_provisioning_support': True,
+                'filter_function': filter_function,
+                'goodness_function': goodness_function,
+            }
+
+            if provisioned_capacity is not None:
+                pool['provisioned_capacity_gb'] = int(
+                    provisioned_capacity / units.Gi
+                )
+
+            pools.append(pool)
+
+        self._stats = {
+            'volume_backend_name': backend_name,
+            'vendor_name': 'Blockbridge',
+            'driver_version': self.VERSION,
+            'storage_protocol': 'iSCSI',
+            'pools': pools,
+        }