review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Add Datera driver for Cinder
authorMike Perez <thingee@gmail.com>
Thu, 3 Jul 2014 06:38:20 +0000 (23:38 -0700)
committerMike Perez <thingee@gmail.com>
Wed, 3 Sep 2014 17:24:41 +0000 (10:24 -0700)
Initial commit of the Datera Driver using iSCSI.

Implements blueprint datera-driver
Change-Id: Idbfff71883b3f97166e8372a3818f80a37cf422c

cinder/exception.py
cinder/tests/volume/drivers/datera.py [new file with mode: 0644]
cinder/volume/driver.py
cinder/volume/drivers/datera.py [new file with mode: 0644]
etc/cinder/cinder.conf.sample

index f297f7af4977e4866f5d6d15f2788348df84ef26..1ffa6ab3b108fce687b7db688e3a77c87d7b5b87 100644 (file)
@@ -849,3 +849,8 @@ class HBSDBusy(HBSDError):
 
 class HBSDNotFound(NotFound):
     message = _("Storage resource could not be found.")
+
+
+# Datera driver
+# Raised by the Datera volume driver when a request to the Datera cluster
+# API fails outright or returns a non-OK, non-404 HTTP status.
+class DateraAPIException(VolumeBackendAPIException):
+    message = _("Bad response from Datera API")
diff --git a/cinder/tests/volume/drivers/datera.py b/cinder/tests/volume/drivers/datera.py
new file mode 100644 (file)
index 0000000..928a37c
--- /dev/null
@@ -0,0 +1,292 @@
+# Copyright 2014 Datera
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from cinder import context
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers import datera
+
+
+LOG = logging.getLogger(__name__)
+
+
+class DateraVolumeTestCase(test.TestCase):
+    """Unit tests for cinder.volume.drivers.datera.DateraDriver.
+
+    DateraDriver._issue_api_request is patched out for every test, so no
+    HTTP traffic occurs; each test supplies a canned Datera API response
+    (see stub_export below for the export-call payload).
+    """
+
+    def setUp(self):
+        super(DateraVolumeTestCase, self).setUp()
+
+        # Minimal driver configuration; spec= keeps attribute typos loud.
+        self.cfg = mock.Mock(spec=conf.Configuration)
+        self.cfg.san_ip = '127.0.0.1'
+        self.cfg.san_is_local = True
+        self.cfg.datera_api_token = 'secret'
+        self.cfg.datera_api_port = '7717'
+        self.cfg.datera_api_version = '1'
+        self.cfg.datera_num_replicas = '2'
+
+        mock_exec = mock.Mock()
+        mock_exec.return_value = ('', '')
+
+        self.driver = datera.DateraDriver(execute=mock_exec,
+                                          configuration=self.cfg)
+        self.driver.set_initialized()
+        self.volume = _stub_volume()
+        # Intercept all cluster API calls made by the driver under test.
+        self.api_patcher = mock.patch('cinder.volume.drivers.datera.'
+                                      'DateraDriver._issue_api_request')
+        self.mock_api = self.api_patcher.start()
+
+        self.addCleanup(self.api_patcher.stop)
+
+    def test_volume_create_success(self):
+        self.mock_api.return_value = {
+            'uuid': 'c20aba21-6ef6-446b-b374-45733b4883ba',
+            'size': '1073741824',
+            'name': 'volume-00000001',
+            'parent': '00000000-0000-0000-0000-000000000000',
+            'numReplicas': '2',
+            'subType': 'IS_ORIGINAL'
+        }
+        self.assertIsNone(self.driver.create_volume(self.volume))
+
+    def test_volume_create_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.create_volume, self.volume)
+
+    def test_create_cloned_volume_success(self):
+        self.mock_api.return_value = {
+            'uuid': 'c20aba21-6ef6-446b-b374-45733b4883ba',
+            'size': '1073741824',
+            'name': 'volume-00000001',
+            'parent': '7f91abfa-7964-41ed-88fc-207c3a290b4f',
+            'numReplicas': '2',
+            'subType': 'IS_CLONE'
+        }
+        source_volume = _stub_volume(
+            id='7f91abfa-7964-41ed-88fc-207c3a290b4f',
+            display_name='foo'
+        )
+        self.assertIsNone(self.driver.create_cloned_volume(self.volume,
+                                                           source_volume))
+
+    def test_create_cloned_volume_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        source_volume = _stub_volume(
+            id='7f91abfa-7964-41ed-88fc-207c3a290b4f',
+            display_name='foo'
+        )
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.create_cloned_volume, self.volume,
+                          source_volume)
+
+    def test_delete_volume_success(self):
+        self.mock_api.return_value = {
+            'uuid': 'c20aba21-6ef6-446b-b374-45733b4883ba',
+            'size': '1073741824',
+            'name': 'volume-00000001',
+            'parent': '00000000-0000-0000-0000-000000000000',
+            'numReplicas': '2',
+            'subType': 'IS_ORIGINAL',
+            'target': None
+        }
+        self.assertIsNone(self.driver.delete_volume(self.volume))
+
+    def test_delete_volume_not_found(self):
+        # A missing volume must be swallowed, not raised, on delete.
+        self.mock_api.side_effect = exception.NotFound
+        self.assertIsNone(self.driver.delete_volume(self.volume))
+
+    def test_delete_volume_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.delete_volume, self.volume)
+
+    def test_ensure_export_success(self):
+        self.mock_api.return_value = stub_export
+        ctxt = context.get_admin_context()
+        # '<portal>:<port> <iqn> <lun>' assembled from stub_export.
+        expected = {
+            'provider_location': u'172.28.121.10:3260 iqn.2013-05.com.daterain'
+                                 'c::01:sn:fc372bc0490b2dbe 1'
+        }
+        self.assertEqual(expected, self.driver.ensure_export(ctxt,
+                                                             self.volume))
+
+    def test_ensure_export_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        ctxt = context.get_admin_context()
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.ensure_export, ctxt, self.volume)
+
+    def test_create_export_success(self):
+        self.mock_api.return_value = stub_export
+        ctxt = context.get_admin_context()
+        expected = {
+            'provider_location': u'172.28.121.10:3260 iqn.2013-05.com.daterain'
+                                 'c::01:sn:fc372bc0490b2dbe 1'
+        }
+        self.assertEqual(expected, self.driver.create_export(ctxt,
+                                                             self.volume))
+
+    def test_create_export_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        ctxt = context.get_admin_context()
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.create_export, ctxt, self.volume)
+
+    def test_create_snapshot_success(self):
+        self.mock_api.return_value = {
+            u'uuid': u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c',
+            u'parent': u'c20aba21-6ef6-446b-b374-45733b4883ba',
+            u'subType': u'IS_SNAPSHOT',
+            u'numReplicas': 2,
+            u'size': u'1073741824',
+            u'name': u'snapshot-00000001'
+        }
+        snapshot = _stub_snapshot(volume_id=self.volume['id'])
+        self.assertIsNone(self.driver.create_snapshot(snapshot))
+
+    def test_create_snapshot_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        snapshot = _stub_snapshot(volume_id=self.volume['id'])
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.create_snapshot, snapshot)
+
+    def test_delete_snapshot_success(self):
+        self.mock_api.return_value = {
+            u'uuid': u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c',
+            u'parent': u'c20aba21-6ef6-446b-b374-45733b4883ba',
+            u'subType': u'IS_SNAPSHOT',
+            u'numReplicas': 2,
+            u'size': u'1073741824',
+            u'name': u'snapshot-00000001'
+        }
+        snapshot = _stub_snapshot(volume_id=self.volume['id'])
+        self.assertIsNone(self.driver.delete_snapshot(snapshot))
+
+    def test_delete_snapshot_not_found(self):
+        self.mock_api.side_effect = exception.NotFound
+        # NOTE(review): the volume id is passed positionally, but
+        # _stub_snapshot ignores *args, so volume_id ends up None here.
+        # Every other test passes volume_id= by keyword — TODO confirm
+        # whether this was intended.
+        snapshot = _stub_snapshot(self.volume['id'])
+        self.assertIsNone(self.driver.delete_snapshot(snapshot))
+
+    def test_delete_snapshot_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        snapshot = _stub_snapshot(volume_id=self.volume['id'])
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.delete_snapshot, snapshot)
+
+    def test_create_volume_from_snapshot_success(self):
+        self.mock_api.return_value = {
+            u'uuid': u'c20aba21-6ef6-446b-b374-45733b4883ba',
+            u'parent': u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c',
+            u'subType': u'IS_ORIGINAL',
+            u'numReplicas': 2,
+            u'size': u'1073741824',
+            u'name': u'volume-00000001'
+        }
+        snapshot = _stub_snapshot(volume_id=self.volume['id'])
+        self.assertIsNone(
+            self.driver.create_volume_from_snapshot(self.volume, snapshot))
+
+    def test_create_volume_from_snapshot_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        snapshot = _stub_snapshot(volume_id=self.volume['id'])
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.create_volume_from_snapshot, self.volume,
+                          snapshot)
+
+    def test_extend_volume_success(self):
+        self.mock_api.return_value = {
+            u'uuid': u'c20aba21-6ef6-446b-b374-45733b4883ba',
+            u'parent': u'00000000-0000-0000-0000-000000000000',
+            u'subType': u'IS_ORIGINAL',
+            u'numReplicas': 2,
+            u'size': u'2147483648',
+            u'name': u'volume-00000001'
+        }
+        volume = _stub_volume(size=1)
+        self.assertIsNone(self.driver.extend_volume(volume, 2))
+
+    def test_extend_volume_fails(self):
+        self.mock_api.side_effect = exception.DateraAPIException
+        volume = _stub_volume(size=1)
+        self.assertRaises(exception.DateraAPIException,
+                          self.driver.extend_volume, volume, 2)
+
+
+# Canned response for the Datera 'volumes/<id>/export' API call, used by the
+# ensure_export/create_export tests. The driver's _do_export consumes
+# '_ipColl'[0] for the iSCSI portal address and
+# 'targetIds'[<server>]['ids'][0]['id'] for the target IQN; the remaining
+# keys are reproduced for realism only.
+stub_export = {
+    u'_ipColl': [u'172.28.121.10', u'172.28.120.10'],
+    u'acls': {},
+    u'activeServers': {u'4594953e-f97f-e111-ad85-001e6738c0f0': u'1'},
+    u'ctype': u'TC_BLOCK_ISCSI',
+    u'endpointsExt1': {
+        u'4594953e-f97f-e111-ad85-001e6738c0f0': {
+            u'ipHigh': 0,
+            u'ipLow': u'192421036',
+            u'ipStr': u'172.28.120.11',
+            u'ipV': 4,
+            u'name': u'',
+            u'network': 24
+        }
+    },
+    u'endpointsExt2': {
+        u'4594953e-f97f-e111-ad85-001e6738c0f0': {
+            u'ipHigh': 0,
+            u'ipLow': u'192486572',
+            u'ipStr': u'172.28.121.11',
+            u'ipV': 4,
+            u'name': u'',
+            u'network': 24
+        }
+    },
+    u'inodes': {u'c20aba21-6ef6-446b-b374-45733b4883ba': u'1'},
+    u'name': u'',
+    u'networkPort': 0,
+    u'serverAllocation': u'TS_ALLOC_COMPLETED',
+    u'servers': {u'4594953e-f97f-e111-ad85-001e6738c0f0': u'1'},
+    u'targetAllocation': u'TS_ALLOC_COMPLETED',
+    u'targetIds': {
+        u'4594953e-f97f-e111-ad85-001e6738c0f0': {
+            u'ids': [{
+                u'dev': None,
+                u'id': u'iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe'
+            }]
+        }
+    },
+    u'typeName': u'TargetIscsiConfig',
+    u'uuid': u'7071efd7-9f22-4996-8f68-47e9ab19d0fd'
+}
+
+
+def _stub_volume(*args, **kwargs):
+    """Build a minimal volume dict for the tests.
+
+    Recognized keyword overrides: id, display_name, size,
+    provider_location. Positional arguments are accepted but ignored.
+    """
+    uuid = u'c20aba21-6ef6-446b-b374-45733b4883ba'
+    name = u'volume-00000001'
+    size = 1
+    volume = {}
+    volume['id'] = kwargs.get('id', uuid)
+    volume['display_name'] = kwargs.get('display_name', name)
+    volume['size'] = kwargs.get('size', size)
+    volume['provider_location'] = kwargs.get('provider_location', None)
+    return volume
+
+
+def _stub_snapshot(*args, **kwargs):
+    """Build a minimal snapshot dict for the tests.
+
+    Recognized keyword overrides: id, display_name, volume_id. Positional
+    arguments are accepted but ignored, so callers must pass volume_id by
+    keyword for it to take effect.
+    """
+    uuid = u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c'
+    name = u'snapshot-00000001'
+    # NOTE(review): the local is named 'volume' but holds a snapshot dict.
+    volume = {}
+    volume['id'] = kwargs.get('id', uuid)
+    volume['display_name'] = kwargs.get('display_name', name)
+    volume['volume_id'] = kwargs.get('volume_id', None)
+    return volume
index aec0f1ef1277938aa6500680381fa8ddec066a30..04887e782a7454309283aeec62bacb0769c6a5e0 100644 (file)
@@ -119,6 +119,14 @@ volume_opts = [
                     'perform write-back(on) or write-through(off). '
                     'This parameter is valid if iscsi_helper is set '
                     'to tgtadm or iseradm.'),
+    cfg.StrOpt('driver_client_cert_key',
+               default=None,
+               help='The path to the client certificate key for verification, '
+                    'if the driver supports it.'),
+    cfg.StrOpt('driver_client_cert',
+               default=None,
+               help='The path to the client certificate for verification, '
+                    'if the driver supports it.'),
 ]
 
 # for backward compatibility
diff --git a/cinder/volume/drivers/datera.py b/cinder/volume/drivers/datera.py
new file mode 100644 (file)
index 0000000..4fb08ad
--- /dev/null
@@ -0,0 +1,252 @@
+# Copyright 2014 Datera
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from oslo.config import cfg
+import requests
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import units
+from cinder.volume.drivers.san import san
+
+LOG = logging.getLogger(__name__)
+
+# Datera-specific configuration options, appended to the driver's
+# configuration in DateraDriver.__init__ and registered globally below.
+d_opts = [
+    cfg.StrOpt('datera_api_token',
+               default=None,
+               help='Datera API token.'),
+    cfg.StrOpt('datera_api_port',
+               default='7717',
+               help='Datera API port.'),
+    cfg.StrOpt('datera_api_version',
+               default='1',
+               help='Datera API version.'),
+    cfg.StrOpt('datera_num_replicas',
+               default='3',
+               help='Number of replicas to create of an inode.')
+]
+
+
+CONF = cfg.CONF
+# Shared TLS client-cert options are declared in cinder.volume.driver;
+# import them so _issue_api_request can read them from the configuration.
+CONF.import_opt('driver_client_cert_key', 'cinder.volume.driver')
+CONF.import_opt('driver_client_cert', 'cinder.volume.driver')
+CONF.register_opts(d_opts)
+
+
+class DateraDriver(san.SanISCSIDriver):
+    """The OpenStack Datera Driver
+
+    Cinder volume driver for Datera storage, speaking to the cluster's
+    REST API over HTTP(S) via _issue_api_request.
+
+    Version history:
+        1.0 - Initial driver
+    """
+    VERSION = '1.0'
+
+    def __init__(self, *args, **kwargs):
+        super(DateraDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(d_opts)
+        self.num_replicas = self.configuration.datera_num_replicas
+        # Cached stats dict, filled by _update_cluster_stats() and served
+        # by get_volume_stats().
+        self.cluster_stats = {}
+
+    def create_volume(self, volume):
+        """Create a logical volume."""
+        params = {
+            'name': volume['display_name'] or volume['id'],
+            # Size is sent to the cluster in bytes, as a string.
+            'size': str(volume['size'] * units.Gi),
+            'uuid': volume['id'],
+            'numReplicas': self.num_replicas
+        }
+        self._issue_api_request('volumes', 'post', body=params)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Create a new volume as a clone of src_vref."""
+        data = {
+            'name': volume['display_name'] or volume['id'],
+            'uuid': volume['id'],
+            'clone_uuid': src_vref['id'],
+            'numReplicas': self.num_replicas
+        }
+        self._issue_api_request('volumes', 'post', body=data)
+
+    def delete_volume(self, volume):
+        """Delete a volume; a volume missing from the cluster is a no-op."""
+        try:
+            self._issue_api_request('volumes', 'delete', volume['id'])
+        except exception.NotFound:
+            msg = _("Tried to delete volume %s, but was not found in Datera "
+                    "cluster. Continuing with delete.")
+            # NOTE(review): msg contains an unfilled '%s' placeholder which
+            # is never interpolated — likely meant
+            # LOG.info(msg, volume['id']).
+            LOG.info(msg)
+
+    def _do_export(self, context, volume):
+        """Gets the associated account, retrieves CHAP info and updates."""
+        # Re-use an existing export when the volume already has one.
+        if volume['provider_location']:
+            return {'provider_location': volume['provider_location']}
+
+        export = self._issue_api_request(
+            'volumes', action='export', method='post',
+            body={'ctype': 'TC_BLOCK_ISCSI'}, resource=volume['id'])
+
+        # NOTE(thingee): Refer to the Datera test for a stub of what this looks
+        # like. We're just going to pull the first IP that the Datera cluster
+        # makes available for the portal.
+        iscsi_portal = export['_ipColl'][0] + ':3260'
+        # NOTE(review): dict.itervalues().next() is Python-2-only syntax.
+        iqn = export['targetIds'].itervalues().next()['ids'][0]['id']
+
+        # Standard Cinder iSCSI location format: '<portal> <iqn> <lun>'.
+        provider_location = '%s %s %s' % (iscsi_portal, iqn, 1)
+        model_update = {'provider_location': provider_location}
+        return model_update
+
+    def ensure_export(self, context, volume):
+        """Re-export the volume on driver restart."""
+        return self._do_export(context, volume)
+
+    def create_export(self, context, volume):
+        """Export the volume and return its provider_location."""
+        return self._do_export(context, volume)
+
+    def delete_snapshot(self, snapshot):
+        """Delete a snapshot; a missing snapshot is a no-op."""
+        try:
+            self._issue_api_request('snapshots', 'delete', snapshot['id'])
+        except exception.NotFound:
+            msg = _("Tried to delete snapshot %s, but was not found in Datera "
+                    "cluster. Continuing with delete.")
+            # NOTE(review): same unfilled '%s' placeholder as delete_volume —
+            # likely meant LOG.info(msg, snapshot['id']).
+            LOG.info(msg)
+
+    def create_snapshot(self, snapshot):
+        """Create a snapshot of the snapshot's parent volume."""
+        data = {
+            'uuid': snapshot['id'],
+            'parentUUID': snapshot['volume_id']
+        }
+        self._issue_api_request('snapshots', 'post', body=data)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create a new volume populated from an existing snapshot."""
+        data = {
+            'name': volume['display_name'] or volume['id'],
+            'uuid': volume['id'],
+            'snapshot_uuid': snapshot['id'],
+            'numReplicas': self.num_replicas
+        }
+        self._issue_api_request('volumes', 'post', body=data)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, run update first.
+        The name is a bit misleading as
+        the majority of the data here is cluster
+        data.
+        """
+        if refresh:
+            try:
+                self._update_cluster_stats()
+            except exception.DateraAPIException:
+                # Best effort: serve the previously cached stats on failure.
+                # NOTE(review): message is not wrapped in _() unlike the one
+                # in _update_cluster_stats, and the 'pass' is redundant.
+                LOG.error('Failed to get updated stats from Datera cluster.')
+                pass
+
+        return self.cluster_stats
+
+    def extend_volume(self, volume, new_size):
+        """Grow the volume to new_size (GiB; sent to the API in bytes)."""
+        data = {
+            'size': str(new_size * units.Gi)
+        }
+        self._issue_api_request('volumes', 'put', body=data,
+                                resource=volume['id'])
+
+    def _update_cluster_stats(self):
+        """Refresh self.cluster_stats from the cluster API."""
+        LOG.debug("Updating cluster stats info.")
+
+        results = self._issue_api_request('cluster')
+
+        if 'uuid' not in results:
+            # NOTE(review): only logs — execution continues and the key
+            # reads below may still raise KeyError. TODO confirm whether an
+            # early return was intended here.
+            LOG.error(_('Failed to get updated stats from Datera Cluster.'))
+
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        stats = {
+            'volume_backend_name': backend_name or 'Datera',
+            'vendor_name': 'Datera',
+            'driver_version': self.VERSION,
+            'storage_protocol': 'iSCSI',
+            # NOTE(review): reported as *_gb but no byte->GiB conversion is
+            # visible here — assumes the API already returns GB. TODO confirm
+            # the units of totalRawSpace/availableSpace.
+            'total_capacity_gb': int(results['totalRawSpace']),
+            'free_capacity_gb': int(results['availableSpace']),
+            'reserved_percentage': 0,
+        }
+
+        self.cluster_stats = stats
+
+    def _issue_api_request(self, resource_type, method='get', resource=None,
+                           body=None, action=None):
+        """All API requests to Datera cluster go through this method.
+
+        :param resource_type: the type of the resource
+        :param method: the request verb
+        :param resource: the identifier of the resource
+        :param body: a dict with options for the action_type
+        :param action: the action to perform
+        :returns: a dict of the response from the Datera cluster
+        :raises NotFound: on a 404 response
+        :raises DateraAPIException: on request failure or any other bad status
+        """
+        host = self.configuration.san_ip
+        port = self.configuration.datera_api_port
+        api_token = self.configuration.datera_api_token
+        api_version = self.configuration.datera_api_version
+
+        payload = json.dumps(body, ensure_ascii=False)
+        # NOTE(review): str.encode() returns a new object and the result is
+        # discarded, so this line has no effect — likely meant
+        # payload = payload.encode('utf-8').
+        payload.encode('utf-8')
+        header = {'Content-Type': 'application/json; charset=utf-8'}
+
+        if api_token:
+            header['Auth-Token'] = api_token
+
+        LOG.debug("Payload for Datera API call: %s", payload)
+
+        client_cert = self.configuration.driver_client_cert
+        client_cert_key = self.configuration.driver_client_cert_key
+        protocol = 'http'
+        cert_data = None
+
+        # A configured client cert switches the request to HTTPS and is sent
+        # as the (cert, key) pair understood by requests' 'cert' parameter.
+        if client_cert:
+            protocol = 'https'
+            cert_data = (client_cert, client_cert_key)
+
+        connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
+                                                   api_version, resource_type)
+
+        if resource is not None:
+            connection_string += '/%s' % resource
+        if action is not None:
+            connection_string += '/%s' % action
+
+        LOG.debug("Endpoint for Datera API call: %s", connection_string)
+        try:
+            # NOTE(review): verify=False disables server certificate
+            # verification even when HTTPS is in use — confirm this is
+            # acceptable for the target deployments.
+            response = getattr(requests, method)(connection_string,
+                                                 data=payload, headers=header,
+                                                 verify=False, cert=cert_data)
+        except requests.exceptions.RequestException as ex:
+            # NOTE(review): ex.message is Python-2-only; use str(ex) on
+            # Python 3.
+            msg = _('Failed to make a request to Datera cluster endpoint due '
+                    'to the following reason: %s') % ex.message
+            LOG.error(msg)
+            raise exception.DateraAPIException(msg)
+
+        data = response.json()
+        LOG.debug("Results of Datera API call: %s", data)
+        if not response.ok:
+            # 404 maps to NotFound so delete paths can treat it as "already
+            # gone"; everything else is a hard API failure.
+            if response.status_code == 404:
+                raise exception.NotFound(data['message'])
+            else:
+                msg = _('Request to Datera cluster returned bad status:'
+                        ' %(status)s | %(reason)s') % {
+                            'status': response.status_code,
+                            'reason': response.reason}
+                LOG.error(msg)
+                raise exception.DateraAPIException(msg)
+
+        return data
index d7e0838d7fe0516fe9ccccdc83ead9cc70a53479..b0328baaad09d0f5a8882593debc7e40c1d9d4bd 100644 (file)
 # value)
 #iscsi_write_cache=on
 
+# The path to the client certificate key for verification, if
+# the driver supports it. (string value)
+#driver_client_cert_key=<None>
+
+# The path to the client certificate for verification, if the
+# driver supports it. (string value)
+#driver_client_cert=<None>
+
 
 #
 # Options defined in cinder.volume.drivers.block_device
 #coraid_repository_key=coraid_repository
 
 
+#
+# Options defined in cinder.volume.drivers.datera
+#
+
+# Datera API token. (string value)
+#datera_api_token=<None>
+
+# Datera API port. (string value)
+#datera_api_port=7717
+
+# Datera API version. (string value)
+#datera_api_version=1
+
+# Number of replicas to create of an inode. (string value)
+#datera_num_replicas=3
+
+
 #
 # Options defined in cinder.volume.drivers.emc.emc_vmax_common
 #