Re-integrate Oracle iSCSI Cinder driver
author     Diem Tran <diem.tran@oracle.com>
           Tue, 28 Apr 2015 18:33:14 +0000 (14:33 -0400)
committer  Diem Tran <diem.tran@oracle.com>
           Tue, 9 Jun 2015 17:50:37 +0000 (13:50 -0400)
This reverts commit c742566.

The Oracle iSCSI Cinder driver now has a working CI and is ready to be re-integrated.

Change-Id: I0522dc054a46d62f912df1694d54c0083c5f4f7e

cinder/tests/unit/test_zfssa.py
cinder/volume/drivers/zfssa/zfssaiscsi.py [new file with mode: 0644]
cinder/volume/drivers/zfssa/zfssarest.py

index d7e71997b45f8a75fd7bbc8a96cc967f3d01ad1b..a25f91a5e7aa949826015959589845f696c965cc 100644 (file)
@@ -21,7 +21,12 @@ import mock
 from oslo_log import log as logging
 from oslo_utils import units
 
+from cinder import test
+from cinder.tests.unit import fake_utils
+from cinder.volume import configuration as conf
 from cinder.volume.drivers.zfssa import restclient as client
+from cinder.volume.drivers.zfssa import zfssaiscsi as iscsi
+from cinder.volume.drivers.zfssa import zfssanfs
 from cinder.volume.drivers.zfssa import zfssarest as rest
 
 
@@ -119,14 +124,17 @@ class FakeZFSSA(object):
         if not self.host and not self.user:
             return out
 
-        out = {"status": "online",
-               "lunguid": "600144F0F8FBD5BD000053CE53AB0001",
-               "initiatorgroup": ["fake_initgrp"],
-               "volsize": volsize,
-               "pool": pool,
-               "name": lun,
-               "project": project,
-               "targetgroup": targetgroup}
+        out = {
+            "status": "online",
+            "lunguid": "600144F0F8FBD5BD000053CE53AB0001",
+            "initiatorgroup": ["fake_initgrp"],
+            "volsize": volsize,
+            "pool": pool,
+            "name": lun,
+            "project": project,
+            "targetgroup": targetgroup,
+            "lun": {"assignednumber": 0},
+        }
         if specs:
             out.update(specs)
 
@@ -292,7 +300,141 @@ class FakeNFSZFSSA(FakeZFSSA):
         return out
 
 
+class TestZFSSAISCSIDriver(test.TestCase):
+
+    test_vol = {
+        'name': 'cindervol',
+        'size': 1,
+        'id': 1,
+        'provider_location': 'fake_location 1 2',
+        'provider_auth': 'fake_auth user pass',
+    }
+
+    test_snap = {
+        'name': 'cindersnap',
+        'volume_name': test_vol['name']
+    }
+
+    test_vol_snap = {
+        'name': 'cindersnapvol',
+        'size': test_vol['size']
+    }
+
+    def __init__(self, method):
+        super(TestZFSSAISCSIDriver, self).__init__(method)
+
+    @mock.patch.object(iscsi, 'factory_zfssa')
+    def setUp(self, _factory_zfssa):
+        super(TestZFSSAISCSIDriver, self).setUp()
+        self._create_fake_config()
+        _factory_zfssa.return_value = FakeZFSSA()
+        iscsi.ZFSSAISCSIDriver._execute = fake_utils.fake_execute
+        self.drv = iscsi.ZFSSAISCSIDriver(configuration=self.configuration)
+        self.drv.do_setup({})
+
+    def _create_fake_config(self):
+        self.configuration = mock.Mock(spec=conf.Configuration)
+        self.configuration.san_ip = '1.1.1.1'
+        self.configuration.san_login = 'user'
+        self.configuration.san_password = 'passwd'
+        self.configuration.zfssa_pool = 'pool'
+        self.configuration.zfssa_project = 'project'
+        self.configuration.zfssa_lun_volblocksize = '8k'
+        self.configuration.zfssa_lun_sparse = 'false'
+        self.configuration.zfssa_lun_logbias = 'latency'
+        self.configuration.zfssa_lun_compression = 'off'
+        self.configuration.zfssa_initiator_group = 'test-init-grp1'
+        self.configuration.zfssa_initiator = \
+            'iqn.1-0.org.deb:01:d7, iqn.1-0.org.deb:01:d9'
+        self.configuration.zfssa_initiator_user = ''
+        self.configuration.zfssa_initiator_password = ''
+        self.configuration.zfssa_initiator_config = "{'test-init-grp1':[{'iqn':\
+            'iqn.1-0.org.deb:01:d7','user':'','password':''}],'test-init-grp\
+            2':[{'iqn':'iqn.1-0.org.deb:01:d9','user':'','password':''}]}"
+        self.configuration.zfssa_target_group = 'test-target-grp1'
+        self.configuration.zfssa_target_user = ''
+        self.configuration.zfssa_target_password = ''
+        self.configuration.zfssa_target_portal = '1.1.1.1:3260'
+        self.configuration.zfssa_target_interfaces = 'e1000g0'
+        self.configuration.zfssa_rest_timeout = 60
+        self.configuration.volume_backend_name = 'fake_zfssa'
+        self.configuration.safe_get = self.fake_safe_get
+
+    def test_create_delete_volume(self):
+        self.drv.create_volume(self.test_vol)
+        self.drv.delete_volume(self.test_vol)
+
+    def test_create_delete_snapshot(self):
+        self.drv.create_volume(self.test_vol)
+        self.drv.create_snapshot(self.test_snap)
+        self.drv.delete_snapshot(self.test_snap)
+        self.drv.delete_volume(self.test_vol)
+
+    def test_create_volume_from_snapshot(self):
+        self.drv.create_volume(self.test_vol)
+        self.drv.create_snapshot(self.test_snap)
+        self.drv.create_volume_from_snapshot(self.test_vol_snap,
+                                             self.test_snap)
+        self.drv.delete_volume(self.test_vol)
+
+    def test_remove_export(self):
+        self.drv.create_volume(self.test_vol)
+        self.drv.terminate_connection(self.test_vol, '')
+        self.drv.delete_volume(self.test_vol)
+
+    def test_volume_attach_detach(self):
+        self.drv.create_volume(self.test_vol)
+
+        connector = dict(initiator='iqn.1-0.org.deb:01:d7')
+        props = self.drv.initialize_connection(self.test_vol, connector)
+        self.assertEqual('iscsi', props['driver_volume_type'])
+        self.assertEqual(self.test_vol['id'], props['data']['volume_id'])
+
+        self.drv.terminate_connection(self.test_vol, '')
+        self.drv.delete_volume(self.test_vol)
+
+    def test_get_volume_stats(self):
+        self.drv.get_volume_stats(refresh=False)
+
+    def test_extend_volume(self):
+        self.drv.create_volume(self.test_vol)
+        self.drv.extend_volume(self.test_vol, 3)
+        self.drv.delete_volume(self.test_vol)
+
+    @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
+    def test_get_voltype_specs(self, get_volume_type_extra_specs):
+        volume_type_id = mock.sentinel.volume_type_id
+        volume = {'volume_type_id': volume_type_id}
+        get_volume_type_extra_specs.return_value = {
+            'zfssa:volblocksize': '128k',
+            'zfssa:compression': 'gzip'
+        }
+        ret = self.drv._get_voltype_specs(volume)
+        self.assertEqual(ret.get('volblocksize'), '128k')
+        self.assertEqual(ret.get('sparse'),
+                         self.configuration.zfssa_lun_sparse)
+        self.assertEqual(ret.get('compression'), 'gzip')
+        self.assertEqual(ret.get('logbias'),
+                         self.configuration.zfssa_lun_logbias)
+
+    def tearDown(self):
+        super(TestZFSSAISCSIDriver, self).tearDown()
+
+    def fake_safe_get(self, value):
+        try:
+            val = getattr(self.configuration, value)
+        except AttributeError:
+            val = None
+        return val
+
+
 class FakeAddIni2InitGrp(object):
+
+    def logout(self):
+        result = client.RestResult()
+        result.status = client.Status.ACCEPTED
+        return result
+
     def get(self, path, **kwargs):
         result = client.RestResult()
         result.status = client.Status.OK
@@ -310,3 +452,79 @@ class FakeAddIni2InitGrp(object):
         result = client.RestResult()
         result.status = client.Status.CREATED
         return result
+
+    def islogin(self):
+        return True
+
+
+class TestZFSSANFSDriver(test.TestCase):
+
+    test_vol = {
+        'name': 'test-vol',
+        'size': 1
+    }
+
+    test_snap = {
+        'name': 'cindersnap',
+        'volume_name': test_vol['name'],
+        'volume_size': test_vol['size']
+    }
+
+    test_vol_snap = {
+        'name': 'cindersnapvol',
+        'size': test_vol['size']
+    }
+
+    def __init__(self, method):
+        super(TestZFSSANFSDriver, self).__init__(method)
+
+    @mock.patch.object(zfssanfs, 'factory_zfssa')
+    def setUp(self, _factory_zfssa):
+        super(TestZFSSANFSDriver, self).setUp()
+        self._create_fake_config()
+        _factory_zfssa.return_value = FakeNFSZFSSA()
+        self.drv = zfssanfs.ZFSSANFSDriver(configuration=self.configuration)
+        self.drv._execute = fake_utils.fake_execute
+        self.drv.do_setup({})
+
+    def _create_fake_config(self):
+        self.configuration = mock.Mock(spec=conf.Configuration)
+        self.configuration.san_ip = '1.1.1.1'
+        self.configuration.san_login = 'user'
+        self.configuration.san_password = 'passwd'
+        self.configuration.zfssa_data_ip = '2.2.2.2'
+        self.configuration.zfssa_https_port = '443'
+        self.configuration.zfssa_nfs_pool = 'pool'
+        self.configuration.zfssa_nfs_project = 'nfs_project'
+        self.configuration.zfssa_nfs_share = 'nfs_share'
+        self.configuration.zfssa_nfs_share_logbias = nfs_logbias
+        self.configuration.zfssa_nfs_share_compression = nfs_compression
+        self.configuration.zfssa_nfs_mount_options = ''
+        self.configuration.zfssa_rest_timeout = '30'
+        self.configuration.nfs_oversub_ratio = 1
+        self.configuration.nfs_used_ratio = 1
+
+    def test_create_delete_snapshot(self):
+        self.drv.create_snapshot(self.test_snap)
+        self.drv.delete_snapshot(self.test_snap)
+
+    def test_create_volume_from_snapshot(self):
+        self.drv.create_snapshot(self.test_snap)
+        with mock.patch.object(self.drv, '_ensure_shares_mounted'):
+            prov_loc = self.drv.create_volume_from_snapshot(self.test_vol_snap,
+                                                            self.test_snap,
+                                                            method='COPY')
+        self.assertEqual('2.2.2.2:/export/nfs_share',
+                         prov_loc['provider_location'])
+
+    def test_get_volume_stats(self):
+        self.drv._mounted_shares = ['nfs_share']
+        with mock.patch.object(self.drv, '_ensure_shares_mounted'):
+            with mock.patch.object(self.drv, '_get_share_capacity_info') as \
+                    mock_get_share_capacity_info:
+                mock_get_share_capacity_info.return_value = (1073741824,
+                                                             9663676416)
+                self.drv.get_volume_stats(refresh=True)
+
+    def tearDown(self):
+        super(TestZFSSANFSDriver, self).tearDown()
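
The behaviour exercised by test_get_voltype_specs above is that zfssa:* extra specs on the volume type take precedence over the backend's zfssa_lun_* defaults. A standalone sketch of that precedence, for illustration only (the driver's own implementation is _get_specs() in zfssaiscsi.py below; the helper name here is made up):

    # Illustrative sketch -- not part of the diff. Volume-type extra specs
    # win over the backend's zfssa_lun_* configuration defaults.
    LUN_SPEC_KEYS = ('volblocksize', 'sparse', 'compression', 'logbias')

    def resolve_lun_specs(extra_specs, config_defaults):
        """Merge zfssa:* extra specs over zfssa_lun_* defaults."""
        result = {}
        for key in LUN_SPEC_KEYS:
            val = extra_specs.get('zfssa:' + key)
            if val is None:
                val = config_defaults.get('zfssa_lun_' + key)
            if val not in (None, ''):
                result[key] = val
        return result

    # 'gzip' and '128k' from the volume type override the backend defaults;
    # sparse and logbias fall back to the configured values.
    print(resolve_lun_specs(
        {'zfssa:compression': 'gzip', 'zfssa:volblocksize': '128k'},
        {'zfssa_lun_compression': 'off', 'zfssa_lun_sparse': False,
         'zfssa_lun_volblocksize': '8k', 'zfssa_lun_logbias': 'latency'}))
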
diff --git a/cinder/volume/drivers/zfssa/zfssaiscsi.py b/cinder/volume/drivers/zfssa/zfssaiscsi.py
new file mode 100644 (file)
index 0000000..2b0b81a
--- /dev/null
@@ -0,0 +1,476 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+ZFS Storage Appliance Cinder Volume Driver
+"""
+import ast
+import base64
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import units
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder.volume import driver
+from cinder.volume.drivers.san import san
+from cinder.volume.drivers.zfssa import zfssarest
+from cinder.volume import volume_types
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+ZFSSA_OPTS = [
+    cfg.StrOpt('zfssa_pool',
+               help='Storage pool name.'),
+    cfg.StrOpt('zfssa_project',
+               help='Project name.'),
+    cfg.StrOpt('zfssa_lun_volblocksize', default='8k',
+               choices=['512', '1k', '2k', '4k', '8k', '16k', '32k', '64k',
+                        '128k'],
+               help='Block size.'),
+    cfg.BoolOpt('zfssa_lun_sparse', default=False,
+                help='Flag to enable sparse (thin-provisioned): True, False.'),
+    cfg.StrOpt('zfssa_lun_compression', default='off',
+               choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'],
+               help='Data compression.'),
+    cfg.StrOpt('zfssa_lun_logbias', default='latency',
+               choices=['latency', 'throughput'],
+               help='Synchronous write bias.'),
+    cfg.StrOpt('zfssa_initiator_group', default='',
+               help='iSCSI initiator group.'),
+    cfg.StrOpt('zfssa_initiator', default='',
+               help='iSCSI initiator IQNs. (comma separated)'),
+    cfg.StrOpt('zfssa_initiator_user', default='',
+               help='iSCSI initiator CHAP user (name).'),
+    cfg.StrOpt('zfssa_initiator_password', default='',
+               help='Secret of the iSCSI initiator CHAP user.', secret=True),
+    cfg.StrOpt('zfssa_initiator_config', default='',
+               help='iSCSI initiators configuration.'),
+    cfg.StrOpt('zfssa_target_group', default='tgt-grp',
+               help='iSCSI target group name.'),
+    cfg.StrOpt('zfssa_target_user', default='',
+               help='iSCSI target CHAP user (name).'),
+    cfg.StrOpt('zfssa_target_password', default='', secret=True,
+               help='Secret of the iSCSI target CHAP user.'),
+    cfg.StrOpt('zfssa_target_portal',
+               help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'),
+    cfg.StrOpt('zfssa_target_interfaces',
+               help='Network interfaces of iSCSI targets. (comma separated)'),
+    cfg.IntOpt('zfssa_rest_timeout',
+               help='REST connection timeout. (seconds)')
+
+]
+
+CONF.register_opts(ZFSSA_OPTS)
+
+ZFSSA_LUN_SPECS = {
+    'zfssa:volblocksize',
+    'zfssa:sparse',
+    'zfssa:compression',
+    'zfssa:logbias',
+}
+
+
+def factory_zfssa():
+    return zfssarest.ZFSSAApi()
+
+
+class ZFSSAISCSIDriver(driver.ISCSIDriver):
+    """ZFSSA Cinder iSCSI volume driver."""
+
+    VERSION = '1.0.0'
+    protocol = 'iSCSI'
+
+    def __init__(self, *args, **kwargs):
+        super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(ZFSSA_OPTS)
+        self.configuration.append_config_values(san.san_opts)
+        self.zfssa = None
+        self._stats = None
+        self.tgtiqn = None
+
+    def _get_target_alias(self):
+        """return target alias."""
+        return self.configuration.zfssa_target_group
+
+    def do_setup(self, context):
+        """Setup - create multiple elements.
+
+        Project, initiators, initiatorgroup, target and targetgroup.
+        """
+        lcfg = self.configuration
+        LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)
+        self.zfssa = factory_zfssa()
+        self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout)
+        auth_str = base64.encodestring('%s:%s' %
+                                       (lcfg.san_login,
+                                        lcfg.san_password))[:-1]
+        self.zfssa.login(auth_str)
+        self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
+                                  compression=lcfg.zfssa_lun_compression,
+                                  logbias=lcfg.zfssa_lun_logbias)
+
+        if (lcfg.zfssa_initiator_config != ''):
+            initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
+            for initiator_group in initiator_config:
+                zfssa_initiator_group = initiator_group
+                for zfssa_initiator in initiator_config[zfssa_initiator_group]:
+                    self.zfssa.create_initiator(zfssa_initiator['iqn'],
+                                                zfssa_initiator_group + '-' +
+                                                zfssa_initiator['iqn'],
+                                                chapuser=
+                                                zfssa_initiator['user'],
+                                                chapsecret=
+                                                zfssa_initiator['password'])
+                    if (zfssa_initiator_group != 'default'):
+                        self.zfssa.add_to_initiatorgroup(
+                            zfssa_initiator['iqn'],
+                            zfssa_initiator_group)
+        else:
+            LOG.warning(_LW('zfssa_initiator_config not found. '
+                            'Using deprecated configuration options.'))
+            if (lcfg.zfssa_initiator != '' and
+                (lcfg.zfssa_initiator_group == '' or
+                 lcfg.zfssa_initiator_group == 'default')):
+                LOG.warning(_LW('zfssa_initiator: %(ini)s'
+                                ' won\'t be used on '
+                                'zfssa_initiator_group= %(inigrp)s.'),
+                            {'ini': lcfg.zfssa_initiator,
+                             'inigrp': lcfg.zfssa_initiator_group})
+
+            # Setup initiator and initiator group
+            if (lcfg.zfssa_initiator != '' and
+               lcfg.zfssa_initiator_group != '' and
+               lcfg.zfssa_initiator_group != 'default'):
+                for initiator in lcfg.zfssa_initiator.split(','):
+                    self.zfssa.create_initiator(
+                        initiator, lcfg.zfssa_initiator_group + '-' +
+                        initiator, chapuser=lcfg.zfssa_initiator_user,
+                        chapsecret=lcfg.zfssa_initiator_password)
+                    self.zfssa.add_to_initiatorgroup(
+                        initiator, lcfg.zfssa_initiator_group)
+
+        # Parse interfaces
+        interfaces = []
+        for interface in lcfg.zfssa_target_interfaces.split(','):
+            if interface == '':
+                continue
+            interfaces.append(interface)
+
+        # Setup target and target group
+        iqn = self.zfssa.create_target(
+            self._get_target_alias(),
+            interfaces,
+            tchapuser=lcfg.zfssa_target_user,
+            tchapsecret=lcfg.zfssa_target_password)
+
+        self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
+
+    def check_for_setup_error(self):
+        """Check that driver can login.
+
+        Check also pool, project, initiators, initiatorgroup, target and
+        targetgroup.
+        """
+        lcfg = self.configuration
+
+        self.zfssa.verify_pool(lcfg.zfssa_pool)
+        self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project)
+
+        if (lcfg.zfssa_initiator_config != ''):
+            initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
+            for initiator_group in initiator_config:
+                zfssa_initiator_group = initiator_group
+                for zfssa_initiator in initiator_config[zfssa_initiator_group]:
+                    self.zfssa.verify_initiator(zfssa_initiator['iqn'])
+        else:
+            if (lcfg.zfssa_initiator != '' and
+               lcfg.zfssa_initiator_group != '' and
+               lcfg.zfssa_initiator_group != 'default'):
+                for initiator in lcfg.zfssa_initiator.split(','):
+                    self.zfssa.verify_initiator(initiator)
+
+            self.zfssa.verify_target(self._get_target_alias())
+
+    def _get_provider_info(self, volume, lun=None):
+        """Return provider information."""
+        lcfg = self.configuration
+        if lun is None:
+            lun = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                     lcfg.zfssa_project,
+                                     volume['name'])
+
+        if isinstance(lun['number'], list):
+            lun['number'] = lun['number'][0]
+
+        if self.tgtiqn is None:
+            self.tgtiqn = self.zfssa.get_target(self._get_target_alias())
+
+        loc = "%s %s %s" % (lcfg.zfssa_target_portal, self.tgtiqn,
+                            lun['number'])
+        LOG.debug('_get_provider_info: provider_location: %s', loc)
+        provider = {'provider_location': loc}
+        if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '':
+            provider['provider_auth'] = ('CHAP %s %s' %
+                                         (lcfg.zfssa_target_user,
+                                          lcfg.zfssa_target_password))
+
+        return provider
+
+    def create_volume(self, volume):
+        """Create a volume on ZFSSA."""
+        LOG.debug('zfssa.create_volume: volume=%s', volume['name'])
+        lcfg = self.configuration
+        volsize = str(volume['size']) + 'g'
+        specs = self._get_voltype_specs(volume)
+        self.zfssa.create_lun(lcfg.zfssa_pool,
+                              lcfg.zfssa_project,
+                              volume['name'],
+                              volsize,
+                              lcfg.zfssa_target_group,
+                              specs)
+
+    def delete_volume(self, volume):
+        """Deletes a volume with the given volume['name']."""
+        LOG.debug('zfssa.delete_volume: name=%s', volume['name'])
+        lcfg = self.configuration
+        lun2del = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                     lcfg.zfssa_project,
+                                     volume['name'])
+        # Delete clone temp snapshot. see create_cloned_volume()
+        if 'origin' in lun2del and 'id' in volume:
+            if lun2del['nodestroy']:
+                self.zfssa.set_lun_props(lcfg.zfssa_pool,
+                                         lcfg.zfssa_project,
+                                         volume['name'],
+                                         nodestroy=False)
+
+            tmpsnap = 'tmp-snapshot-%s' % volume['id']
+            if lun2del['origin']['snapshot'] == tmpsnap:
+                self.zfssa.delete_snapshot(lcfg.zfssa_pool,
+                                           lcfg.zfssa_project,
+                                           lun2del['origin']['share'],
+                                           lun2del['origin']['snapshot'])
+                return
+
+        self.zfssa.delete_lun(pool=lcfg.zfssa_pool,
+                              project=lcfg.zfssa_project,
+                              lun=volume['name'])
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot of a volume.
+
+        Snapshot name: snapshot['name']
+        Volume name: snapshot['volume_name']
+        """
+        LOG.debug('zfssa.create_snapshot: snapshot=%s', snapshot['name'])
+        lcfg = self.configuration
+        self.zfssa.create_snapshot(lcfg.zfssa_pool,
+                                   lcfg.zfssa_project,
+                                   snapshot['volume_name'],
+                                   snapshot['name'])
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        LOG.debug('zfssa.delete_snapshot: snapshot=%s', snapshot['name'])
+        lcfg = self.configuration
+        has_clones = self.zfssa.has_clones(lcfg.zfssa_pool,
+                                           lcfg.zfssa_project,
+                                           snapshot['volume_name'],
+                                           snapshot['name'])
+        if has_clones:
+            LOG.error(_LE('Snapshot %s: has clones'), snapshot['name'])
+            raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
+
+        self.zfssa.delete_snapshot(lcfg.zfssa_pool,
+                                   lcfg.zfssa_project,
+                                   snapshot['volume_name'],
+                                   snapshot['name'])
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot - clone a snapshot."""
+        LOG.debug('zfssa.create_volume_from_snapshot: volume=%s',
+                  volume['name'])
+        LOG.debug('zfssa.create_volume_from_snapshot: snapshot=%s',
+                  snapshot['name'])
+        if not self._verify_clone_size(snapshot, volume['size'] * units.Gi):
+            exception_msg = (_('Error verifying clone size on '
+                               'Volume clone: %(clone)s '
+                               'Size: %(size)d on '
+                               'Snapshot: %(snapshot)s')
+                             % {'clone': volume['name'],
+                                'size': volume['size'],
+                                'snapshot': snapshot['name']})
+            LOG.error(exception_msg)
+            raise exception.InvalidInput(reason=exception_msg)
+
+        lcfg = self.configuration
+        self.zfssa.clone_snapshot(lcfg.zfssa_pool,
+                                  lcfg.zfssa_project,
+                                  snapshot['volume_name'],
+                                  snapshot['name'],
+                                  volume['name'])
+
+    def _update_volume_status(self):
+        """Retrieve status info from volume group."""
+        LOG.debug("Updating volume status")
+        self._stats = None
+        data = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data["volume_backend_name"] = backend_name or self.__class__.__name__
+        data["vendor_name"] = 'Oracle'
+        data["driver_version"] = self.VERSION
+        data["storage_protocol"] = self.protocol
+
+        lcfg = self.configuration
+        (avail, total) = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
+        if avail is None or total is None:
+            return
+
+        data['total_capacity_gb'] = int(total) / units.Gi
+        data['free_capacity_gb'] = int(avail) / units.Gi
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+        self._stats = data
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status.
+
+        If 'refresh' is True, update the stats first.
+        """
+        if refresh:
+            self._update_volume_status()
+        return self._stats
+
+    def create_export(self, context, volume):
+        pass
+
+    def remove_export(self, context, volume):
+        pass
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def extend_volume(self, volume, new_size):
+        """Driver entry point to extent volume size."""
+        LOG.debug('extend_volume: volume name: %s', volume['name'])
+        lcfg = self.configuration
+        self.zfssa.set_lun_props(lcfg.zfssa_pool,
+                                 lcfg.zfssa_project,
+                                 volume['name'],
+                                 volsize=new_size * units.Gi)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Create a clone of the specified volume."""
+        zfssa_snapshot = {'volume_name': src_vref['name'],
+                          'name': 'tmp-snapshot-%s' % volume['id']}
+        self.create_snapshot(zfssa_snapshot)
+        try:
+            self.create_volume_from_snapshot(volume, zfssa_snapshot)
+        except exception.VolumeBackendAPIException:
+            LOG.error(_LE('Clone Volume:'
+                          '%(volume)s failed from source volume:'
+                          '%(src_vref)s'),
+                      {'volume': volume['name'],
+                       'src_vref': src_vref['name']})
+            # Cleanup snapshot
+            self.delete_snapshot(zfssa_snapshot)
+
+    def local_path(self, volume):
+        """Not implemented."""
+        pass
+
+    def backup_volume(self, context, backup, backup_service):
+        """Not implemented."""
+        pass
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        """Not implemented."""
+        pass
+
+    def _verify_clone_size(self, snapshot, size):
+        """Check whether the clone size is the same as the parent volume."""
+        lcfg = self.configuration
+        lun = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                 lcfg.zfssa_project,
+                                 snapshot['volume_name'])
+        return lun['size'] == size
+
+    def initialize_connection(self, volume, connector):
+        lcfg = self.configuration
+        init_groups = self.zfssa.get_initiator_initiatorgroup(
+            connector['initiator'])
+        for initiator_group in init_groups:
+            self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
+                                              lcfg.zfssa_project,
+                                              volume['name'],
+                                              initiator_group)
+        iscsi_properties = {}
+        provider = self._get_provider_info(volume)
+
+        (target_portal, iqn, lun) = provider['provider_location'].split()
+        iscsi_properties['target_discovered'] = False
+        iscsi_properties['target_portal'] = target_portal
+        iscsi_properties['target_iqn'] = iqn
+        iscsi_properties['target_lun'] = lun
+        iscsi_properties['volume_id'] = volume['id']
+
+        if 'provider_auth' in provider:
+            (auth_method, auth_username, auth_password) = provider[
+                'provider_auth'].split()
+            iscsi_properties['auth_method'] = auth_method
+            iscsi_properties['auth_username'] = auth_username
+            iscsi_properties['auth_password'] = auth_password
+
+        return {
+            'driver_volume_type': 'iscsi',
+            'data': iscsi_properties
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Driver entry point to terminate a connection for a volume."""
+        LOG.debug('terminate_connection: volume name: %s.', volume['name'])
+        lcfg = self.configuration
+        self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
+                                          lcfg.zfssa_project,
+                                          volume['name'],
+                                          '')
+
+    def _get_voltype_specs(self, volume):
+        """Get specs suitable for volume creation."""
+        vtype = volume.get('volume_type_id', None)
+        extra_specs = None
+        if vtype:
+            extra_specs = volume_types.get_volume_type_extra_specs(vtype)
+
+        return self._get_specs(extra_specs)
+
+    def _get_specs(self, xspecs):
+        """Return a dict with extra specs and/or config values."""
+        result = {}
+        for spc in ZFSSA_LUN_SPECS:
+            val = None
+            prop = spc.split(':')[1]
+            cfg = 'zfssa_lun_' + prop
+            if xspecs:
+                val = xspecs.pop(spc, None)
+
+            if val is None:
+                val = self.configuration.safe_get(cfg)
+
+            if val is not None and val != '':
+                result.update({prop: val})
+
+        return result
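
The zfssa_initiator_config option that do_setup() above parses with ast.literal_eval() is a Python-literal dict string mapping an initiator group name to a list of {'iqn', 'user', 'password'} entries (the same shape used by _create_fake_config in the tests). A hedged sketch of how such a value is walked; the group names and IQNs are placeholders:

    # Illustrative only: expected shape of zfssa_initiator_config and how
    # do_setup() iterates it. Group names and IQNs are placeholders.
    import ast

    zfssa_initiator_config = (
        "{'test-init-grp1': [{'iqn': 'iqn.1-0.org.deb:01:d7',"
        " 'user': '', 'password': ''}],"
        " 'test-init-grp2': [{'iqn': 'iqn.1-0.org.deb:01:d9',"
        " 'user': '', 'password': ''}]}"
    )

    for group, initiators in ast.literal_eval(zfssa_initiator_config).items():
        for ini in initiators:
            # do_setup() creates each initiator (optionally with CHAP
            # credentials) and, for groups other than 'default', adds it to
            # the named initiator group on the appliance.
            print(group, ini['iqn'])
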
index f9ccf896a4f23d7e06f3143940f46bce0f3b0f5d..53ec0678c18cc2b5444578aa475d60efe1a30cd2 100644 (file)
@@ -89,12 +89,9 @@ class ZFSSAApi(object):
         val = json.loads(ret.data)
 
         if not self._is_pool_owned(val):
-            exception_msg = (_('Error Pool ownership: '
-                               'Pool %(pool)s is not owned '
-                               'by %(host)s.')
-                             % {'pool': pool,
-                                'host': self.host})
-            LOG.error(exception_msg)
+            LOG.error(_LE('Error Pool ownership: Pool %(pool)s is not owned '
+                          'by %(host)s.'),
+                      {'pool': pool, 'host': self.host})
             raise exception.InvalidInput(reason=pool)
 
         avail = val['pool']['usage']['available']
@@ -416,6 +413,9 @@ class ZFSSAApi(object):
             LOG.error(exception_msg)
             raise exception.VolumeBackendAPIException(data=exception_msg)
 
+        val = json.loads(ret.data)
+        return val
+
     def get_lun(self, pool, project, lun):
         """return iscsi lun properties."""
         svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
@@ -446,8 +446,6 @@ class ZFSSAApi(object):
         }
         if 'origin' in val['lun']:
             ret.update({'origin': val['lun']['origin']})
-        if isinstance(ret['number'], list):
-            ret['number'] = ret['number'][0]
 
         return ret
 
@@ -464,20 +462,16 @@ class ZFSSAApi(object):
 
         ret = self.rclient.put(svc, arg)
         if ret.status != restclient.Status.ACCEPTED:
-            exception_msg = (_('Error Setting '
-                               'Volume: %(lun)s to '
-                               'InitiatorGroup: %(initiatorgroup)s '
-                               'Pool: %(pool)s '
-                               'Project: %(project)s  '
-                               'Return code: %(ret.status)d '
-                               'Message: %(ret.data)s.')
-                             % {'lun': lun,
-                                'initiatorgroup': initiatorgroup,
-                                'pool': pool,
-                                'project': project,
-                                'ret.status': ret.status,
-                                'ret.data': ret.data})
-            LOG.error(exception_msg)
+            LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: '
+                          '%(initiatorgroup)s Pool: %(pool)s Project: '
+                          '%(project)s  Return code: %(ret.status)d Message: '
+                          '%(ret.data)s.'),
+                      {'lun': lun,
+                       'initiatorgroup': initiatorgroup,
+                       'pool': pool,
+                       'project': project,
+                       'ret.status': ret.status,
+                       'ret.data': ret.data})
 
     def delete_lun(self, pool, project, lun):
         """delete iscsi lun."""
@@ -486,18 +480,14 @@ class ZFSSAApi(object):
 
         ret = self.rclient.delete(svc)
         if ret.status != restclient.Status.NO_CONTENT:
-            exception_msg = (_('Error Deleting '
-                               'Volume: %(lun)s to '
-                               'Pool: %(pool)s '
-                               'Project: %(project)s  '
-                               'Return code: %(ret.status)d '
-                               'Message: %(ret.data)s.')
-                             % {'lun': lun,
-                                'pool': pool,
-                                'project': project,
-                                'ret.status': ret.status,
-                                'ret.data': ret.data})
-            LOG.error(exception_msg)
+            LOG.error(_LE('Error Deleting Volume: %(lun)s to Pool: %(pool)s '
+                          'Project: %(project)s  Return code: %(ret.status)d '
+                          'Message: %(ret.data)s.'),
+                      {'lun': lun,
+                       'pool': pool,
+                       'project': project,
+                       'ret.status': ret.status,
+                       'ret.data': ret.data})
 
     def create_snapshot(self, pool, project, lun, snapshot):
         """create snapshot."""
@@ -633,9 +623,9 @@ class ZFSSAApi(object):
         svc = "/api/san/v1/iscsi/initiator-groups"
         ret = self.rclient.get(svc)
         if ret.status != restclient.Status.OK:
-            LOG.error(_LE('Error getting initiator groups.'))
-            exception_msg = (_('Error getting initiator groups.'))
-            raise exception.VolumeBackendAPIException(data=exception_msg)
+            msg = _('Error getting initiator groups.')
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
         val = json.loads(ret.data)
         for initiator_group in val['groups']:
             if initiator in initiator_group['initiators']:
@@ -762,7 +752,8 @@ class ZFSSANfsApi(ZFSSAApi):
             LOG.error(exception_msg)
             raise exception.VolumeBackendAPIException(data=exception_msg)
         data = json.loads(ret.data)['service']
-        LOG.debug('%s service state: %s' % (service, data))
+        LOG.debug('%(service)s service state: %(data)s',
+                  {'service': service, 'data': data})
 
         status = 'online' if state == 'enable' else 'disabled'
 
@@ -833,9 +824,9 @@ class ZFSSANfsApi(ZFSSAApi):
             raise exception.VolumeBackendAPIException(data=exception_msg)
         data = json.loads(ret.data)['service']
         LOG.debug('Modify %(service)s service '
-                  'return data: %(data)s'
-                  {'service': service,
-                     'data': data})
+                  'return data: %(data)s',
+                  {'service': service,
+                   'data': data})
 
     def create_share(self, pool, project, share, args):
         """Create a share in the specified pool and project"""