# under the License.
"""Unit tests for Oracle's ZFSSA Cinder volume driver."""
+from datetime import date
import json
+import math
import mock
from oslo_utils import units
+import six
+from cinder import exception
from cinder import test
from cinder.tests.unit import fake_utils
from cinder.volume import configuration as conf
+from cinder.volume import driver
+from cinder.volume.drivers import remotefs
from cinder.volume.drivers.zfssa import restclient as client
from cinder.volume.drivers.zfssa import webdavclient
from cinder.volume.drivers.zfssa import zfssaiscsi as iscsi
nfs_logbias = 'latency'
nfs_compression = 'off'
+zfssa_cache_dir = 'os-cinder-cache'
+
+no_virtsize_img = {
+ 'id': 'no_virtsize_img_id1234',
+ 'size': 654321,
+ 'updated_at': date(2015, 1, 1),
+}
+
+small_img = {
+ 'id': 'small_id1234',
+ 'size': 654321,
+ 'properties': {'virtual_size': 2361393152},
+ 'updated_at': date(2015, 1, 1),
+}
+
+large_img = {
+ 'id': 'large_id5678',
+ 'size': 50000000,
+ 'properties': {'virtual_size': 11806965760},
+ 'updated_at': date(2015, 2, 2),
+}
+
+fakespecs = {
+ 'prop1': 'prop1_val',
+ 'prop2': 'prop2_val',
+}
+
+small_img_props = {
+ 'size': 3,
+}
+
+img_props_nfs = {
+ 'image_id': small_img['id'],
+ 'updated_at': small_img['updated_at'].isoformat(),
+ 'size': 3,
+ 'name': '%(dir)s/os-cache-vol-%(name)s' % ({'dir': zfssa_cache_dir,
+ 'name': small_img['id']})
+}
+
+fakecontext = 'fakecontext'
+img_service = 'fakeimgservice'
+img_location = 'fakeimglocation'
class FakeResponse(object):
test_vol = {
'name': 'cindervol',
- 'size': 1,
+ 'size': 3,
'id': 1,
'provider_location': 'fake_location 1 2',
'provider_auth': 'fake_auth user pass',
}
+ test_vol2 = {
+ 'name': 'cindervol2',
+ 'size': 5,
+ 'id': 2,
+ 'provider_location': 'fake_location 3 4',
+ 'provider_auth': 'fake_auth user pass',
+ }
+
test_snap = {
'name': 'cindersnap',
'volume_name': test_vol['name']
self.configuration.zfssa_target_interfaces = 'e1000g0'
self.configuration.zfssa_rest_timeout = 60
self.configuration.volume_backend_name = 'fake_zfssa'
+ self.configuration.zfssa_enable_local_cache = True
+ self.configuration.zfssa_cache_project = zfssa_cache_dir
self.configuration.safe_get = self.fake_safe_get
self.configuration.zfssa_replication_ip = '1.1.1.1'
project=lcfg.zfssa_project,
lun=self.test_vol['name'])
+ @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_check_origin')
+ def test_delete_cache_volume(self, _check_origin):
+ lcfg = self.configuration
+ lun2del = {
+ 'guid': '00000000000000000000000000000',
+ 'number': 0,
+ 'initiatorgroup': 'default',
+ 'size': 1,
+ 'nodestroy': False,
+ 'origin': {
+ 'project': lcfg.zfssa_cache_project,
+ 'snapshot': 'image-%s' % small_img['id'],
+ 'share': 'os-cache-vol-%s' % small_img['id'],
+ }
+ }
+ self.drv.zfssa.get_lun.return_value = lun2del
+ self.drv.delete_volume(self.test_vol)
+ self.drv._check_origin.assert_called_once_with(lun2del,
+ self.test_vol['name'])
+
+ def test_check_origin(self):
+ lcfg = self.configuration
+ lun2del = {
+ 'guid': '00000000000000000000000000000',
+ 'number': 0,
+ 'initiatorgroup': 'default',
+ 'size': 1,
+ 'nodestroy': False,
+ 'origin': {
+ 'project': lcfg.zfssa_cache_project,
+ 'snapshot': 'image-%s' % small_img['id'],
+ 'share': 'os-cache-vol-%s' % small_img['id'],
+ }
+ }
+ cache = lun2del['origin']
+ self.drv.zfssa.num_clones.return_value = 0
+ self.drv._check_origin(lun2del, 'volname')
+ self.drv.zfssa.delete_lun.assert_called_once_with(
+ lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache['share'])
+
def test_create_delete_snapshot(self):
- self.drv.zfssa.has_clones.return_value = False
+ self.drv.zfssa.num_clones.return_value = 0
lcfg = self.configuration
self.drv.create_snapshot(self.test_snap)
self.drv.zfssa.create_snapshot.assert_called_once_with(
lcfg.zfssa_project,
self.test_snap['volume_name'],
self.test_snap['name'],
+ lcfg.zfssa_project,
self.test_vol_snap['name'])
@mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_provider_info')
val = None
return val
+ @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_cache_volume')
+ def test_clone_image_negative(self, _verify_cache_volume):
+ # Disabling local cache feature:
+ self.configuration.zfssa_enable_local_cache = False
+
+ self.assertEqual((None, False),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ small_img,
+ img_service))
+
+ self.configuration.zfssa_enable_local_cache = True
+ # Creating a volume smaller than image:
+ self.assertEqual((None, False),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ large_img,
+ img_service))
+
+ # The image does not have virtual_size property:
+ self.assertEqual((None, False),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ no_virtsize_img,
+ img_service))
+
+ # Exception raised in _verify_cache_volume
+ self.drv._verify_cache_volume.side_effect = (
+ exception.VolumeBackendAPIException('fakeerror'))
+ self.assertEqual((None, False),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ small_img,
+ img_service))
+
+ @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_voltype_specs')
+ @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_cache_volume')
+ @mock.patch.object(iscsi.ZFSSAISCSIDriver, 'extend_volume')
+ def test_clone_image(self, _extend_vol, _verify_cache, _get_specs):
+ lcfg = self.configuration
+ cache_vol = 'os-cache-vol-%s' % small_img['id']
+ cache_snap = 'image-%s' % small_img['id']
+ self.drv._get_voltype_specs.return_value = fakespecs.copy()
+ self.drv._verify_cache_volume.return_value = cache_vol, cache_snap
+ model, cloned = self.drv.clone_image(fakecontext, self.test_vol2,
+ img_location,
+ small_img,
+ img_service)
+ self.drv._verify_cache_volume.assert_called_once_with(fakecontext,
+ small_img,
+ img_service,
+ fakespecs,
+ small_img_props)
+ self.drv.zfssa.clone_snapshot.assert_called_once_with(
+ lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache_vol,
+ cache_snap,
+ lcfg.zfssa_project,
+ self.test_vol2['name'])
+
+ self.drv.extend_volume.assert_called_once_with(self.test_vol2,
+ self.test_vol2['size'])
+
+ @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_create_cache_volume')
+ def test_verify_cache_vol_no_cache_vol(self, _create_cache_vol):
+ vol_name = 'os-cache-vol-%s' % small_img['id']
+ self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound(
+ volume_id=vol_name)
+ self.drv._verify_cache_volume(fakecontext, small_img,
+ img_service, fakespecs, small_img_props)
+ self.drv._create_cache_volume.assert_called_once_with(fakecontext,
+ small_img,
+ img_service,
+ fakespecs,
+ small_img_props)
+
+ def test_verify_cache_vol_no_cache_snap(self):
+ snap_name = 'image-%s' % small_img['id']
+ self.drv.zfssa.get_lun_snapshot.side_effect = (
+ exception.SnapshotNotFound(snapshot_id=snap_name))
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.drv._verify_cache_volume,
+ fakecontext,
+ small_img,
+ img_service,
+ fakespecs,
+ small_img_props)
+
+ def test_verify_cache_vol_stale_vol(self):
+ self.drv.zfssa.get_lun_snapshot.return_value = {'numclones': 5}
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.drv._verify_cache_volume,
+ fakecontext,
+ small_img,
+ img_service,
+ fakespecs,
+ small_img_props)
+
+ @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_create_cache_volume')
+ def test_verify_cache_vol_updated_vol(self, _create_cache_vol):
+ lcfg = self.configuration
+ updated_vol = {
+ 'updated_at': date(3000, 12, 12),
+ 'image_id': 'updated_id',
+ }
+ cachevol_name = 'os-cache-vol-%s' % small_img['id']
+ self.drv.zfssa.get_lun.return_value = updated_vol
+ self.drv.zfssa.get_lun_snapshot.return_value = {'numclones': 0}
+ self.drv._verify_cache_volume(fakecontext, small_img,
+ img_service, fakespecs, small_img_props)
+ self.drv.zfssa.delete_lun.assert_called_once_with(
+ lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cachevol_name)
+ self.drv._create_cache_volume.assert_called_once_with(fakecontext,
+ small_img,
+ img_service,
+ fakespecs,
+ small_img_props)
+
+ @mock.patch.object(driver.BaseVD, 'copy_image_to_volume')
+ def test_create_cache_volume(self, _copy_image):
+ lcfg = self.configuration
+ virtual_size = int(small_img['properties'].get('virtual_size'))
+ volsize = math.ceil(float(virtual_size) / units.Gi)
+ lunsize = "%sg" % six.text_type(int(volsize))
+ volname = 'os-cache-vol-%s' % small_img['id']
+ snapname = 'image-%s' % small_img['id']
+ cachevol_props = {
+ 'cache_name': volname,
+ 'snap_name': snapname,
+ }
+ cachevol_props.update(small_img_props)
+ cache_vol = {
+ 'name': volname,
+ 'id': small_img['id'],
+ 'size': volsize,
+ }
+ lun_props = {
+ 'custom:image_id': small_img['id'],
+ 'custom:updated_at': (
+ six.text_type(small_img['updated_at'].isoformat())),
+ }
+ lun_props.update(fakespecs)
+
+ self.drv._create_cache_volume(fakecontext,
+ small_img,
+ img_service,
+ fakespecs,
+ cachevol_props)
+
+ self.drv.zfssa.create_lun.assert_called_once_with(
+ lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache_vol['name'],
+ lunsize,
+ lcfg.zfssa_target_group,
+ lun_props)
+ _copy_image.assert_called_once_with(fakecontext,
+ cache_vol,
+ img_service,
+ small_img['id'])
+ self.drv.zfssa.create_snapshot.assert_called_once_with(
+ lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache_vol['name'],
+ snapname)
+
+ def test_create_cache_vol_negative(self):
+ lcfg = self.configuration
+ volname = 'os-cache-vol-%s' % small_img['id']
+ snapname = 'image-%s' % small_img['id']
+ cachevol_props = {
+ 'cache_name': volname,
+ 'snap_name': snapname,
+ }
+ cachevol_props.update(small_img)
+
+ self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound(
+ volume_id=volname)
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.drv._create_cache_volume,
+ fakecontext,
+ small_img,
+ img_service,
+ fakespecs,
+ cachevol_props)
+ self.drv.zfssa.delete_lun.assert_called_once_with(
+ lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ volname)
+
class TestZFSSANFSDriver(test.TestCase):
test_vol = {
'name': 'test-vol',
- 'size': 1,
- 'id': '1'
+ 'id': '1',
+ 'size': 3,
+ 'provider_location': 'fakelocation',
}
test_snap = {
self.configuration.zfssa_rest_timeout = '30'
self.configuration.nfs_oversub_ratio = 1
self.configuration.nfs_used_ratio = 1
+ self.configuration.zfssa_enable_local_cache = True
+ self.configuration.zfssa_cache_directory = zfssa_cache_dir
def test_migrate_volume(self):
self.drv.zfssa.get_asn.return_value = (
def tearDown(self):
super(TestZFSSANFSDriver, self).tearDown()
+ @mock.patch.object(remotefs.RemoteFSDriver, 'delete_volume')
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_check_origin')
+ def test_delete_volume(self, _check_origin, _delete_vol):
+ self.drv.zfssa.get_volume.side_effect = self._get_volume_side_effect
+ self.drv.delete_volume(self.test_vol)
+ _delete_vol.assert_called_once_with(self.test_vol)
+ self.drv._check_origin.assert_called_once_with(img_props_nfs['name'])
+
+ def _get_volume_side_effect(self, *args, **kwargs):
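+ # Fake get_volume(): paths under the cache directory report a clone
+ # count, while ordinary volumes report the cache volume they
+ # originate from.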
+ lcfg = self.configuration
+ volname = six.text_type(args[0])
+ if volname.startswith(lcfg.zfssa_cache_directory):
+ return {'numclones': 0}
+ else:
+ return {'origin': img_props_nfs['name']}
+
+ def test_check_origin(self):
+ self.drv.zfssa.get_volume.side_effect = self._get_volume_side_effect
+ self.drv._check_origin(img_props_nfs['name'])
+ self.drv.zfssa.delete_file.assert_called_once_with(
+ img_props_nfs['name'])
+
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_cache_volume')
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'create_cloned_volume')
+ def test_clone_image_negative(self, _create_clone, _verify_cache_volume):
+ # Disabling local cache feature:
+ self.configuration.zfssa_enable_local_cache = False
+ self.assertEqual((None, False),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ small_img,
+ img_service))
+
+ self.configuration.zfssa_enable_local_cache = True
+
+ # Creating a volume smaller than image:
+ self.assertEqual((None, False),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ large_img,
+ img_service))
+
+ # The image does not have virtual_size property:
+ self.assertEqual((None, False),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ no_virtsize_img,
+ img_service))
+
+ # Exception raised in _verify_cache_volume
+ self.drv._verify_cache_volume.side_effect = (
+ exception.VolumeBackendAPIException('fakeerror'))
+ self.assertEqual((None, False),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ small_img,
+ img_service))
+
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'create_cloned_volume')
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_cache_volume')
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'extend_volume')
+ def test_clone_image(self, _extend_vol, _verify_cache, _create_clone):
+ self.drv._verify_cache_volume.return_value = img_props_nfs['name']
+ prov_loc = {'provider_location': self.test_vol['provider_location']}
+ self.drv.create_cloned_volume.return_value = prov_loc
+ self.assertEqual((prov_loc, True),
+ self.drv.clone_image(fakecontext, self.test_vol,
+ img_location,
+ small_img,
+ img_service))
+ self.drv._verify_cache_volume.assert_called_once_with(fakecontext,
+ small_img,
+ img_service,
+ img_props_nfs)
+ cache_vol = {
+ 'name': img_props_nfs['name'],
+ 'size': 3,
+ 'id': small_img['id'],
+ }
+ self.drv.create_cloned_volume.assert_called_once_with(self.test_vol,
+ cache_vol)
+
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_create_cache_volume')
+ def test_verify_cache_vol_no_cache_vol(self, _create_cache_vol):
+ self.drv.zfssa.get_volume.side_effect = exception.VolumeNotFound(
+ volume_id=img_props_nfs['name'])
+ self.drv._verify_cache_volume(fakecontext, small_img,
+ img_service, img_props_nfs)
+ self.drv._create_cache_volume.assert_called_once_with(fakecontext,
+ small_img,
+ img_service,
+ img_props_nfs)
+
+ def test_verify_cache_vol_stale_vol(self):
+ self.drv.zfssa.get_volume.return_value = {
+ 'numclones': 5,
+ 'updated_at': small_img['updated_at'].isoformat(),
+ 'image_id': 'wrong_id',
+ }
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.drv._verify_cache_volume,
+ fakecontext,
+ small_img,
+ img_service,
+ img_props_nfs)
+
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_create_cache_volume')
+ @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'delete_volume')
+ def test_verify_cache_vol_updated_vol(self, _del_vol, _create_cache_vol):
+ updated_vol = {
+ 'updated_at': date(3000, 12, 12),
+ 'image_id': 'updated_id',
+ 'numclones': 0,
+ }
+ self.drv.zfssa.get_volume.return_value = updated_vol
+ self.drv._verify_cache_volume(fakecontext, small_img,
+ img_service, img_props_nfs)
+ cache_vol = {
+ 'provider_location': mock.ANY,
+ 'name': img_props_nfs['name'],
+ }
+ self.drv.delete_volume.assert_called_once_with(cache_vol)
+ self.drv._create_cache_volume.assert_called_once_with(fakecontext,
+ small_img,
+ img_service,
+ img_props_nfs)
+
+ @mock.patch.object(remotefs.RemoteFSDriver, 'copy_image_to_volume')
+ @mock.patch.object(remotefs.RemoteFSDriver, 'create_volume')
+ def test_create_cache_volume(self, _create_vol, _copy_image):
+ virtual_size = int(small_img['properties'].get('virtual_size'))
+ volsize = math.ceil(float(virtual_size) / units.Gi)
+ cache_vol = {
+ 'name': img_props_nfs['name'],
+ 'size': volsize,
+ 'provider_location': mock.ANY,
+ }
+ self.drv._create_cache_volume(fakecontext,
+ small_img,
+ img_service,
+ img_props_nfs)
+
+ _create_vol.assert_called_once_with(cache_vol)
+ _copy_image.assert_called_once_with(fakecontext,
+ cache_vol,
+ img_service,
+ small_img['id'])
+
+ def test_create_cache_vol_negative(self):
+ self.drv.zfssa.get_lun.side_effect = (
+ exception.VolumeBackendAPIException)
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.drv._create_cache_volume,
+ fakecontext,
+ small_img,
+ img_service,
+ img_props_nfs)
+ self.drv.zfssa.delete_file.assert_called_once_with(
+ img_props_nfs['name'])
+
class TestZFSSAApi(test.TestCase):
self.project,
self.vol,
self.snap,
+ self.project,
self.clone)
expected_svc = '/api/storage/v1/pools/' + self.pool + '/projects/' + \
self.project + '/luns/' + self.vol + '/snapshots/' + self.snap + \
-# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
'Bad_Gateway': bad_gateway_err
}
+propertyupdate_data = """<?xml version="1.0"?>
+ <D:propertyupdate xmlns:D="DAV:">
+ <D:set>
+ <D:prop>
+ <D:prop_name>prop_val</D:prop_name>
+ </D:prop>
+ </D:set>
+ </D:propertyupdate>"""
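+# 'prop_name' and 'prop_val' above are placeholder tokens; build_data()
+# substitutes the real property name and value before a PROPPATCH request.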
+
class ZFSSAWebDAVClient(object):
def __init__(self, url, auth_str, **kwargs):
return msg
- def request(self, src_file="", dst_file="", method="", maxretries=10):
+ def build_data(self, data, propname, value):
+ res = data.replace('prop_name', propname)
+ res = res.replace('prop_val', value)
+ return res
+
+ def set_file_prop(self, filename, propname, propval):
+ data = self.build_data(propertyupdate_data, propname, propval)
+ return self.request(src_file=filename, data=data, method='PROPPATCH')
+
+ def request(self, src_file="", dst_file="", method="", maxretries=10,
+ data=None):
retry = 0
src_url = self.https_path + "/" + src_file
dst_url = self.https_path + "/" + dst_file
- request = urllib.request.Request(src_url)
+ request = urllib.request.Request(url=src_url, data=data)
if dst_file != "":
request.add_header('Destination', dst_url)
+ if method == "PROPPATCH":
+ request.add_header('Translate', 'F')
request.add_header("Authorization", "Basic %s" % self.auth_str)
"""
import ast
import base64
+import math
from oslo_config import cfg
from oslo_log import log
from oslo_utils import units
+import six
from cinder import exception
+from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.san import san
help='REST connection timeout. (seconds)'),
cfg.StrOpt('zfssa_replication_ip', default='',
help='IP address used for replication data. (may be the same as '
- 'data ip)')
+ 'data ip)'),
+ cfg.BoolOpt('zfssa_enable_local_cache', default=True,
+ help='Flag to enable local caching: True, False.'),
+ cfg.StrOpt('zfssa_cache_project', default='os-cinder-cache',
+ help='Name of ZFSSA project where cache volumes are stored.')
]
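+
+# A minimal cinder.conf sketch enabling the local cache (the backend
+# section name here is illustrative; the values shown are the defaults
+# registered above):
+#
+# [zfssa-iscsi]
+# volume_driver = cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver
+# zfssa_enable_local_cache = True
+# zfssa_cache_project = os-cinder-cache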
"""ZFSSA Cinder iSCSI volume driver.
Version history:
- 1.0.1: Backend enabled volume migration.
+ 1.0.1:
+ Backend enabled volume migration.
+ Local cache feature.
"""
-
VERSION = '1.0.1'
protocol = 'iSCSI'
compression=lcfg.zfssa_lun_compression,
logbias=lcfg.zfssa_lun_logbias)
+ if lcfg.zfssa_enable_local_cache:
+ self.zfssa.create_project(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ compression=lcfg.zfssa_lun_compression,
+ logbias=lcfg.zfssa_lun_logbias)
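+ # Custom ZFSSA schema properties stamp each cache LUN with the
+ # image ID and update time it was built from, so staleness can be
+ # detected later in _verify_cache_volume():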
+ schemas = [
+ {'property': 'image_id',
+ 'description': 'OpenStack image ID',
+ 'type': 'String'},
+ {'property': 'updated_at',
+ 'description': 'Most recent updated time of image',
+ 'type': 'String'}]
+ self.zfssa.create_schemas(schemas)
+
if (lcfg.zfssa_initiator_config != ''):
initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
for initiator_group in initiator_config:
def _get_provider_info(self, volume, lun=None):
"""Return provider information."""
lcfg = self.configuration
+ project = lcfg.zfssa_project
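+ # Cache volumes live in the dedicated cache project rather than the
+ # regular Cinder project: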
+ if ((lcfg.zfssa_enable_local_cache is True) and
+ (volume['name'].startswith('os-cache-vol-'))):
+ project = lcfg.zfssa_cache_project
+
if lun is None:
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
- lcfg.zfssa_project,
+ project,
volume['name'])
if isinstance(lun['number'], list):
project=lcfg.zfssa_project,
lun=volume['name'])
+ if ('origin' in lun2del and
+ lun2del['origin']['project'] == lcfg.zfssa_cache_project):
+ self._check_origin(lun2del, volume['name'])
+
def create_snapshot(self, snapshot):
"""Creates a snapshot of a volume.
"""Deletes a snapshot."""
LOG.debug('zfssa.delete_snapshot: snapshot=%s', snapshot['name'])
lcfg = self.configuration
- has_clones = self.zfssa.has_clones(lcfg.zfssa_pool,
- lcfg.zfssa_project,
- snapshot['volume_name'],
- snapshot['name'])
- if has_clones:
+ numclones = self.zfssa.num_clones(lcfg.zfssa_pool,
+ lcfg.zfssa_project,
+ snapshot['volume_name'],
+ snapshot['name'])
+ if numclones > 0:
LOG.error(_LE('Snapshot %s: has clones'), snapshot['name'])
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'],
+ lcfg.zfssa_project,
volume['name'])
def _update_volume_status(self):
# Cleanup snapshot
self.delete_snapshot(zfssa_snapshot)
+ def clone_image(self, context, volume,
+ image_location, image_meta,
+ image_service):
+ """Create a volume efficiently from an existing image.
+
+ Verify the image ID being used:
+
+ (1) If there is no existing cache volume, create one and transfer
+ image data to it. Take a snapshot.
+
+ (2) If a cache volume already exists, verify whether it is stale,
+ i.e. the image has been updated since the cache was built. If so,
+ try to remove it (raising an exception if removal fails) and
+ create a new cache volume as in (1).
+
+ Clone a volume from the cache volume and return it to Cinder.
+ """
+ LOG.debug('Cloning image %(image)s to volume %(volume)s',
+ {'image': image_meta['id'], 'volume': volume['name']})
+ lcfg = self.configuration
+ if not lcfg.zfssa_enable_local_cache:
+ return None, False
+
+ # virtual_size is the image's actual size when stored in a volume.
+ # It is expected to be set manually through Glance image properties.
+ try:
+ virtual_size = int(image_meta['properties'].get('virtual_size'))
+ except Exception:
+ LOG.error(_LE('virtual_size property is not set for the image.'))
+ return None, False
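+ # Round the image's virtual size up to whole GiB, since volumes are
+ # sized in GiB: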
+ cachevol_size = int(math.ceil(float(virtual_size) / units.Gi))
+ if cachevol_size > volume['size']:
+ LOG.error(_LE('Image size %(img_size)dGB is larger '
+ 'than volume size %(vol_size)dGB.'),
+ {'img_size': cachevol_size,
+ 'vol_size': volume['size']})
+ return None, False
+
+ specs = self._get_voltype_specs(volume)
+ cachevol_props = {'size': cachevol_size}
+
+ try:
+ cache_vol, cache_snap = self._verify_cache_volume(context,
+ image_meta,
+ image_service,
+ specs,
+ cachevol_props)
+ # A cache volume and a snapshot should be ready by now
+ # Create a clone from the cache volume
+ self.zfssa.clone_snapshot(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache_vol,
+ cache_snap,
+ lcfg.zfssa_project,
+ volume['name'])
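+ # The clone starts at the cache volume's size; grow it to the size
+ # the user requested: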
+ if cachevol_size < volume['size']:
+ self.extend_volume(volume, volume['size'])
+ except exception.VolumeBackendAPIException as exc:
+ LOG.error(_LE('Cannot clone image %(image)s to '
+ 'volume %(volume)s. Error: %(error)s.'),
+ {'volume': volume['name'],
+ 'image': image_meta['id'],
+ 'error': exc.message})
+ return None, False
+
+ return None, True
+
+ @utils.synchronized('zfssaiscsi', external=True)
+ def _verify_cache_volume(self, context, img_meta,
+ img_service, specs, cachevol_props):
+ """Verify if we have a cache volume that we want.
+
+ If we don't, create one.
+ If we do, check if it's been updated:
+ * If so, delete it and recreate a new volume
+ * If not, we are good.
+
+ If it's out of date, delete it and create a new one.
+ After the function returns, there should be a cache volume available,
+ ready for cloning.
+
+ There needs to be a file lock here, otherwise subsequent clone_image
+ requests will fail if the first request is still pending.
+ """
+ lcfg = self.configuration
+ cachevol_name = 'os-cache-vol-%s' % img_meta['id']
+ cachesnap_name = 'image-%s' % img_meta['id']
+ cachevol_meta = {
+ 'cache_name': cachevol_name,
+ 'snap_name': cachesnap_name,
+ }
+ cachevol_props.update(cachevol_meta)
+ cache_vol, cache_snap = None, None
+ updated_at = six.text_type(img_meta['updated_at'].isoformat())
+ LOG.debug('Verifying cache volume %s:', cachevol_name)
+
+ try:
+ cache_vol = self.zfssa.get_lun(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cachevol_name)
+ cache_snap = self.zfssa.get_lun_snapshot(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cachevol_name,
+ cachesnap_name)
+ except exception.VolumeNotFound:
+ # There is no existing cache volume, create one:
+ return self._create_cache_volume(context,
+ img_meta,
+ img_service,
+ specs,
+ cachevol_props)
+ except exception.SnapshotNotFound:
+ exception_msg = (_('Cache volume %(cache_vol)s '
+ 'does not have snapshot %(cache_snap)s.') %
+ {'cache_vol': cachevol_name,
+ 'cache_snap': cachesnap_name})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ # A cache volume does exist, check if it's updated:
+ if ((cache_vol['updated_at'] != updated_at) or
+ (cache_vol['image_id'] != img_meta['id'])):
+ # The cache volume is updated, but has clones:
+ if cache_snap['numclones'] > 0:
+ exception_msg = (_('Cannot delete '
+ 'cache volume: %(cachevol_name)s. '
+ 'It was updated at %(updated_at)s '
+ 'and currently has %(numclones)s '
+ 'volume instances.') %
+ {'cachevol_name': cachevol_name,
+ 'updated_at': updated_at,
+ 'numclones': cache_snap['numclones']})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ # The cache volume is updated, but has no clone, so we delete it
+ # and re-create a new one:
+ self.zfssa.delete_lun(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cachevol_name)
+ return self._create_cache_volume(context,
+ img_meta,
+ img_service,
+ specs,
+ cachevol_props)
+
+ return cachevol_name, cachesnap_name
+
+ def _create_cache_volume(self, context, img_meta,
+ img_service, specs, cachevol_props):
+ """Create a cache volume from an image.
+
+ Returns names of the cache volume and its snapshot.
+ """
+ lcfg = self.configuration
+ cachevol_size = int(cachevol_props['size'])
+ lunsize = "%sg" % six.text_type(cachevol_size)
+ lun_props = {
+ 'custom:image_id': img_meta['id'],
+ 'custom:updated_at': (
+ six.text_type(img_meta['updated_at'].isoformat())),
+ }
+ lun_props.update(specs)
+
+ cache_vol = {
+ 'name': cachevol_props['cache_name'],
+ 'id': img_meta['id'],
+ 'size': cachevol_size,
+ }
+ LOG.debug('Creating cache volume %s.', cache_vol['name'])
+
+ try:
+ self.zfssa.create_lun(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache_vol['name'],
+ lunsize,
+ lcfg.zfssa_target_group,
+ lun_props)
+ super(ZFSSAISCSIDriver, self).copy_image_to_volume(context,
+ cache_vol,
+ img_service,
+ img_meta['id'])
+ self.zfssa.create_snapshot(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache_vol['name'],
+ cachevol_props['snap_name'])
+ except Exception as exc:
+ exc_msg = (_('Failed to create cache volume %(volume)s. '
+ 'Error: %(err)s') %
+ {'volume': cache_vol['name'],
+ 'err': six.text_type(exc)})
+ LOG.error(exc_msg)
+ self.zfssa.delete_lun(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache_vol['name'])
+ raise exception.VolumeBackendAPIException(data=exc_msg)
+
+ return cachevol_props['cache_name'], cachevol_props['snap_name']
+
def local_path(self, volume):
"""Not implemented."""
pass
lcfg = self.configuration
init_groups = self.zfssa.get_initiator_initiatorgroup(
connector['initiator'])
+ if ((lcfg.zfssa_enable_local_cache is True) and
+ (volume['name'].startswith('os-cache-vol-'))):
+ project = lcfg.zfssa_cache_project
+ else:
+ project = lcfg.zfssa_project
+
for initiator_group in init_groups:
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
- lcfg.zfssa_project,
+ project,
volume['name'],
initiator_group)
iscsi_properties = {}
"""Driver entry point to terminate a connection for a volume."""
LOG.debug('terminate_connection: volume name: %s.', volume['name'])
lcfg = self.configuration
+ project = lcfg.zfssa_project
+ if ((lcfg.zfssa_enable_local_cache is True) and
+ (volume['name'].startswith('os-cache-vol-'))):
+ project = lcfg.zfssa_cache_project
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
- lcfg.zfssa_project,
+ project,
volume['name'],
'')
current_name, name=original_name)
return {'_name_id': None}
+ @utils.synchronized('zfssaiscsi', external=True)
+ def _check_origin(self, lun, volname):
+ """Verify the cache volume of a bootable volume.
+
+ If the cache no longer has clone, it will be deleted.
+ There is a small lag between the time a clone is deleted and the number
+ of clones being updated accordingly. There is also a race condition
+ when multiple volumes (clones of a cache volume) are deleted at once,
+ leading to the number of clones reported incorrectly. The file lock is
+ here to avoid such issues.
+ """
+ lcfg = self.configuration
+ cache = lun['origin']
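+ # -1 marks the clone count as unknown; the cache LUN is deleted only
+ # when zero clones are positively observed: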
+ numclones = -1
+ if (cache['snapshot'].startswith('image-') and
+ cache['share'].startswith('os-cache-vol')):
+ try:
+ numclones = self.zfssa.num_clones(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache['share'],
+ cache['snapshot'])
+ except Exception:
+ LOG.debug('Cache volume is already deleted.')
+ return
+
+ LOG.debug('Checking cache volume %(name)s, numclones = %(clones)d',
+ {'name': cache['share'], 'clones': numclones})
+
+ # Sometimes numclones still holds an old value even after all clones
+ # have been deleted, so we handle that situation separately here:
+ if numclones == 1:
+ try:
+ self.zfssa.get_lun(lcfg.zfssa_pool,
+ lcfg.zfssa_project,
+ volname)
+ # The volume does exist, so return
+ return
+ except exception.VolumeNotFound:
+ # The volume is already deleted
+ numclones = 0
+
+ if numclones == 0:
+ self.zfssa.delete_lun(lcfg.zfssa_pool,
+ lcfg.zfssa_cache_project,
+ cache['share'])
+
class MigrateVolumeInit(task.Task):
def execute(self, src_zfssa, volume, src_pool, src_project):
import base64
import datetime as dt
import errno
+import math
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import units
+import six
from cinder import exception
+from cinder import utils
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers import nfs
from cinder.volume.drivers.san import san
choices=['latency', 'throughput'],
help='Synchronous write bias-latency, throughput.'),
cfg.IntOpt('zfssa_rest_timeout',
- help='REST connection timeout. (seconds)')
+ help='REST connection timeout. (seconds)'),
+ cfg.BoolOpt('zfssa_enable_local_cache', default=True,
+ help='Flag to enable local caching: True, False.'),
+ cfg.StrOpt('zfssa_cache_directory', default='os-cinder-cache',
+ help='Name of directory inside zfssa_nfs_share where cache '
+ 'volumes are stored.')
]
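+
+# A minimal cinder.conf sketch enabling the NFS local cache (the backend
+# section name here is illustrative; the values shown are the defaults
+# registered above):
+#
+# [zfssa-nfs]
+# volume_driver = cinder.volume.drivers.zfssa.zfssanfs.ZFSSANFSDriver
+# zfssa_enable_local_cache = True
+# zfssa_cache_directory = os-cinder-cache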
LOG = log.getLogger(__name__)
"""ZFSSA Cinder NFS volume driver.
Version history:
- 1.0.1: Backend enabled volume migration.
+ 1.0.1:
+ Backend enabled volume migration.
+ Local cache feature.
"""
-
VERSION = '1.0.1'
volume_backend_name = 'ZFSSA_NFS'
protocol = driver_prefix = driver_volume_type = 'nfs'
self.zfssa.modify_service('http', args)
self.zfssa.enable_service('http')
+ if lcfg.zfssa_enable_local_cache:
+ LOG.debug('Creating local cache directory %s.',
+ lcfg.zfssa_cache_directory)
+ self.zfssa.create_directory(lcfg.zfssa_cache_directory)
+
def _ensure_shares_mounted(self):
try:
self._ensure_share_mounted(self.mount_path)
'snap_size': snapshot['volume_size']})
self._execute('rm', '-f', vol_path, run_as_root=True)
+ volume_origin = {'origin': snapshot['volume_name']}
+ self.zfssa.set_file_props(volume['name'], volume_origin)
+
return {'provider_location': volume['provider_location']}
def create_cloned_volume(self, volume, src_vref):
return self.create_volume_from_snapshot(volume, snapshot,
method='MOVE')
+ def delete_volume(self, volume):
+ LOG.debug('Deleting volume %s.', volume['name'])
+ lcfg = self.configuration
+ try:
+ vol_props = self.zfssa.get_volume(volume['name'])
+ except exception.VolumeNotFound:
+ return
+ super(ZFSSANFSDriver, self).delete_volume(volume)
+
+ if vol_props['origin'].startswith(lcfg.zfssa_cache_directory):
+ LOG.info(_LI('Checking origin %(origin)s of volume %(volume)s.'),
+ {'origin': vol_props['origin'],
+ 'volume': volume['name']})
+ self._check_origin(vol_props['origin'])
+
+ def clone_image(self, context, volume,
+ image_location, image_meta,
+ image_service):
+ """Create a volume efficiently from an existing image.
+
+ Verify the image ID being used:
+
+ (1) If there is no existing cache volume, create one and transfer
+ image data to it. Take a snapshot.
+
+ (2) If a cache volume already exists, verify whether it is stale,
+ i.e. the image has been updated since the cache was built. If so,
+ try to remove it (raising an exception if removal fails) and
+ create a new cache volume as in (1).
+
+ Clone a volume from the cache volume and return it to Cinder.
+ """
+ LOG.debug('Cloning image %(image)s to volume %(volume)s',
+ {'image': image_meta['id'], 'volume': volume['name']})
+ lcfg = self.configuration
+ if not lcfg.zfssa_enable_local_cache:
+ return None, False
+
+ # virtual_size is the image's actual size when stored in a volume.
+ # It is expected to be set manually through Glance image properties.
+ try:
+ virtual_size = int(image_meta['properties'].get('virtual_size'))
+ except Exception:
+ LOG.error(_LE('virtual_size property is not set for the image.'))
+ return None, False
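+ # Round the image's virtual size up to whole GiB, since volumes are
+ # sized in GiB: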
+ cachevol_size = int(math.ceil(float(virtual_size) / units.Gi))
+ if cachevol_size > volume['size']:
+ LOG.error(_LE('Image size %(img_size)dGB is larger '
+ 'than volume size %(vol_size)dGB.'),
+ {'img_size': cachevol_size,
+ 'vol_size': volume['size']})
+ return None, False
+
+ cache_dir = '%s/' % lcfg.zfssa_cache_directory
+ updated_at = six.text_type(image_meta['updated_at'].isoformat())
+ cachevol_props = {
+ 'name': '%sos-cache-vol-%s' % (cache_dir,
+ image_meta['id']),
+ 'size': cachevol_size,
+ 'updated_at': updated_at,
+ 'image_id': image_meta['id'],
+ }
+
+ try:
+ cachevol_name = self._verify_cache_volume(context,
+ image_meta,
+ image_service,
+ cachevol_props)
+ # A cache volume should be ready by now
+ # Create a clone from the cache volume
+ cache_vol = {
+ 'name': cachevol_name,
+ 'size': cachevol_size,
+ 'id': image_meta['id'],
+ }
+ clone_vol = self.create_cloned_volume(volume, cache_vol)
+ self._update_origin(volume['name'], cachevol_name)
+ except exception.VolumeBackendAPIException as exc:
+ LOG.error(_LE('Cannot clone image %(image)s to '
+ 'volume %(volume)s. Error: %(error)s.'),
+ {'volume': volume['name'],
+ 'image': image_meta['id'],
+ 'error': exc.message})
+ return None, False
+
+ return clone_vol, True
+
+ @utils.synchronized('zfssanfs', external=True)
+ def _verify_cache_volume(self, context, img_meta,
+ img_service, cachevol_props):
+ """Verify if we have a cache volume that we want.
+
+ If we don't, create one.
+ If we do, check if it's been updated:
+ * If so, delete it and recreate a new volume
+ * If not, we are good.
+
+ If it's out of date, delete it and create a new one.
+
+ After the function returns, there should be a cache volume available,
+ ready for cloning.
+ """
+ cachevol_name = cachevol_props['name']
+ cache_vol = None
+ LOG.debug('Verifying cache volume %s:', cachevol_name)
+
+ try:
+ cache_vol = self.zfssa.get_volume(cachevol_name)
+ except exception.VolumeNotFound:
+ # There is no existing cache volume, create one:
+ LOG.debug('Cache volume not found. Creating one...')
+ return self._create_cache_volume(context,
+ img_meta,
+ img_service,
+ cachevol_props)
+
+ # A cache volume does exist, check if it's updated:
+ if ((cache_vol['updated_at'] != cachevol_props['updated_at']) or
+ (cache_vol['image_id'] != cachevol_props['image_id'])):
+ if cache_vol['numclones'] > 0:
+ # The cache volume is updated, but has clones
+ exception_msg = (_('Cannot delete '
+ 'cache volume: %(cachevol_name)s. '
+ 'It was updated at %(updated_at)s '
+ 'and currently has %(numclones)d '
+ 'volume instances.') %
+ {'cachevol_name': cachevol_name,
+ 'updated_at': cachevol_props['updated_at'],
+ 'numclones': cache_vol['numclones']})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ # The cache volume is updated, but has no clone, so we delete it
+ # and re-create a new one:
+ cache_vol = {
+ 'provider_location': self.mount_path,
+ 'name': cachevol_name,
+ }
+ self.delete_volume(cache_vol)
+ return self._create_cache_volume(context,
+ img_meta,
+ img_service,
+ cachevol_props)
+
+ return cachevol_name
+
+ def _create_cache_volume(self, context, img_meta,
+ img_service, cachevol_props):
+ """Create a cache volume from an image.
+
+ Returns name of the cache volume.
+ """
+ cache_vol = {
+ 'provider_location': self.mount_path,
+ 'size': cachevol_props['size'],
+ 'name': cachevol_props['name'],
+ }
+ LOG.debug('Creating cache volume %s', cache_vol['name'])
+
+ try:
+ super(ZFSSANFSDriver, self).create_volume(cache_vol)
+ LOG.debug('Copying image data:')
+ super(ZFSSANFSDriver, self).copy_image_to_volume(context,
+ cache_vol,
+ img_service,
+ img_meta['id'])
+
+ except Exception as exc:
+ exc_msg = (_('Failed to create cache volume %(volume)s. '
+ 'Error: %(err)s') %
+ {'volume': cache_vol['name'],
+ 'err': six.text_type(exc)})
+ LOG.error(exc_msg)
+ self.zfssa.delete_file(cache_vol['name'])
+ raise exception.VolumeBackendAPIException(data=exc_msg)
+
+ cachevol_meta = {
+ 'updated_at': cachevol_props['updated_at'],
+ 'image_id': cachevol_props['image_id'],
+ }
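+ # Initialize the clone counter; _update_origin() increments it as
+ # new clones are created: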
+ cachevol_meta.update({'numclones': '0'})
+ self.zfssa.set_file_props(cache_vol['name'], cachevol_meta)
+ return cache_vol['name']
+
def _create_snapshot_name(self):
"""Creates a snapshot name from the date and time."""
used = share_details['space_total']
return free, used
+ @utils.synchronized('zfssanfs', external=True)
+ def _check_origin(self, origin):
+ """Verify the cache volume of a bootable volume.
+
+ If the cache volume no longer has any clones, it is deleted.
+ """
+ cachevol_props = self.zfssa.get_volume(origin)
+ numclones = cachevol_props['numclones']
+ LOG.debug('Number of clones: %d', numclones)
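+ # The numclones WebDAV property still counts the clone that was just
+ # deleted, so a value of 1 or less means it was the last one: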
+ if numclones <= 1:
+ # This cache vol does not have any other clone
+ self.zfssa.delete_file(origin)
+ else:
+ cachevol_props = {'numclones': six.text_type(numclones - 1)}
+ self.zfssa.set_file_props(origin, cachevol_props)
+
+ @utils.synchronized('zfssanfs', external=True)
+ def _update_origin(self, vol_name, cachevol_name):
+ """Update WebDAV property of a volume.
+
+ WebDAV properties are used to keep track of:
+ (1) The number of clones of a cache volume.
+ (2) The cache volume name (origin) of a bootable volume.
+
+ To avoid race conditions when multiple volumes are created and need
+ to be updated at once, a file lock is used to ensure that the
+ properties are updated consistently.
+ """
+ volume_origin = {'origin': cachevol_name}
+ self.zfssa.set_file_props(vol_name, volume_origin)
+
+ cache_props = self.zfssa.get_volume(cachevol_name)
+ cache_props.update({'numclones':
+ six.text_type(cache_props['numclones'] + 1)})
+ self.zfssa.set_file_props(cachevol_name, cache_props)
+
def _update_volume_stats(self):
"""Get volume stats from zfssa"""
self._ensure_shares_mounted()
from oslo_service import loopingcall
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.zfssa import restclient
from cinder.volume.drivers.zfssa import webdavclient
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
- raise exception.VolumeBackendAPIException(data=exception_msg)
+ raise exception.VolumeNotFound(volume_id=lun)
val = json.loads(ret.data)
ret = {
}
if 'origin' in val['lun']:
ret.update({'origin': val['lun']['origin']})
+ if 'custom:image_id' in val['lun']:
+ ret.update({'image_id': val['lun']['custom:image_id']})
+ ret.update({'updated_at': val['lun']['custom:updated_at']})
+
+ return ret
+
+ def get_lun_snapshot(self, pool, project, lun, snapshot):
+ """Return iscsi lun snapshot properties."""
+ svc = ('/api/storage/v1/pools/' + pool + '/projects/' +
+ project + '/luns/' + lun + '/snapshots/' + snapshot)
+
+ ret = self.rclient.get(svc)
+ if ret.status != restclient.Status.OK:
+ LOG.error(_LE('Error Getting '
+ 'Snapshot: %(snapshot)s of '
+ 'Volume: %(lun)s in '
+ 'Pool: %(pool)s, '
+ 'Project: %(project)s '
+ 'Return code: %(ret.status)d, '
+ 'Message: %(ret.data)s.'),
+ {'snapshot': snapshot,
+ 'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ raise exception.SnapshotNotFound(snapshot_id=snapshot)
+ val = json.loads(ret.data)['snapshot']
+ ret = {
+ 'name': val['name'],
+ 'numclones': val['numclones'],
+ }
return ret
def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup):
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
- LOG.error(_LE('Error Deleting Volume: %(lun)s to Pool: %(pool)s '
- 'Project: %(project)s Return code: %(ret.status)d '
- 'Message: %(ret.data)s.'),
- {'lun': lun,
- 'pool': pool,
- 'project': project,
- 'ret.status': ret.status,
- 'ret.data': ret.data})
+ exception_msg = (_('Error Deleting Volume: %(lun)s from '
+ 'Pool: %(pool)s, Project: %(project)s. '
+ 'Return code: %(ret.status)d, '
+ 'Message: %(ret.data)s.') %
+ {'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ if ret.status == restclient.Status.FORBIDDEN:
+ # This means that the lun exists but it can't be deleted:
+ raise exception.VolumeBackendAPIException(data=exception_msg)
def create_snapshot(self, pool, project, lun, snapshot):
"""create snapshot."""
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
- 'Message: %(ret.data)s.')
- % {'snapshot': snapshot,
- 'lun': lun,
- 'pool': pool,
- 'project': project,
- 'ret.status': ret.status,
- 'ret.data': ret.data})
+ 'Message: %(ret.data)s.') %
+ {'snapshot': snapshot,
+ 'lun': lun,
+ 'pool': pool,
+ 'project': project,
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
- def clone_snapshot(self, pool, project, lun, snapshot, clone):
- """clone snapshot."""
+ def clone_snapshot(self, pool, project, lun, snapshot, clone_proj, clone):
+ """clone 'snapshot' to a lun named 'clone' in project 'clone_proj'."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone'
arg = {
- 'project': project,
+ 'project': clone_proj,
'share': clone,
'nodestroy': True
}
'Volume: %(lun)s of '
'Pool: %(pool)s '
'Project: %(project)s '
+ 'Clone project: %(clone_proj)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
+ 'clone_proj': clone_proj,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
- def has_clones(self, pool, project, lun, snapshot):
+ def num_clones(self, pool, project, lun, snapshot):
"""Checks whether snapshot has clones or not."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots/' + snapshot
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
- return val['snapshot']['numclones'] != 0
+ return val['snapshot']['numclones']
def get_initiator_initiatorgroup(self, initiator):
"""Returns the initiator group of the initiator."""
groups.append('default')
return groups
+ def create_schema(self, schema):
+ """Create a custom ZFSSA schema."""
+ base = '/api/storage/v1/schema'
+
+ svc = "%(base)s/%(prop)s" % {'base': base, 'prop': schema['property']}
+ ret = self.rclient.get(svc)
+ if ret.status == restclient.Status.OK:
+ LOG.warning(_LW('Property %s already exists.'),
+ schema['property'])
+ return
+
+ ret = self.rclient.post(base, schema)
+ if ret.status != restclient.Status.CREATED:
+ exception_msg = (_('Error Creating '
+ 'Property: %(property)s '
+ 'Type: %(type)s '
+ 'Description: %(description)s '
+ 'Return code: %(ret.status)d '
+ 'Message: %(ret.data)s.')
+ % {'property': schema['property'],
+ 'type': schema['type'],
+ 'description': schema['description'],
+ 'ret.status': ret.status,
+ 'ret.data': ret.data})
+ LOG.error(exception_msg)
+ raise exception.VolumeBackendAPIException(data=exception_msg)
+
+ def create_schemas(self, schemas):
+ """Create multiple custom ZFSSA schemas."""
+ ret = []
+ for schema in schemas:
+ res = self.create_schema(schema)
+ ret.append(res)
+ return ret
+
class ZFSSANfsApi(ZFSSAApi):
"""ZFSSA API proxy class for NFS driver"""
val = json.loads(ret.data)
return val['filesystem']
+
+ def get_volume(self, volume):
+ LOG.debug('Getting volume %s.', volume)
+ try:
+ resp = self.webdavclient.request(src_file=volume,
+ method='PROPFIND')
+ except Exception:
+ raise exception.VolumeNotFound(volume_id=volume)
+
+ resp = resp.read()
+ numclones = self._parse_prop(resp, 'numclones')
+ result = {
+ 'numclones': int(numclones) if numclones != '' else 0,
+ 'updated_at': self._parse_prop(resp, 'updated_at'),
+ 'image_id': self._parse_prop(resp, 'image_id'),
+ 'origin': self._parse_prop(resp, 'origin'),
+ }
+ return result
+
+ def delete_file(self, filename):
+ try:
+ self.webdavclient.request(src_file=filename, method='DELETE')
+ except Exception:
+ LOG.error(_LE('Cannot delete file %s.'), filename)
+
+ def set_file_props(self, filename, specs):
+ """Set custom properties on a file."""
+ for key in specs:
+ self.webdavclient.set_file_prop(filename, key, specs[key])
+
+ def _parse_prop(self, response, prop):
+ """Parse a property value from the WebDAV response."""
+ propval = ""
+ for line in response.split("\n"):
+ if prop in line:
+ try:
+ propval = line[(line.index('>') + 1):line.index('</')]
+ except Exception:
+ pass
+ return propval
+
+ def create_directory(self, dirname):
+ try:
+ self.webdavclient.request(src_file=dirname, method='GET')
+ LOG.debug('Directory %s already exists.', dirname)
+ except Exception:
+ # The directory does not exist yet
+ try:
+ self.webdavclient.request(src_file=dirname, method='MKCOL')
+ except Exception:
+ exception_msg = (_('Cannot create directory %s.') % dirname)
+ raise exception.VolumeBackendAPIException(data=exception_msg)