return True
reserved = float(host_state.reserved_percentage) / 100
free = math.floor(free_space * (1 - reserved))
+
+ msg_args = {"host": host_state.host,
+ "requested": volume_size,
+ "available": free}
if free < volume_size:
LOG.warning(_("Insufficient free space for volume creation "
- "(requested / avail): "
- "%(requested)s/%(available)s")
- % {'requested': volume_size,
- 'available': free})
+ "on host %(host)s (requested / avail): "
+ "%(requested)s/%(available)s") % msg_args)
+ else:
+ LOG.debug("Sufficient free space for volume creation "
+ "on host %(host)s (requested / avail): "
+ "%(requested)s/%(available)s" % msg_args)
return free >= volume_size
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
- 'volume_type_id': None}
+ 'volume_type_id': None, 'host': 'hostname@backend#vol1'}
snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1',
'volume_size': 2, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
- 'volume_type_id': None}
+ 'volume_type_id': None, 'host': 'hostname@backend#vol1'}
vol1 = ssc_utils.NetAppVolume('lun1', 'openstack')
vol1.state['vserver_root'] = False
vol1.state['status'] = 'online'
if not properties:
raise AssertionError('Target portal is none')
- def test_fail_create_vol(self):
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.create_volume, self.vol_fail)
-
def test_vol_stats(self):
self.driver.get_volume_stats(refresh=True)
+ stats = self.driver._stats
+ self.assertEqual(stats['vendor_name'], 'NetApp')
+ self.assertTrue(stats['pools'][0]['pool_name'])
def test_create_vol_snapshot_diff_size_resize(self):
self.driver.create_volume(self.volume)
client = driver.client
client.set_api_version(1, 9)
self.driver = driver
+ self.driver.root_volume_name = 'root'
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_7mode'
self.driver.delete_volume(self.volume)
self.driver.volume_list = []
- def test_create_fail_on_select_vol(self):
- self.driver.volume_list = ['vol2', 'vol3']
- success = False
- try:
- self.driver.create_volume(self.volume)
- except exception.VolumeBackendAPIException:
- success = True
- pass
- finally:
- self.driver.volume_list = []
- if not success:
- raise AssertionError('Failed creating on selected volumes')
-
def test_check_for_setup_error_version(self):
drv = self.driver
delattr(drv.client, '_api_version')
client = driver.client
client.set_api_version(1, 9)
self.driver = driver
+ self.driver.root_volume_name = 'root'
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_7mode'
Tests for NetApp e-series iscsi volume driver.
"""
+import copy
import json
import re
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import common
+from cinder.volume.drivers.netapp.eseries import client
+from cinder.volume.drivers.netapp.eseries import iscsi
+from cinder.volume.drivers.netapp.eseries.iscsi import LOG as driver_log
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_eseries_opts
+import cinder.volume.drivers.netapp.utils as na_utils
LOG = logging.getLogger(__name__)
"""Test case for NetApp e-series iscsi driver."""
volume = {'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
- 'volume_name': 'lun1',
+ 'volume_name': 'lun1', 'host': 'hostname@backend#DDP',
'os_type': 'linux', 'provider_location': 'lun1',
'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
'provider_auth': 'provider a b', 'project_id': 'project',
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
+ fake_eseries_volume_label = na_utils.convert_uuid_to_es_fmt(volume['id'])
connector = {'initiator': 'iqn.1998-01.com.vmware:localhost-28a58148'}
+ fake_size_gb = volume['size']
+ fake_eseries_pool_label = 'DDP'
def setUp(self):
super(NetAppEseriesIscsiDriverTestCase, self).setUp()
self.volume_clone_large, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
+
+ @mock.patch.object(iscsi.Driver, '_get_volume',
+ mock.Mock(return_value={'volumeGroupRef': 'fake_ref'}))
+ def test_get_pool(self):
+ self.driver._objects['pools'] = [{'volumeGroupRef': 'fake_ref',
+ 'label': 'ddp1'}]
+ pool = self.driver.get_pool({'id': 'fake-uuid'})
+ self.assertEqual(pool, 'ddp1')
+
+ @mock.patch.object(iscsi.Driver, '_get_volume',
+ mock.Mock(return_value={'volumeGroupRef': 'fake_ref'}))
+ def test_get_pool_no_pools(self):
+ self.driver._objects['pools'] = []
+ pool = self.driver.get_pool({'id': 'fake-uuid'})
+ self.assertEqual(pool, None)
+
+ @mock.patch.object(iscsi.Driver, '_get_volume',
+ mock.Mock(return_value={'volumeGroupRef': 'fake_ref'}))
+ def test_get_pool_no_match(self):
+ self.driver._objects['pools'] = [{'volumeGroupRef': 'fake_ref2',
+ 'label': 'ddp2'}]
+ pool = self.driver.get_pool({'id': 'fake-uuid'})
+ self.assertEqual(pool, None)
+
+ @mock.patch.object(iscsi.Driver, '_create_volume', mock.Mock())
+ def test_create_volume(self):
+ self.driver.create_volume(self.volume)
+ self.driver._create_volume.assert_called_with(
+ 'DDP', self.fake_eseries_volume_label, self.volume['size'])
+
+ def test_create_volume_no_pool_provided_by_scheduler(self):
+ volume = copy.deepcopy(self.volume)
+ volume['host'] = "host@backend" # missing pool
+ self.assertRaises(exception.InvalidHost, self.driver.create_volume,
+ volume)
+
+ @mock.patch.object(client.RestClient, 'list_storage_pools')
+ def test_helper_create_volume_fail(self, fake_list_pools):
+ fake_pool = {}
+ fake_pool['label'] = self.fake_eseries_pool_label
+ fake_pool['volumeGroupRef'] = 'foo'
+ fake_pools = [fake_pool]
+ fake_list_pools.return_value = fake_pools
+ wrong_eseries_pool_label = 'hostname@backend'
+ self.assertRaises(exception.NetAppDriverException,
+ self.driver._create_volume, wrong_eseries_pool_label,
+ self.fake_eseries_volume_label, self.fake_size_gb)
+
+ @mock.patch.object(driver_log, 'info')
+ @mock.patch.object(client.RestClient, 'list_storage_pools')
+ @mock.patch.object(client.RestClient, 'create_volume',
+ mock.MagicMock(return_value='CorrectVolume'))
+ def test_helper_create_volume(self, storage_pools, log_info):
+ fake_pool = {}
+ fake_pool['label'] = self.fake_eseries_pool_label
+ fake_pool['volumeGroupRef'] = 'foo'
+ fake_pools = [fake_pool]
+ storage_pools.return_value = fake_pools
+ drv = self.driver
+ storage_vol = drv.driver._create_volume(self.fake_eseries_pool_label,
+ self.fake_eseries_volume_label,
+ self.fake_size_gb)
+ log_info.assert_called_once_with("Created volume with label %s.",
+ self.fake_eseries_volume_label)
+ self.assertEqual('CorrectVolume', storage_vol)
+
+ @mock.patch.object(client.RestClient, 'list_storage_pools')
+ @mock.patch.object(client.RestClient, 'create_volume',
+ mock.MagicMock(
+ side_effect=exception.NetAppDriverException))
+ @mock.patch.object(driver_log, 'info', mock.Mock())
+ def test_create_volume_check_exception(self, fake_list_pools):
+ fake_pool = {}
+ fake_pool['label'] = self.fake_eseries_pool_label
+ fake_pool['volumeGroupRef'] = 'foo'
+ fake_pools = [fake_pool]
+ fake_list_pools.return_value = fake_pools
+ self.assertRaises(exception.NetAppDriverException,
+ self.driver._create_volume,
+ self.fake_eseries_pool_label,
+ self.fake_eseries_volume_label, self.fake_size_gb)
class FakeVolume(object):
- def __init__(self, size=0):
+ def __init__(self, host='', size=0):
self.size = size
self.id = hash(self)
self.name = None
+ self.host = host
def __getitem__(self, key):
return self.__dict__[key]
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
- volume = FakeVolume(1)
+ location = '127.0.0.1:/nfs'
+ host = 'hostname@backend#' + location
+ volume = FakeVolume(host, 1)
snapshot = FakeSnapshot(1)
- location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
+ def test_get_pool(self):
+ pool = self._driver.get_pool({'provider_location': 'fake-share'})
+ self.assertEqual(pool, 'fake-share')
+
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
extra_specs = {}
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
+ host = 'hostname@backend#' + fake_share
with mock.patch.object(drv, '_ensure_shares_mounted'):
- with mock.patch.object(drv, '_find_shares',
- return_value=['localhost:myshare']):
- with mock.patch.object(drv, '_do_create_volume'):
- volume_info = self._driver.create_volume(FakeVolume(1))
- self.assertEqual(volume_info.get('provider_location'),
- fake_share)
+ with mock.patch.object(drv, '_do_create_volume'):
+ volume_info = self._driver.create_volume(FakeVolume(host, 1))
+ self.assertEqual(volume_info.get('provider_location'),
+ fake_share)
+
+ def test_create_volume_no_pool_specified(self):
+ drv = self._driver
+ drv.ssc_enabled = False
+ host = 'hostname@backend' # missing pool
+ with mock.patch.object(drv, '_ensure_shares_mounted'):
+ self.assertRaises(exception.InvalidHost,
+ self._driver.create_volume, FakeVolume(host, 1))
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
- fake_volume = FakeVolume(1)
fake_share = 'localhost:myshare'
+ host = 'hostname@backend#' + fake_share
+ fake_volume = FakeVolume(host, 1)
fake_qos_policy = 'qos_policy_1'
mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
- with mock.patch.object(drv, '_find_shares',
- return_value=['localhost:myshare']):
- with mock.patch.object(drv, '_do_create_volume'):
- with mock.patch.object(drv,
- '_set_qos_policy_group_on_volume'
- ) as mock_set_qos:
- volume_info = self._driver.create_volume(fake_volume)
- self.assertEqual(volume_info.get('provider_location'),
- 'localhost:myshare')
- mock_set_qos.assert_called_once_with(fake_volume,
- fake_share,
- fake_qos_policy)
+ with mock.patch.object(drv, '_do_create_volume'):
+ with mock.patch.object(drv,
+ '_set_qos_policy_group_on_volume'
+ ) as mock_set_qos:
+ volume_info = self._driver.create_volume(fake_volume)
+ self.assertEqual(volume_info.get('provider_location'),
+ 'localhost:myshare')
+ mock_set_qos.assert_called_once_with(fake_volume,
+ fake_share,
+ fake_qos_policy)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
return mox
+ def test_create_volume_no_pool_specified(self):
+ drv = self._driver
+ drv.ssc_enabled = False
+ host = 'hostname@backend' # missing pool
+ with mock.patch.object(drv, '_ensure_shares_mounted'):
+ self.assertRaises(exception.InvalidHost,
+ self._driver.create_volume, FakeVolume(host, 1))
+
def test_check_for_setup_error_version(self):
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
raise
mox.VerifyAll()
+
+ def test_get_pool(self):
+ pool = self._driver.get_pool({'provider_location': 'fake-share'})
+ self.assertEqual(pool, 'fake-share')
import mock
+from cinder import exception
from cinder import test
+from cinder.tests.test_netapp import create_configuration
import cinder.volume.drivers.netapp.api as ntapi
import cinder.volume.drivers.netapp.iscsi as ntap_iscsi
+from cinder.volume.drivers.netapp.iscsi import NetAppDirect7modeISCSIDriver \
+ as iscsi7modeDriver
+from cinder.volume.drivers.netapp.iscsi import NetAppDirectCmodeISCSIDriver \
+ as iscsiCmodeDriver
+from cinder.volume.drivers.netapp.iscsi import NetAppDirectISCSIDriver \
+ as iscsiDriver
+import cinder.volume.drivers.netapp.ssc_utils as ssc_utils
+import cinder.volume.drivers.netapp.utils as na_utils
class NetAppDirectISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(NetAppDirectISCSIDriverTestCase, self).setUp()
+ configuration = self._set_config(create_configuration())
self.driver = ntap_iscsi.NetAppDirectISCSIDriver(
- configuration=mock.Mock())
+ configuration=configuration)
self.driver.client = mock.Mock()
self.fake_volume = str(uuid.uuid4())
self.fake_lun = str(uuid.uuid4())
self.fake_size = '1024'
- self.fake_metadata = {
- 'OsType': 'linux',
- 'SpaceReserved': 'true',
- }
+ self.fake_metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
self.mock_request = mock.Mock()
+ def _set_config(self, configuration):
+ configuration.netapp_storage_protocol = 'iscsi'
+ configuration.netapp_login = 'admin'
+ configuration.netapp_password = 'pass'
+ configuration.netapp_server_hostname = '127.0.0.1'
+ configuration.netapp_transport_type = 'http'
+ configuration.netapp_server_port = '80'
+ return configuration
+
def tearDown(self):
super(NetAppDirectISCSIDriverTestCase, self).tearDown()
+ @mock.patch.object(iscsiDriver, '_get_lun_attr',
+ mock.Mock(return_value={'Volume': 'vol1'}))
+ def test_get_pool(self):
+ pool = self.driver.get_pool({'name': 'volume-fake-uuid'})
+ self.assertEqual(pool, 'vol1')
+
+ @mock.patch.object(iscsiDriver, '_get_lun_attr',
+ mock.Mock(return_value=None))
+ def test_get_pool_no_metadata(self):
+ pool = self.driver.get_pool({'name': 'volume-fake-uuid'})
+ self.assertEqual(pool, None)
+
+ @mock.patch.object(iscsiDriver, '_get_lun_attr',
+ mock.Mock(return_value=dict()))
+ def test_get_pool_volume_unknown(self):
+ pool = self.driver.get_pool({'name': 'volume-fake-uuid'})
+ self.assertEqual(pool, None)
+
+ @mock.patch.object(iscsiDriver, 'create_lun', mock.Mock())
+ @mock.patch.object(iscsiDriver, '_create_lun_handle', mock.Mock())
+ @mock.patch.object(iscsiDriver, '_add_lun_to_table', mock.Mock())
+ @mock.patch.object(na_utils, 'get_volume_extra_specs',
+ mock.Mock(return_value=None))
+ def test_create_volume(self):
+ self.driver.create_volume({'name': 'lun1', 'size': 100,
+ 'id': uuid.uuid4(),
+ 'host': 'hostname@backend#vol1'})
+ self.driver.create_lun.assert_called_once_with(
+ 'vol1', 'lun1', 107374182400, mock.ANY, None)
+
+ def test_create_volume_no_pool_provided_by_scheduler(self):
+ self.assertRaises(exception.InvalidHost, self.driver.create_volume,
+ {'name': 'lun1', 'size': 100,
+ 'id': uuid.uuid4(),
+ 'host': 'hostname@backend'}) # missing pool
+
def test_create_lun(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
self.driver.client.invoke_successfully.assert_called_once_with(
mock.ANY, True)
+ def test_create_lun_raises_on_failure(self):
+ self.driver.client.invoke_successfully = mock.Mock(
+ side_effect=ntapi.NaApiError)
+ self.assertRaises(ntapi.NaApiError,
+ self.driver.create_lun,
+ self.fake_volume,
+ self.fake_lun,
+ self.fake_size,
+ self.fake_metadata)
+
+ def test_update_volume_stats_is_abstract(self):
+ self.assertRaises(NotImplementedError,
+ self.driver._update_volume_stats)
+
class NetAppiSCSICModeTestCase(test.TestCase):
"""Test case for NetApp's C-Mode iSCSI driver."""
configuration=mock.Mock())
self.driver.client = mock.Mock()
self.driver.vserver = mock.Mock()
+ self.driver.ssc_vols = None
def tearDown(self):
super(NetAppiSCSICModeTestCase, self).tearDown()
self.assertEqual(1, self.driver.client.invoke_successfully.call_count)
+ @mock.patch.object(ssc_utils, 'refresh_cluster_ssc', mock.Mock())
+ @mock.patch.object(iscsiCmodeDriver, '_get_pool_stats', mock.Mock())
+ @mock.patch.object(na_utils, 'provide_ems', mock.Mock())
+ def test_vol_stats_calls_provide_ems(self):
+ self.driver.get_volume_stats(refresh=True)
+ self.assertEqual(na_utils.provide_ems.call_count, 1)
+
class NetAppiSCSI7ModeTestCase(test.TestCase):
"""Test case for NetApp's 7-Mode iSCSI driver."""
self.driver._clone_lun('fakeLUN', 'newFakeLUN')
self.assertEqual(1, self.driver.client.invoke_successfully.call_count)
+
+ @mock.patch.object(iscsi7modeDriver, '_refresh_volume_info', mock.Mock())
+ @mock.patch.object(iscsi7modeDriver, '_get_pool_stats', mock.Mock())
+ @mock.patch.object(na_utils, 'provide_ems', mock.Mock())
+ def test_vol_stats_calls_provide_ems(self):
+ self.driver.get_volume_stats(refresh=True)
+ self.assertEqual(na_utils.provide_ems.call_count, 1)
--- /dev/null
+# Copyright (c) Clinton Knight
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Mock unit tests for the NetApp driver utility module
+"""
+
+from cinder import test
+import cinder.volume.drivers.netapp.utils as na_utils
+
+
+class NetAppDriverUtilsTestCase(test.TestCase):
+
+ def test_to_bool(self):
+ self.assertTrue(na_utils.to_bool(True))
+ self.assertTrue(na_utils.to_bool('true'))
+ self.assertTrue(na_utils.to_bool('yes'))
+ self.assertTrue(na_utils.to_bool('y'))
+ self.assertTrue(na_utils.to_bool(1))
+ self.assertTrue(na_utils.to_bool('1'))
+ self.assertFalse(na_utils.to_bool(False))
+ self.assertFalse(na_utils.to_bool('false'))
+ self.assertFalse(na_utils.to_bool('asdf'))
+ self.assertFalse(na_utils.to_bool('no'))
+ self.assertFalse(na_utils.to_bool('n'))
+ self.assertFalse(na_utils.to_bool(0))
+ self.assertFalse(na_utils.to_bool('0'))
+ self.assertFalse(na_utils.to_bool(2))
+ self.assertFalse(na_utils.to_bool('2'))
+
+ def test_convert_uuid_to_es_fmt(self):
+ value = 'e67e931a-b2ed-4890-938b-3acc6a517fac'
+ result = na_utils.convert_uuid_to_es_fmt(value)
+ self.assertEqual(result, '4Z7JGGVS5VEJBE4LHLGGUUL7VQ')
+
+ def test_convert_es_fmt_to_uuid(self):
+ value = '4Z7JGGVS5VEJBE4LHLGGUUL7VQ'
+ result = str(na_utils.convert_es_fmt_to_uuid(value))
+ self.assertEqual(result, 'e67e931a-b2ed-4890-938b-3acc6a517fac')
+
+ def test_round_down(self):
+ self.assertAlmostEqual(na_utils.round_down(5.567, '0.00'), 5.56)
+ self.assertAlmostEqual(na_utils.round_down(5.567, '0.0'), 5.5)
+ self.assertAlmostEqual(na_utils.round_down(5.567, '0'), 5)
+ self.assertAlmostEqual(na_utils.round_down(0, '0.00'), 0)
+ self.assertAlmostEqual(na_utils.round_down(-5.567, '0.00'), -5.56)
+ self.assertAlmostEqual(na_utils.round_down(-5.567, '0.0'), -5.5)
+ self.assertAlmostEqual(na_utils.round_down(-5.567, '0'), -5)
import uuid
from oslo.config import cfg
+import six
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.netapp.options import netapp_eseries_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import utils
+from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
self.configuration.append_config_values(netapp_connection_opts)
self.configuration.append_config_values(netapp_transport_opts)
self.configuration.append_config_values(netapp_eseries_opts)
- self._objects = {'disk_pool_refs': [],
+ self._objects = {'disk_pool_refs': [], 'pools': [],
'volumes': {'label_ref': {}, 'ref_vol': {}},
'snapshots': {'label_ref': {}, 'ref_snap': {}}}
if (pool.get('raidLevel') == 'raidDiskPool'
and pool['label'].lower() in pools):
self._objects['disk_pool_refs'].append(pool['volumeGroupRef'])
+ self._objects['pools'].append(pool)
def _cache_volume(self, obj):
"""Caches volumes for further reference."""
return True
return False
+ def get_pool(self, volume):
+ """Return pool name where volume resides.
+
+ :param volume: The volume hosted by the driver.
+ :return: Name of the pool where given volume is hosted.
+ """
+ eseries_volume = self._get_volume(volume['id'])
+ for pool in self._objects['pools']:
+ if pool['volumeGroupRef'] == eseries_volume['volumeGroupRef']:
+ return pool['label']
+ return None
+
def create_volume(self, volume):
"""Creates a volume."""
- label = utils.convert_uuid_to_es_fmt(volume['id'])
+
+ LOG.debug('create_volume on %s' % volume['host'])
+
+ # get E-series pool label as pool name
+ eseries_pool_label = volume_utils.extract_host(volume['host'],
+ level='pool')
+
+ if eseries_pool_label is None:
+ msg = _("Pool is not available in the volume host field.")
+ raise exception.InvalidHost(reason=msg)
+
+ eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['id'])
+
+ # get size of the requested volume creation
size_gb = int(volume['size'])
- vol = self._create_volume(label, size_gb)
+ vol = self._create_volume(eseries_pool_label, eseries_volume_label,
+ size_gb)
self._cache_volume(vol)
- def _create_volume(self, label, size_gb):
+ def _create_volume(self, eseries_pool_label, eseries_volume_label,
+ size_gb):
+ """Creates volume with given label and size."""
+
+ target_pool = None
+
+ pools = self._client.list_storage_pools()
+ for pool in pools:
+ if pool["label"] == eseries_pool_label:
+ target_pool = pool
+ break
+
+ if not target_pool:
+            msg = _("Pool %s does not exist.")
+ raise exception.NetAppDriverException(msg % eseries_pool_label)
+
+ try:
+ vol = self._client.create_volume(target_pool['volumeGroupRef'],
+ eseries_volume_label, size_gb)
+ LOG.info(_("Created volume with label %s."), eseries_volume_label)
+ except exception.NetAppDriverException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("Error creating volume. Msg - %s."),
+ six.text_type(e))
+
+ return vol
+
+ def _schedule_and_create_volume(self, label, size_gb):
"""Creates volume with given label and size."""
avl_pools = self._get_sorted_avl_storage_pools(size_gb)
for pool in avl_pools:
msg = _("Failure creating volume %s.")
raise exception.NetAppDriverException(msg % label)
- def _get_sorted_avl_storage_pools(self, size_gb):
- """Returns storage pools sorted on available capacity."""
- size = size_gb * units.Gi
- pools = self._client.list_storage_pools()
- sorted_pools = sorted(pools, key=lambda x:
- (int(x.get('totalRaidedSpace', 0))
- - int(x.get('usedSpace', 0))), reverse=True)
- avl_pools = [x for x in sorted_pools
- if (x['volumeGroupRef'] in
- self._objects['disk_pool_refs']) and
- (int(x.get('totalRaidedSpace', 0)) -
- int(x.get('usedSpace', 0) >= size))]
- if not avl_pools:
- msg = _("No storage pool found with available capacity %s.")
- exception.NotFound(msg % size_gb)
- return avl_pools
-
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
label = utils.convert_uuid_to_es_fmt(volume['id'])
size = volume['size']
- dst_vol = self._create_volume(label, size)
+ dst_vol = self._schedule_and_create_volume(label, size)
try:
src_vol = None
src_vol = self._create_snapshot_volume(snapshot['id'])
def _update_volume_stats(self):
"""Update volume statistics."""
LOG.debug("Updating volume stats.")
- self._stats = self._stats or {}
- netapp_backend = 'NetApp_ESeries'
- backend_name = self.configuration.safe_get('volume_backend_name')
- self._stats["volume_backend_name"] = (
- backend_name or netapp_backend)
- self._stats["vendor_name"] = 'NetApp'
- self._stats["driver_version"] = '1.0'
- self._stats["storage_protocol"] = 'iSCSI'
- self._stats["total_capacity_gb"] = 0
- self._stats["free_capacity_gb"] = 0
- self._stats["reserved_percentage"] = 0
- self._stats["QoS_support"] = False
- self._update_capacity()
- self._garbage_collect_tmp_vols()
+ data = dict()
+ netapp_backend = "NetApp_ESeries"
+ backend_name = self.configuration.safe_get("volume_backend_name")
+ data["volume_backend_name"] = (backend_name or netapp_backend)
+ data["vendor_name"] = "NetApp"
+ data["driver_version"] = self.VERSION
+ data["storage_protocol"] = "iSCSI"
+ data["pools"] = []
- def _update_capacity(self):
- """Get free and total appliance capacity in bytes."""
- tot_bytes, used_bytes = 0, 0
pools = self._client.list_storage_pools()
for pool in pools:
- if pool['volumeGroupRef'] in self._objects['disk_pool_refs']:
- tot_bytes = tot_bytes + int(pool.get('totalRaidedSpace', 0))
- used_bytes = used_bytes + int(pool.get('usedSpace', 0))
- self._stats['free_capacity_gb'] = (tot_bytes - used_bytes) / units.Gi
- self._stats['total_capacity_gb'] = tot_bytes / units.Gi
+ cinder_pool = {}
+ cinder_pool["pool_name"] = pool.get("label", 0)
+ cinder_pool["QoS_support"] = False
+ cinder_pool["reserved_percentage"] = 0
+ if pool["volumeGroupRef"] in self._objects["disk_pool_refs"]:
+ tot_bytes = int(pool.get("totalRaidedSpace", 0))
+ used_bytes = int(pool.get("usedSpace", 0))
+ cinder_pool["free_capacity_gb"] = ((tot_bytes - used_bytes) /
+ units.Gi)
+ cinder_pool["total_capacity_gb"] = tot_bytes / units.Gi
+ data["pools"].append(cinder_pool)
+
+ self._stats = data
+ self._garbage_collect_tmp_vols()
+
+ def _get_sorted_avl_storage_pools(self, size_gb):
+ """Returns storage pools sorted on available capacity."""
+ size = size_gb * units.Gi
+ pools = self._client.list_storage_pools()
+ sorted_pools = sorted(pools, key=lambda x:
+ (int(x.get('totalRaidedSpace', 0))
+ - int(x.get('usedSpace', 0))), reverse=True)
+ avl_pools = [x for x in sorted_pools
+ if (x['volumeGroupRef'] in
+ self._objects['disk_pool_refs']) and
+ (int(x.get('totalRaidedSpace', 0)) -
+                     int(x.get('usedSpace', 0)) >= size)]
+ if not avl_pools:
+ msg = _("No storage pool found with available capacity %s.")
+ LOG.warn(msg % size_gb)
+ return avl_pools
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
import time
import uuid
+import six
+
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import ssc_utils
+from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
-from cinder.volume.drivers.netapp.utils import provide_ems
+from cinder.volume.drivers.netapp.utils import round_down
from cinder.volume.drivers.netapp.utils import set_safe_attr
from cinder.volume.drivers.netapp.utils import validate_instantiation
+from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
self._get_lun_list()
LOG.debug("Success getting LUN list from server")
- def create_volume(self, volume):
- """Driver entry point for creating a new volume."""
- default_size = '104857600' # 100 MB
- gigabytes = 1073741824L # 2^30
+ def get_pool(self, volume):
+ """Return pool name where volume resides.
+
+ :param volume: The volume hosted by the driver.
+ :return: Name of the pool where given volume is hosted.
+ """
name = volume['name']
- if int(volume['size']) == 0:
- size = default_size
- else:
- size = str(int(volume['size']) * gigabytes)
- metadata = {}
- metadata['OsType'] = 'linux'
- metadata['SpaceReserved'] = 'true'
+ metadata = self._get_lun_attr(name, 'metadata') or dict()
+ return metadata.get('Volume', None)
+
+ def create_volume(self, volume):
+ """Driver entry point for creating a new volume (aka ONTAP LUN)."""
+
+ LOG.debug('create_volume on %s' % volume['host'])
+
+ # get ONTAP volume name as pool name
+ ontap_volume_name = volume_utils.extract_host(volume['host'],
+ level='pool')
+
+ if ontap_volume_name is None:
+ msg = _("Pool is not available in the volume host field.")
+ raise exception.InvalidHost(reason=msg)
+
+ lun_name = volume['name']
+
+ # start with default size, get requested size
+ default_size = units.Mi * 100 # 100 MB
+ size = default_size if not int(volume['size'])\
+ else int(volume['size']) * units.Gi
+
+ metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
+
extra_specs = get_volume_extra_specs(volume)
- self._create_lun_on_eligible_vol(name, size, metadata, extra_specs)
- LOG.debug("Created LUN with name %s" % name)
+ qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
+ if extra_specs else None
+
+ self.create_lun(ontap_volume_name, lun_name, size,
+ metadata, qos_policy_group)
+ LOG.debug('Created LUN with name %s' % lun_name)
+
+ metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
+ metadata['Volume'] = ontap_volume_name
+ metadata['Qtree'] = None
+
handle = self._create_lun_handle(metadata)
- self._add_lun_to_table(NetAppLun(handle, name, size, metadata))
+ self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
minor = res.get_child_content('minor-version')
return (major, minor)
- def _create_lun_on_eligible_vol(self, name, size, metadata,
- extra_specs=None):
- """Creates an actual lun on filer."""
- raise NotImplementedError()
+ def create_lun(self, volume_name, lun_name, size,
+ metadata, qos_policy_group=None):
+ """Issues API request for creating LUN on volume."""
- def create_lun(self, volume, lun, size, metadata, qos_policy_group=None):
- """Issues api request for creating lun on volume."""
- path = '/vol/%s/%s' % (volume, lun)
+ path = '/vol/%s/%s' % (volume_name, lun_name)
lun_create = NaElement.create_node_with_children(
'lun-create-by-size',
- **{'path': path, 'size': size,
+ **{'path': path, 'size': six.text_type(size),
'ostype': metadata['OsType'],
'space-reservation-enabled': metadata['SpaceReserved']})
if qos_policy_group:
lun_create.add_new_child('qos-policy-group', qos_policy_group)
- self.client.invoke_successfully(lun_create, True)
+
+ try:
+ self.client.invoke_successfully(lun_create, True)
+ except NaApiError as ex:
+ with excutils.save_and_reraise_exception():
+ msg = _("Error provisioning volume %(lun_name)s on "
+ "%(volume_name)s. Details: %(ex)s")
+ msg_args = {'lun_name': lun_name,
+ 'volume_name': volume_name,
+ 'ex': six.text_type(ex)}
+ LOG.error(msg % msg_args)
def _get_iscsi_service_details(self):
"""Returns iscsi iqn."""
ssc_utils.check_ssc_api_permissions(self.client)
super(NetAppDirectCmodeISCSIDriver, self).check_for_setup_error()
- def _create_lun_on_eligible_vol(self, name, size, metadata,
- extra_specs=None):
- """Creates an actual lun on filer."""
- req_size = float(size) *\
- float(self.configuration.netapp_size_multiplier)
- qos_policy_group = None
- if extra_specs:
- qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None)
- volumes = self._get_avl_volumes(req_size, extra_specs)
- if not volumes:
- msg = _('Failed to get vol with required'
- ' size and extra specs for volume: %s')
- raise exception.VolumeBackendAPIException(data=msg % name)
- for volume in volumes:
- try:
- self.create_lun(volume.id['name'], name, size, metadata,
- qos_policy_group=qos_policy_group)
- metadata['Path'] = '/vol/%s/%s' % (volume.id['name'], name)
- metadata['Volume'] = volume.id['name']
- metadata['Qtree'] = None
- return
- except NaApiError as ex:
- msg = _("Error provisioning vol %(name)s on "
- "%(volume)s. Details: %(ex)s")
- LOG.error(msg % {'name': name,
- 'volume': volume.id['name'],
- 'ex': ex})
- finally:
- self._update_stale_vols(volume=volume)
+ def create_lun(self, volume_name, lun_name, size,
+ metadata, qos_policy_group=None):
+ """Creates a LUN, handling ONTAP differences as needed."""
- def _get_avl_volumes(self, size, extra_specs=None):
- """Get the available volume by size, extra_specs."""
- result = []
- volumes = ssc_utils.get_volumes_for_specs(
- self.ssc_vols, extra_specs)
- if volumes:
- sorted_vols = sorted(volumes, reverse=True)
- for vol in sorted_vols:
- if int(vol.space['size_avl_bytes']) >= int(size):
- result.append(vol)
- return result
+ super(NetAppDirectCmodeISCSIDriver, self).create_lun(
+ volume_name, lun_name, size, metadata, qos_policy_group)
+
+ self._update_stale_vols(
+ volume=ssc_utils.NetAppVolume(volume_name, self.vserver))
def _get_target_details(self):
"""Gets the target portal details."""
"""Creates lun metadata dictionary."""
self._is_naelement(lun)
meta_dict = {}
- self._is_naelement(lun)
meta_dict['Vserver'] = lun.get_child_content('vserver')
meta_dict['Volume'] = lun.get_child_content('volume')
meta_dict['Qtree'] = lun.get_child_content('qtree')
self.client.set_vserver(None)
def _update_volume_stats(self):
- """Retrieve stats info from volume group."""
+ """Retrieve stats info from vserver."""
- LOG.debug("Updating volume stats")
+ sync = True if self.ssc_vols is None else False
+ ssc_utils.refresh_cluster_ssc(self, self.client,
+ self.vserver, synchronous=sync)
+
+ LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_iSCSI_Cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
- data["volume_backend_name"] = (
- backend_name or netapp_backend)
- data["vendor_name"] = 'NetApp'
- data["driver_version"] = '1.0'
- data["storage_protocol"] = 'iSCSI'
-
- data['total_capacity_gb'] = 0
- data['free_capacity_gb'] = 0
- data['reserved_percentage'] = 0
- data['QoS_support'] = False
- self._update_cluster_vol_stats(data)
- provide_ems(self, self.client, data, netapp_backend)
+ data['volume_backend_name'] = backend_name or netapp_backend
+ data['vendor_name'] = 'NetApp'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = 'iSCSI'
+ data['pools'] = self._get_pool_stats()
+
+ na_utils.provide_ems(self, self.client, data, netapp_backend)
self._stats = data
- def _update_cluster_vol_stats(self, data):
- """Updates vol stats with cluster config."""
- sync = True if self.ssc_vols is None else False
- ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver,
- synchronous=sync)
- if self.ssc_vols:
- data['netapp_mirrored'] = 'true'\
- if self.ssc_vols['mirrored'] else 'false'
- data['netapp_unmirrored'] = 'true'\
- if len(self.ssc_vols['all']) > len(self.ssc_vols['mirrored'])\
- else 'false'
- data['netapp_dedup'] = 'true'\
- if self.ssc_vols['dedup'] else 'false'
- data['netapp_nodedup'] = 'true'\
- if len(self.ssc_vols['all']) > len(self.ssc_vols['dedup'])\
- else 'false'
- data['netapp_compression'] = 'true'\
- if self.ssc_vols['compression'] else 'false'
- data['netapp_nocompression'] = 'true'\
- if len(self.ssc_vols['all']) >\
- len(self.ssc_vols['compression'])\
- else 'false'
- data['netapp_thin_provisioned'] = 'true'\
- if self.ssc_vols['thin'] else 'false'
- data['netapp_thick_provisioned'] = 'true'\
- if len(self.ssc_vols['all']) >\
- len(self.ssc_vols['thin']) else 'false'
- if self.ssc_vols['all']:
- vol_max = max(self.ssc_vols['all'])
- data['total_capacity_gb'] =\
- int(vol_max.space['size_total_bytes']) / units.Gi
- data['free_capacity_gb'] =\
- int(vol_max.space['size_avl_bytes']) / units.Gi
- else:
- data['total_capacity_gb'] = 0
- data['free_capacity_gb'] = 0
- else:
- LOG.warn(_("Cluster ssc is not updated. No volume stats found."))
+ def _get_pool_stats(self):
+ """Retrieve pool (i.e. ONTAP volume) stats info from SSC volumes."""
+
+ pools = []
+ if not self.ssc_vols:
+ return pools
+
+ for vol in self.ssc_vols['all']:
+ pool = dict()
+ pool['pool_name'] = vol.id['name']
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ # convert sizes to GB and de-rate by NetApp multiplier
+ total = float(vol.space['size_total_bytes'])
+ total /= self.configuration.netapp_size_multiplier
+ total /= units.Gi
+ pool['total_capacity_gb'] = round_down(total, '0.01')
+
+ free = float(vol.space['size_avl_bytes'])
+ free /= self.configuration.netapp_size_multiplier
+ free /= units.Gi
+ pool['free_capacity_gb'] = round_down(free, '0.01')
+
+ pool['netapp:raid_type'] = vol.aggr['raid_type']
+ pool['netapp:disk_type'] = vol.aggr['disk_type']
+ pool['netapp:qos_policy_group'] = vol.qos['qos_policy_group']
+
+ mirrored = vol in self.ssc_vols['mirrored']
+ pool['netapp_mirrored'] = six.text_type(mirrored).lower()
+ pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
+
+ dedup = vol in self.ssc_vols['dedup']
+ pool['netapp_dedup'] = six.text_type(dedup).lower()
+ pool['netapp_nodedup'] = six.text_type(not dedup).lower()
+
+ compression = vol in self.ssc_vols['compression']
+ pool['netapp_compression'] = six.text_type(compression).lower()
+ pool['netapp_nocompression'] = six.text_type(
+ not compression).lower()
+
+ thin = vol in self.ssc_vols['thin']
+ pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
+ pool['netapp_thick_provisioned'] = six.text_type(not thin).lower()
+
+ pools.append(pool)
+
+ return pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
self.vol_refresh_interval = 1800
self.vol_refresh_running = False
self.vol_refresh_voluntary = False
- # Setting it infinite at set up
- # This will not rule out backend from scheduling
- self.total_gb = 'infinite'
- self.free_gb = 'infinite'
+ self.root_volume_name = self._get_root_volume_name()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
raise exception.VolumeBackendAPIException(data=msg)
super(NetAppDirect7modeISCSIDriver, self).check_for_setup_error()
- def _create_lun_on_eligible_vol(self, name, size, metadata,
- extra_specs=None):
- """Creates an actual lun on filer."""
- req_size = float(size) *\
- float(self.configuration.netapp_size_multiplier)
- volume = self._get_avl_volume_by_size(req_size)
- if not volume:
- msg = _('Failed to get vol with required size for volume: %s')
- raise exception.VolumeBackendAPIException(data=msg % name)
- self.create_lun(volume['name'], name, size, metadata)
- metadata['Path'] = '/vol/%s/%s' % (volume['name'], name)
- metadata['Volume'] = volume['name']
- metadata['Qtree'] = None
+ def create_lun(self, volume_name, lun_name, size,
+ metadata, qos_policy_group=None):
+ """Creates a LUN, handling ONTAP differences as needed."""
+
+ super(NetAppDirect7modeISCSIDriver, self).create_lun(
+ volume_name, lun_name, size, metadata, qos_policy_group)
+
self.vol_refresh_voluntary = True
def _get_filer_volumes(self, volume=None):
return volumes.get_children()
return []
- def _get_avl_volume_by_size(self, size):
- """Get the available volume by size."""
+ def _get_root_volume_name(self):
+        # TODO: switch to volume-get-root-name API when possible
vols = self._get_filer_volumes()
for vol in vols:
- avl_size = vol.get_child_content('size-available')
- state = vol.get_child_content('state')
- if float(avl_size) >= float(size) and state == 'online':
- avl_vol = dict()
- avl_vol['name'] = vol.get_child_content('name')
- avl_vol['block-type'] = vol.get_child_content('block-type')
- avl_vol['type'] = vol.get_child_content('type')
- avl_vol['size-available'] = avl_size
- if self.volume_list:
- if avl_vol['name'] in self.volume_list:
- return avl_vol
- elif self._get_vol_option(avl_vol['name'], 'root') != 'true':
- return avl_vol
+ volume_name = vol.get_child_content('name')
+ if self._get_vol_option(volume_name, 'root') == 'true':
+ return volume_name
+ LOG.warn(_('Could not determine root volume name '
+ 'on %s.') % self._get_owner())
return None
def _get_igroup_by_initiator(self, initiator):
result = self.client.invoke_successfully(iscsi_service_iter, True)
return result.get_child_content('node-name')
- def _create_lun_handle(self, metadata):
- """Returns lun handle based on filer type."""
+ def _get_owner(self):
if self.vfiler:
owner = '%s:%s' % (self.configuration.netapp_server_hostname,
self.vfiler)
else:
owner = self.configuration.netapp_server_hostname
+ return owner
+
+ def _create_lun_handle(self, metadata):
+ """Returns lun handle based on filer type."""
+ owner = self._get_owner()
return '%s:%s' % (owner, metadata['Path'])
def _get_lun_list(self):
"""Creates lun metadata dictionary."""
self._is_naelement(lun)
meta_dict = {}
- self._is_naelement(lun)
meta_dict['Path'] = lun.get_child_content('path')
+ meta_dict['Volume'] = lun.get_child_content('path').split('/')[2]
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = lun.get_child_content(
'is-space-reservation-enabled')
return meta_dict
def _update_volume_stats(self):
- """Retrieve status info from volume group."""
- LOG.debug("Updating volume stats")
+ """Retrieve stats info from filer."""
+
+ # ensure we get current data
+ self.vol_refresh_voluntary = True
+ self._refresh_volume_info()
+
+ LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_iSCSI_7mode_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
- data["volume_backend_name"] = (
- backend_name or 'NetApp_iSCSI_7mode_direct')
- data["vendor_name"] = 'NetApp'
- data["driver_version"] = self.VERSION
- data["storage_protocol"] = 'iSCSI'
- data['reserved_percentage'] = 0
- data['QoS_support'] = False
- self._get_capacity_info(data)
- provide_ems(self, self.client, data, netapp_backend,
- server_type="7mode")
+ data['volume_backend_name'] = backend_name or netapp_backend
+ data['vendor_name'] = 'NetApp'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = 'iSCSI'
+ data['pools'] = self._get_pool_stats()
+
+ na_utils.provide_ems(self, self.client, data, netapp_backend,
+ server_type='7mode')
self._stats = data
+ def _get_pool_stats(self):
+ """Retrieve pool (i.e. ONTAP volume) stats info from volumes."""
+
+ pools = []
+ if not self.vols:
+ return pools
+
+ for vol in self.vols:
+
+ # omit volumes not specified in the config
+ volume_name = vol.get_child_content('name')
+ if self.volume_list and volume_name not in self.volume_list:
+ continue
+
+ # omit root volume
+ if volume_name == self.root_volume_name:
+ continue
+
+ # ensure good volume state
+ state = vol.get_child_content('state')
+ inconsistent = vol.get_child_content('is-inconsistent')
+ invalid = vol.get_child_content('is-invalid')
+ if (state != 'online' or
+ inconsistent != 'false' or
+ invalid != 'false'):
+ continue
+
+ pool = dict()
+ pool['pool_name'] = volume_name
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ # convert sizes to GB and de-rate by NetApp multiplier
+ total = float(vol.get_child_content('size-total') or 0)
+ total /= self.configuration.netapp_size_multiplier
+ total /= units.Gi
+ pool['total_capacity_gb'] = round_down(total, '0.01')
+
+ free = float(vol.get_child_content('size-available') or 0)
+ free /= self.configuration.netapp_size_multiplier
+ free /= units.Gi
+ pool['free_capacity_gb'] = round_down(free, '0.01')
+
+ pools.append(pool)
+
+ return pools
+
def _get_lun_block_count(self, path):
"""Gets block counts for the lun."""
bs = super(
bs = bs - 1
return bs
- def _get_capacity_info(self, data):
- """Calculates the capacity information for the filer."""
+ def _refresh_volume_info(self):
+ """Saves the volume information for the filer."""
+
if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
timeutils.is_newer_than(self.vol_refresh_time,
self.vol_refresh_interval)):
_("Volume refresh job already running. Returning..."))
return
self.vol_refresh_voluntary = False
- self._refresh_capacity_info()
+ self.vols = self._get_filer_volumes()
self.vol_refresh_time = timeutils.utcnow()
except Exception as e:
- LOG.warn(_("Error refreshing vol capacity. Message: %s"), e)
+ LOG.warn(_("Error refreshing volume info. Message: %s"),
+ six.text_type(e))
finally:
set_safe_attr(self, 'vol_refresh_running', False)
- data['total_capacity_gb'] = self.total_gb
- data['free_capacity_gb'] = self.free_gb
-
- def _refresh_capacity_info(self):
- """Gets the latest capacity information."""
- LOG.info(_("Refreshing capacity info for %s."), self.client)
- total_bytes = 0
- free_bytes = 0
- vols = self._get_filer_volumes()
- for vol in vols:
- volume = vol.get_child_content('name')
- if self.volume_list and volume not in self.volume_list:
- continue
- state = vol.get_child_content('state')
- inconsistent = vol.get_child_content('is-inconsistent')
- invalid = vol.get_child_content('is-invalid')
- if (state == 'online' and inconsistent == 'false'
- and invalid == 'false'):
- total_size = vol.get_child_content('size-total')
- if total_size:
- total_bytes = total_bytes + int(total_size)
- avl_size = vol.get_child_content('size-available')
- if avl_size:
- free_bytes = free_bytes + int(avl_size)
- self.total_gb = total_bytes / units.Gi
- self.free_gb = free_bytes / units.Gi
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
import time
import uuid
+import six
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.volume.drivers.netapp import ssc_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
-from cinder.volume.drivers.netapp.utils import provide_ems
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume.drivers import nfs
+from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
"""Returns an error if prerequisites aren't met."""
raise NotImplementedError()
+ def get_pool(self, volume):
+ """Return pool name where volume resides.
+
+ :param volume: The volume hosted by the driver.
+ :return: Name of the pool where given volume is hosted.
+ """
+ return volume['provider_location']
+
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
vol_size = volume.size
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
- super(NetAppNFSDriver, self)._update_volume_stats()
- self._spawn_clean_cache_job()
+ raise NotImplementedError()
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
'file-usage-get', **{'path': path})
return file_use
+ def _get_extended_capacity_info(self, nfs_share):
+ """Returns an extended set of share capacity metrics."""
+
+ total_size, total_available, total_allocated = \
+ self._get_capacity_info(nfs_share)
+
+ used_ratio = (total_size - total_available) / total_size
+ subscribed_ratio = total_allocated / total_size
+ apparent_size = max(0, total_size * self.configuration.nfs_used_ratio)
+ apparent_available = max(0, apparent_size - total_allocated)
+
+ return {'total_size': total_size, 'total_available': total_available,
+ 'total_allocated': total_allocated, 'used_ratio': used_ratio,
+ 'subscribed_ratio': subscribed_ratio,
+ 'apparent_size': apparent_size,
+ 'apparent_available': apparent_available}
+
class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
"""Executes commands related to volumes on c mode."""
:param volume: volume reference
"""
+ LOG.debug('create_volume on %s' % volume['host'])
self._ensure_shares_mounted()
+
+ # get share as pool name
+ share = volume_utils.extract_host(volume['host'], level='pool')
+
+ if share is None:
+ msg = _("Pool is not available in the volume host field.")
+ raise exception.InvalidHost(reason=msg)
+
extra_specs = get_volume_extra_specs(volume)
- qos_policy_group = None
- if extra_specs:
- qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None)
- eligible = self._find_shares(volume['size'], extra_specs)
- if not eligible:
- raise exception.NfsNoSuitableShareFound(
- volume_size=volume['size'])
- for sh in eligible:
- try:
- volume['provider_location'] = sh
- LOG.info(_('casted to %s') % volume['provider_location'])
- self._do_create_volume(volume)
- if qos_policy_group:
- self._set_qos_policy_group_on_volume(volume, sh,
- qos_policy_group)
- return {'provider_location': volume['provider_location']}
- except Exception as ex:
- LOG.error(_("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s")
- % {'name': volume['name'],
- 'share': volume['provider_location'],
- 'ex': ex})
- volume['provider_location'] = None
- finally:
- if self.ssc_enabled:
- self._update_stale_vols(self._get_vol_for_share(sh))
+ qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
+ if extra_specs else None
+
+ try:
+ volume['provider_location'] = share
+ LOG.info(_('casted to %s') % volume['provider_location'])
+ self._do_create_volume(volume)
+ if qos_policy_group:
+ self._set_qos_policy_group_on_volume(volume, share,
+ qos_policy_group)
+ return {'provider_location': volume['provider_location']}
+ except Exception as ex:
+ LOG.error(_("Exception creating vol %(name)s on "
+ "share %(share)s. Details: %(ex)s")
+ % {'name': volume['name'],
+ 'share': volume['provider_location'],
+ 'ex': ex})
+ volume['provider_location'] = None
+ finally:
+ if self.ssc_enabled:
+ self._update_stale_vols(self._get_vol_for_share(share))
+
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
'vserver': self.vserver})
self._invoke_successfully(file_assign_qos)
- def _find_shares(self, size, extra_specs):
- """Finds suitable shares for given params."""
- shares = []
- containers = []
- if self.ssc_enabled:
- vols = ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs)
- containers = [x.export['path'] for x in vols]
- else:
- containers = self._mounted_shares
- for sh in containers:
- if self._is_share_eligible(sh, size):
- size, avl, alloc = self._get_capacity_info(sh)
- shares.append((sh, avl))
- shares = [a for a, b in sorted(
- shares, key=lambda x: x[1], reverse=True)]
- return shares
-
def _clone_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clones mounted volume on NetApp Cluster."""
self._invoke_successfully(clone_create, vserver)
def _update_volume_stats(self):
- """Retrieve stats info from volume group."""
- super(NetAppDirectCmodeNfsDriver, self)._update_volume_stats()
- netapp_backend = 'NetApp_NFS_cluster_direct'
+ """Retrieve stats info from vserver."""
+
+ self._ensure_shares_mounted()
+ sync = True if self.ssc_vols is None else False
+ ssc_utils.refresh_cluster_ssc(self, self._client,
+ self.vserver, synchronous=sync)
+
+ LOG.debug('Updating volume stats')
+ data = {}
+ netapp_backend = 'NetApp_NFS_Cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
- self._stats["volume_backend_name"] = (backend_name or
- netapp_backend)
- self._stats["vendor_name"] = 'NetApp'
- self._stats["driver_version"] = '1.0'
- self._update_cluster_vol_stats(self._stats)
- provide_ems(self, self._client, self._stats, netapp_backend)
-
- def _update_cluster_vol_stats(self, data):
- """Updates vol stats with cluster config."""
- if self.ssc_enabled:
- sync = True if self.ssc_vols is None else False
- ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver,
- synchronous=sync)
- else:
- LOG.warn(_("No vserver set in config. SSC will be disabled."))
- if self.ssc_vols:
- data['netapp_mirrored'] = 'true'\
- if self.ssc_vols['mirrored'] else 'false'
- data['netapp_unmirrored'] = 'true'\
- if len(self.ssc_vols['all']) >\
- len(self.ssc_vols['mirrored']) else 'false'
- data['netapp_dedup'] = 'true'\
- if self.ssc_vols['dedup'] else 'false'
- data['netapp_nodedup'] = 'true'\
- if len(self.ssc_vols['all']) >\
- len(self.ssc_vols['dedup']) else 'false'
- data['netapp_compression'] = 'true'\
- if self.ssc_vols['compression'] else 'false'
- data['netapp_nocompression'] = 'true'\
- if len(self.ssc_vols['all']) >\
- len(self.ssc_vols['compression']) else 'false'
- data['netapp_thin_provisioned'] = 'true'\
- if self.ssc_vols['thin'] else 'false'
- data['netapp_thick_provisioned'] = 'true'\
- if len(self.ssc_vols['all']) >\
- len(self.ssc_vols['thin']) else 'false'
- if self.ssc_vols['all']:
- vol_max = max(self.ssc_vols['all'])
- data['total_capacity_gb'] =\
- int(vol_max.space['size_total_bytes']) / units.Gi
- data['free_capacity_gb'] =\
- int(vol_max.space['size_avl_bytes']) / units.Gi
- else:
- data['total_capacity_gb'] = 0
- data['free_capacity_gb'] = 0
- elif self.ssc_enabled:
- LOG.warn(_("No cluster ssc stats found."
- " Wait for next volume stats update."))
+ data['volume_backend_name'] = backend_name or netapp_backend
+ data['vendor_name'] = 'NetApp'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = 'nfs'
+ data['pools'] = self._get_pool_stats()
+
+ self._spawn_clean_cache_job()
+ na_utils.provide_ems(self, self._client, data, netapp_backend)
+ self._stats = data
+
+ def _get_pool_stats(self):
+        """Retrieve pool (i.e. NFS share) stats from mounted shares and SSC."""
+
+ pools = []
+
+ for nfs_share in self._mounted_shares:
+
+ capacity = self._get_extended_capacity_info(nfs_share)
+
+ pool = dict()
+ pool['pool_name'] = nfs_share
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ # Report pool as reserved when over the configured used_ratio
+ if capacity['used_ratio'] > self.configuration.nfs_used_ratio:
+ pool['reserved_percentage'] = 100
+
+ # Report pool as reserved when over the subscribed ratio
+ if capacity['subscribed_ratio'] >=\
+ self.configuration.nfs_oversub_ratio:
+ pool['reserved_percentage'] = 100
+
+ # convert sizes to GB
+ total = float(capacity['apparent_size']) / units.Gi
+ pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
+
+ free = float(capacity['apparent_available']) / units.Gi
+ pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
+
+ # add SSC content if available
+ vol = self._get_vol_for_share(nfs_share)
+ if vol and self.ssc_vols:
+ pool['netapp:raid_type'] = vol.aggr['raid_type']
+ pool['netapp:disk_type'] = vol.aggr['disk_type']
+ pool['netapp:qos_policy_group'] = vol.qos['qos_policy_group']
+
+ mirrored = vol in self.ssc_vols['mirrored']
+ pool['netapp_mirrored'] = six.text_type(mirrored).lower()
+ pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
+
+ dedup = vol in self.ssc_vols['dedup']
+ pool['netapp_dedup'] = six.text_type(dedup).lower()
+ pool['netapp_nodedup'] = six.text_type(not dedup).lower()
+
+ compression = vol in self.ssc_vols['compression']
+ pool['netapp_compression'] = six.text_type(compression).lower()
+ pool['netapp_nocompression'] = six.text_type(
+ not compression).lower()
+
+ thin = vol in self.ssc_vols['thin']
+ pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
+ pool['netapp_thick_provisioned'] = six.text_type(
+ not thin).lower()
+
+ pools.append(pool)
+
+ return pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
result = server.invoke_successfully(na_element, True)
return result
+ def create_volume(self, volume):
+ """Creates a volume.
+
+ :param volume: volume reference
+ """
+ LOG.debug('create_volume on %s' % volume['host'])
+ self._ensure_shares_mounted()
+
+ # get share as pool name
+ share = volume_utils.extract_host(volume['host'], level='pool')
+
+ if share is None:
+ msg = _("Pool is not available in the volume host field.")
+ raise exception.InvalidHost(reason=msg)
+
+ volume['provider_location'] = share
+ LOG.info(_('Creating volume at location %s')
+ % volume['provider_location'])
+
+ try:
+ self._do_create_volume(volume)
+ except Exception as ex:
+ LOG.error(_("Exception creating vol %(name)s on "
+ "share %(share)s. Details: %(ex)s")
+ % {'name': volume['name'],
+ 'share': volume['provider_location'],
+ 'ex': six.text_type(ex)})
+ msg = _("Volume %s could not be created on shares.")
+ raise exception.VolumeBackendAPIException(
+ data=msg % (volume['name']))
+
+ return {'provider_location': volume['provider_location']}
+
def _clone_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clones mounted volume with NetApp filer."""
retry = retry - 1
def _update_volume_stats(self):
- """Retrieve stats info from volume group."""
- super(NetAppDirect7modeNfsDriver, self)._update_volume_stats()
+        """Retrieve stats info from filer."""
+
+ self._ensure_shares_mounted()
+
+ LOG.debug('Updating volume stats')
+ data = {}
netapp_backend = 'NetApp_NFS_7mode_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
- self._stats["volume_backend_name"] = (backend_name or
- 'NetApp_NFS_7mode_direct')
- self._stats["vendor_name"] = 'NetApp'
- self._stats["driver_version"] = self.VERSION
- provide_ems(self, self._client, self._stats, netapp_backend,
- server_type="7mode")
+ data['volume_backend_name'] = backend_name or netapp_backend
+ data['vendor_name'] = 'NetApp'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = 'nfs'
+ data['pools'] = self._get_pool_stats()
+
+ self._spawn_clean_cache_job()
+ na_utils.provide_ems(self, self._client, data, netapp_backend,
+ server_type="7mode")
+ self._stats = data
+
+ def _get_pool_stats(self):
+        """Retrieve pool (i.e. NFS share) stats info from mounted shares."""
+
+ pools = []
+
+ for nfs_share in self._mounted_shares:
+
+ capacity = self._get_extended_capacity_info(nfs_share)
+
+ pool = dict()
+ pool['pool_name'] = nfs_share
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ # Report pool as reserved when over the configured used_ratio
+ if capacity['used_ratio'] > self.configuration.nfs_used_ratio:
+ pool['reserved_percentage'] = 100
+
+ # Report pool as reserved when over the subscribed ratio
+ if capacity['subscribed_ratio'] >=\
+ self.configuration.nfs_oversub_ratio:
+ pool['reserved_percentage'] = 100
+
+ # convert sizes to GB
+ total = float(capacity['apparent_size']) / units.Gi
+ pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
+
+ free = float(capacity['apparent_available']) / units.Gi
+ pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
+
+ pools.append(pool)
+
+ return pools
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
import copy
from threading import Timer
+import six
+
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
vols.add(vol)
except KeyError as e:
LOG.debug('Unexpected error while creating'
- ' ssc vol list. Message - %s' % (e.message))
+ ' ssc vol list. Message - %s' % six.text_type(e))
continue
return vols
import base64
import binascii
import copy
+import decimal
import socket
import uuid
+import six
+
from cinder import context
from cinder import exception
from cinder.i18n import _
"""Converts e-series name format to uuid."""
es_label_b32 = es_label.ljust(32, '=')
return uuid.UUID(binascii.hexlify(base64.b32decode(es_label_b32)))
+
+
+def round_down(value, precision):
+ return float(decimal.Decimal(six.text_type(value)).quantize(
+ decimal.Decimal(precision), rounding=decimal.ROUND_DOWN))