-
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
import six
from cinder import exception
-from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
-from cinder.volume.drivers.netapp.api import NaElement
-from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume.drivers.netapp import common
+from cinder.volume.drivers.netapp.dataontap.client import client_base
+from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp.options import netapp_7mode_opts
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
-from cinder.volume.drivers.netapp import ssc_utils
LOG = logging.getLogger("cinder.volume.driver")
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None, 'host': 'hostname@backend#vol1'}
- vol1 = ssc_utils.NetAppVolume('lun1', 'openstack')
+ vol1 = ssc_cmode.NetAppVolume('lun1', 'openstack')
vol1.state['vserver_root'] = False
vol1.state['status'] = 'online'
vol1.state['junction_active'] = True
def _custom_setup(self):
self.stubs.Set(
- ssc_utils, 'refresh_cluster_ssc',
+ ssc_cmode, 'refresh_cluster_ssc',
lambda a, b, c, synchronous: None)
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(httplib, 'HTTPConnection',
FakeDirectCmodeHTTPConnection)
driver.do_setup(context='')
- client = driver.client
- client.set_api_version(1, 15)
self.driver = driver
self.driver.ssc_vols = self.ssc_map
return configuration
def test_connect(self):
+ self.driver.library.zapi_client = mock.MagicMock()
+ self.driver.library.zapi_client.get_ontapi_version.return_value = \
+ (1, 20)
self.driver.check_for_setup_error()
def test_do_setup_all_default(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('80', driver.client.get_port())
- self.assertEqual('http', driver.client.get_transport_type())
+ na_server = driver.library.zapi_client.get_connection()
+ self.assertEqual('80', na_server.get_port())
+ self.assertEqual('http', na_server.get_transport_type())
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
def test_do_setup_http_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('80', driver.client.get_port())
- self.assertEqual('http', driver.client.get_transport_type())
+ na_server = driver.library.zapi_client.get_connection()
+ self.assertEqual('80', na_server.get_port())
+ self.assertEqual('http', na_server.get_transport_type())
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
def test_do_setup_https_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
+ driver.library._get_root_volume_name = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('443', driver.client.get_port())
- self.assertEqual('https', driver.client.get_transport_type())
+ na_server = driver.library.zapi_client.get_connection()
+ self.assertEqual('443', na_server.get_port())
+ self.assertEqual('https', na_server.get_transport_type())
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
def test_do_setup_http_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('81', driver.client.get_port())
- self.assertEqual('http', driver.client.get_transport_type())
+ na_server = driver.library.zapi_client.get_connection()
+ self.assertEqual('81', na_server.get_port())
+ self.assertEqual('http', na_server.get_transport_type())
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
def test_do_setup_https_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
+ driver.library._get_root_volume_name = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('446', driver.client.get_port())
- self.assertEqual('https', driver.client.get_transport_type())
+ na_server = driver.library.zapi_client.get_connection()
+ self.assertEqual('446', na_server.get_port())
+ self.assertEqual('https', na_server.get_transport_type())
def test_create_destroy(self):
self.driver.create_volume(self.volume)
raise AssertionError('Target portal is none')
def test_vol_stats(self):
- self.driver.get_volume_stats(refresh=True)
- stats = self.driver._stats
+ stats = self.driver.get_volume_stats(refresh=True)
self.assertEqual(stats['vendor_name'], 'NetApp')
- self.assertTrue(stats['pools'][0]['pool_name'])
def test_create_vol_snapshot_diff_size_resize(self):
self.driver.create_volume(self.volume)
self.stubs.Set(httplib, 'HTTPConnection',
FakeDirect7modeHTTPConnection)
driver.do_setup(context='')
- client = driver.client
- client.set_api_version(1, 9)
+ driver.root_volume_name = 'root'
self.driver = driver
- self.driver.root_volume_name = 'root'
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_7mode'
self.driver.delete_volume(self.volume)
self.driver.volume_list = []
+ def test_connect(self):
+        self.driver.library.zapi_client = mock.MagicMock()
+        self.driver.library.zapi_client.get_ontapi_version.\
+ return_value = (1, 20)
+ self.driver.check_for_setup_error()
+
def test_check_for_setup_error_version(self):
drv = self.driver
- delattr(drv.client, '_api_version')
+ drv.zapi_client = mock.Mock()
+ drv.zapi_client.get_ontapi_version.return_value = None
# check exception raises when version not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
- drv.client.set_api_version(1, 8)
+ drv.zapi_client.get_ontapi_version.return_value = (1, 8)
# check exception raises when not supported version
self.assertRaises(exception.VolumeBackendAPIException,
self.stubs.Set(httplib, 'HTTPConnection',
FakeDirect7modeHTTPConnection)
driver.do_setup(context='')
- client = driver.client
- client.set_api_version(1, 9)
self.driver = driver
self.driver.root_volume_name = 'root'
configuration.netapp_server_port = None
configuration.netapp_vfiler = 'openstack'
return configuration
-
-
-class NetAppApiElementTransTests(test.TestCase):
- """Test case for NetApp api element translations."""
-
- def setUp(self):
- super(NetAppApiElementTransTests, self).setUp()
-
- def test_translate_struct_dict_unique_key(self):
- """Tests if dict gets properly converted to NaElements."""
- root = NaElement('root')
- child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}
- root.translate_struct(child)
- self.assertEqual(len(root.get_children()), 3)
- self.assertEqual(root.get_child_content('e1'), 'v1')
- self.assertEqual(root.get_child_content('e2'), 'v2')
- self.assertEqual(root.get_child_content('e3'), 'v3')
-
- def test_translate_struct_dict_nonunique_key(self):
- """Tests if list/dict gets properly converted to NaElements."""
- root = NaElement('root')
- child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]
- root.translate_struct(child)
- self.assertEqual(len(root.get_children()), 3)
- children = root.get_children()
- for c in children:
- if c.get_name() == 'e1':
- self.assertIn(c.get_content(), ['v1', 'v3'])
- else:
- self.assertEqual(c.get_content(), 'v2')
-
- def test_translate_struct_list(self):
- """Tests if list gets properly converted to NaElements."""
- root = NaElement('root')
- child = ['e1', 'e2']
- root.translate_struct(child)
- self.assertEqual(len(root.get_children()), 2)
- self.assertIsNone(root.get_child_content('e1'))
- self.assertIsNone(root.get_child_content('e2'))
-
- def test_translate_struct_tuple(self):
- """Tests if tuple gets properly converted to NaElements."""
- root = NaElement('root')
- child = ('e1', 'e2')
- root.translate_struct(child)
- self.assertEqual(len(root.get_children()), 2)
- self.assertIsNone(root.get_child_content('e1'))
- self.assertIsNone(root.get_child_content('e2'))
-
- def test_translate_invalid_struct(self):
- """Tests if invalid data structure raises exception."""
- root = NaElement('root')
- child = 'random child element'
- self.assertRaises(ValueError, root.translate_struct, child)
-
- def test_setter_builtin_types(self):
- """Tests str, int, float get converted to NaElement."""
- root = NaElement('root')
- root['e1'] = 'v1'
- root['e2'] = 1
- root['e3'] = 2.0
- root['e4'] = 8l
- self.assertEqual(len(root.get_children()), 4)
- self.assertEqual(root.get_child_content('e1'), 'v1')
- self.assertEqual(root.get_child_content('e2'), '1')
- self.assertEqual(root.get_child_content('e3'), '2.0')
- self.assertEqual(root.get_child_content('e4'), '8')
-
- def test_setter_na_element(self):
- """Tests na_element gets appended as child."""
- root = NaElement('root')
- root['e1'] = NaElement('nested')
- self.assertEqual(len(root.get_children()), 1)
- e1 = root.get_child_by_name('e1')
- self.assertIsInstance(e1, NaElement)
- self.assertIsInstance(e1.get_child_by_name('nested'), NaElement)
-
- def test_setter_child_dict(self):
- """Tests dict is appended as child to root."""
- root = NaElement('root')
- root['d'] = {'e1': 'v1', 'e2': 'v2'}
- e1 = root.get_child_by_name('d')
- self.assertIsInstance(e1, NaElement)
- sub_ch = e1.get_children()
- self.assertEqual(len(sub_ch), 2)
- for c in sub_ch:
- self.assertIn(c.get_name(), ['e1', 'e2'])
- if c.get_name() == 'e1':
- self.assertEqual(c.get_content(), 'v1')
- else:
- self.assertEqual(c.get_content(), 'v2')
-
- def test_setter_child_list_tuple(self):
- """Tests list/tuple are appended as child to root."""
- root = NaElement('root')
- root['l'] = ['l1', 'l2']
- root['t'] = ('t1', 't2')
- l = root.get_child_by_name('l')
- self.assertIsInstance(l, NaElement)
- t = root.get_child_by_name('t')
- self.assertIsInstance(t, NaElement)
- for le in l.get_children():
- self.assertIn(le.get_name(), ['l1', 'l2'])
- for te in t.get_children():
- self.assertIn(te.get_name(), ['t1', 't2'])
-
- def test_setter_no_value(self):
- """Tests key with None value."""
- root = NaElement('root')
- root['k'] = None
- self.assertIsNone(root.get_child_content('k'))
-
- def test_setter_invalid_value(self):
- """Tests invalid value raises exception."""
- root = NaElement('root')
- try:
- root['k'] = NaServer('localhost')
- except Exception as e:
- if not isinstance(e, TypeError):
- self.fail(_('Error not a TypeError.'))
-
- def test_setter_invalid_key(self):
- """Tests invalid value raises exception."""
- root = NaElement('root')
- try:
- root[None] = 'value'
- except Exception as e:
- if not isinstance(e, KeyError):
- self.fail(_('Error not a KeyError.'))
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import iscsi
from cinder.volume.drivers.netapp.eseries.iscsi import LOG as driver_log
+from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_eseries_opts
-import cinder.volume.drivers.netapp.utils as na_utils
LOG = logging.getLogger(__name__)
raise exception.Invalid()
-class NetAppEseriesIscsiDriverTestCase(test.TestCase):
+class NetAppEseriesISCSIDriverTestCase(test.TestCase):
"""Test case for NetApp e-series iscsi driver."""
volume = {'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
- fake_eseries_volume_label = na_utils.convert_uuid_to_es_fmt(volume['id'])
+ fake_eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['id'])
connector = {'initiator': 'iqn.1998-01.com.vmware:localhost-28a58148'}
fake_size_gb = volume['size']
fake_eseries_pool_label = 'DDP'
def setUp(self):
- super(NetAppEseriesIscsiDriverTestCase, self).setUp()
+ super(NetAppEseriesISCSIDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
- @mock.patch.object(iscsi.Driver, '_get_volume',
+ @mock.patch.object(iscsi.NetAppEseriesISCSIDriver, '_get_volume',
mock.Mock(return_value={'volumeGroupRef': 'fake_ref'}))
def test_get_pool(self):
self.driver._objects['pools'] = [{'volumeGroupRef': 'fake_ref',
pool = self.driver.get_pool({'id': 'fake-uuid'})
self.assertEqual(pool, 'ddp1')
- @mock.patch.object(iscsi.Driver, '_get_volume',
+ @mock.patch.object(iscsi.NetAppEseriesISCSIDriver, '_get_volume',
mock.Mock(return_value={'volumeGroupRef': 'fake_ref'}))
def test_get_pool_no_pools(self):
self.driver._objects['pools'] = []
pool = self.driver.get_pool({'id': 'fake-uuid'})
self.assertEqual(pool, None)
- @mock.patch.object(iscsi.Driver, '_get_volume',
+ @mock.patch.object(iscsi.NetAppEseriesISCSIDriver, '_get_volume',
mock.Mock(return_value={'volumeGroupRef': 'fake_ref'}))
def test_get_pool_no_match(self):
self.driver._objects['pools'] = [{'volumeGroupRef': 'fake_ref2',
pool = self.driver.get_pool({'id': 'fake-uuid'})
self.assertEqual(pool, None)
- @mock.patch.object(iscsi.Driver, '_create_volume', mock.Mock())
+ @mock.patch.object(iscsi.NetAppEseriesISCSIDriver, '_create_volume',
+ mock.Mock())
def test_create_volume(self):
self.driver.create_volume(self.volume)
self.driver._create_volume.assert_called_with(
import mock
import mox
from mox import IgnoreArg
-from mox import IsA
import six
-from cinder import context
from cinder import exception
from cinder.i18n import _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
-from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import common
-from cinder.volume.drivers.netapp import nfs as netapp_nfs
+from cinder.volume.drivers.netapp.dataontap.client import api
+from cinder.volume.drivers.netapp.dataontap.client import client_7mode
+from cinder.volume.drivers.netapp.dataontap.client import client_base
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap import nfs_7mode \
+ as netapp_nfs_7mode
+from cinder.volume.drivers.netapp.dataontap import nfs_base
+from cinder.volume.drivers.netapp.dataontap import nfs_cmode \
+ as netapp_nfs_cmode
+from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import utils
LOG = logging.getLogger(__name__)
+CONNECTION_INFO = {'hostname': 'fake_host',
+ 'transport_type': 'https',
+ 'port': 443,
+ 'username': 'admin',
+ 'password': 'passw0rd'}
+FAKE_VSERVER = 'fake_vserver'
+
+
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
+ configuration.netapp_server_hostname = CONNECTION_INFO['hostname']
+ configuration.netapp_transport_type = CONNECTION_INFO['transport_type']
+ configuration.netapp_server_port = CONNECTION_INFO['port']
+ configuration.netapp_login = CONNECTION_INFO['username']
+ configuration.netapp_password = CONNECTION_INFO['password']
return configuration
self.Reason = 'Sample error'
-class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
+class NetAppCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
def setUp(self):
- super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
+ super(NetAppCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
+ def _custom_setup(self):
+ kwargs = {}
+ kwargs['netapp_mode'] = 'proxy'
+ kwargs['configuration'] = create_configuration()
+ self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
+ self._driver.zapi_client = mock.Mock()
+
+ config = self._driver.configuration
+ config.netapp_vserver = FAKE_VSERVER
+
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
mox.VerifyAll()
- def _custom_setup(self):
- kwargs = {}
- kwargs['netapp_mode'] = 'proxy'
- kwargs['configuration'] = create_configuration()
- self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
-
- def test_check_for_setup_error(self):
- mox = self.mox
- drv = self._driver
- required_flags = [
- 'netapp_login',
- 'netapp_password',
- 'netapp_server_hostname']
-
- # set required flags
- for flag in required_flags:
- setattr(drv.configuration, flag, None)
- # check exception raises when flags are not set
- self.assertRaises(exception.CinderException,
- drv.check_for_setup_error)
-
- # set required flags
- for flag in required_flags:
- setattr(drv.configuration, flag, 'val')
- setattr(drv, 'ssc_enabled', False)
-
- mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
-
- netapp_nfs.NetAppDirectNfsDriver._check_flags()
- mox.ReplayAll()
-
- drv.check_for_setup_error()
-
- mox.VerifyAll()
-
- # restore initial FLAGS
- for flag in required_flags:
- delattr(drv.configuration, flag)
-
- def test_do_setup(self):
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
- mox.StubOutWithMock(drv, '_get_client')
- mox.StubOutWithMock(drv, '_do_custom_setup')
-
- netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
- drv._get_client()
- drv._do_custom_setup(IgnoreArg())
-
- mox.ReplayAll()
-
- drv.do_setup(IsA(context.RequestContext))
-
- mox.VerifyAll()
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup')
+ @mock.patch.object(client_cmode.Client, '__init__', return_value=None)
+ def test_do_setup(self, mock_client_init, mock_super_do_setup):
+ context = mock.Mock()
+ self._driver.do_setup(context)
+ mock_client_init.assert_called_once_with(vserver=FAKE_VSERVER,
+ **CONNECTION_INFO)
+ mock_super_do_setup.assert_called_once_with(context)
+
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
+ @mock.patch.object(ssc_cmode, 'check_ssc_api_permissions')
+ def test_check_for_setup_error(self, mock_ssc_api_permission_check,
+ mock_super_check_for_setup_error):
+ self._driver.zapi_client = mock.Mock()
+ self._driver.check_for_setup_error()
+ mock_ssc_api_permission_check.assert_called_once_with(
+ self._driver.zapi_client)
+ mock_super_check_for_setup_error.assert_called_once_with()
def _prepare_clone_mock(self, status):
drv = self._driver
configuration.nfs_shares_config = '/nfs'
return configuration
- @mock.patch.object(netapp_nfs.NetAppNFSDriver, 'do_setup')
- def test_do_setup_all_default(self, mock_set_up):
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
+ def test_do_setup_all_default(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('80', driver._client.get_port())
- self.assertEqual('http', driver._client.get_transport_type())
-
- @mock.patch.object(netapp_nfs.NetAppNFSDriver, 'do_setup')
- def test_do_setup_http_default_port(self, mock_setup):
+ na_server = driver.zapi_client.get_connection()
+ self.assertEqual('80', na_server.get_port())
+ self.assertEqual('http', na_server.get_transport_type())
+
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
+ def test_do_setup_http_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('80', driver._client.get_port())
- self.assertEqual('http', driver._client.get_transport_type())
-
- @mock.patch.object(netapp_nfs.NetAppNFSDriver, 'do_setup')
- def test_do_setup_https_default_port(self, mock_setup):
+ na_server = driver.zapi_client.get_connection()
+ self.assertEqual('80', na_server.get_port())
+ self.assertEqual('http', na_server.get_transport_type())
+
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
+ def test_do_setup_https_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('443', driver._client.get_port())
- self.assertEqual('https', driver._client.get_transport_type())
-
- @mock.patch.object(netapp_nfs.NetAppNFSDriver, 'do_setup')
- def test_do_setup_http_non_default_port(self, mock_setup):
+ na_server = driver.zapi_client.get_connection()
+ self.assertEqual('443', na_server.get_port())
+ self.assertEqual('https', na_server.get_transport_type())
+
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
+ def test_do_setup_http_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('81', driver._client.get_port())
- self.assertEqual('http', driver._client.get_transport_type())
-
- @mock.patch.object(netapp_nfs.NetAppNFSDriver, 'do_setup')
- def test_do_setup_https_non_default_port(self, mock_setup):
+ na_server = driver.zapi_client.get_connection()
+ self.assertEqual('81', na_server.get_port())
+ self.assertEqual('http', na_server.get_transport_type())
+
+ @mock.patch.object(client_base.Client, 'get_ontapi_version',
+ mock.Mock(return_value=(1, 20)))
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
+ def test_do_setup_https_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
- driver._do_custom_setup = mock.Mock()
driver.do_setup(context='')
- self.assertEqual('446', driver._client.get_port())
- self.assertEqual('https', driver._client.get_transport_type())
+ na_server = driver.zapi_client.get_connection()
+ self.assertEqual('446', na_server.get_port())
+ self.assertEqual('https', na_server.get_transport_type())
-class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
+class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
- super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
+ super(NetAppCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
- self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
+ self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
+ self._driver.zapi_client = mock.Mock()
+ @mock.patch.object(netapp_nfs_cmode, 'get_volume_extra_specs')
@mock.patch.object(utils, 'LOG', mock.Mock())
- @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
self.assertEqual(0, utils.LOG.warning.call_count)
@mock.patch.object(utils, 'LOG', mock.Mock())
- @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
- def test_create_volume_obsolete_extra_spec(self, mock_volume_extra_specs):
+ def test_create_volume_obsolete_extra_spec(self):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:raid_type': 'raid4'}
+ mock_volume_extra_specs = mock.Mock()
+ self.mock_object(netapp_nfs_cmode,
+ 'get_volume_extra_specs',
+ mock_volume_extra_specs)
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
utils.LOG.warning.assert_called_once_with(warn_msg)
@mock.patch.object(utils, 'LOG', mock.Mock())
- @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
- def test_create_volume_deprecated_extra_spec(self,
- mock_volume_extra_specs):
+ def test_create_volume_deprecated_extra_spec(self):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp_thick_provisioned': 'true'}
- mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
+ mock_volume_extra_specs = mock.Mock()
+ self.mock_object(netapp_nfs_cmode,
+ 'get_volume_extra_specs',
+ mock_volume_extra_specs)
+ mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_do_create_volume'):
self._driver.create_volume(FakeVolume(host, 1))
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
- @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
+ @mock.patch.object(netapp_nfs_cmode, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
- drv._client = mock.Mock()
- drv._client.get_api_version = mock.Mock(return_value=(1, 20))
+ drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
- drv._client = mock.Mock()
- drv._client.get_api_version = mock.Mock(return_value=(1, 20))
+ drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
- netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
+ nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
- netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
+ nfs_base.NetAppNfsDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
drv._post_clone_image.assert_called_with(volume)
-class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
+class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
"""Test direct NetApp C Mode driver."""
+
def _custom_setup(self):
- self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
+ self._driver = netapp_nfs_7mode.NetApp7modeNfsDriver(
configuration=create_configuration())
+ self._driver.zapi_client = mock.Mock()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
- def test_check_for_setup_error_version(self):
- drv = self._driver
- drv._client = api.NaServer("127.0.0.1")
-
- # check exception raises when version not found
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup')
+ @mock.patch.object(client_7mode.Client, '__init__', return_value=None)
+ def test_do_setup(self, mock_client_init, mock_super_do_setup):
+ context = mock.Mock()
+ self._driver.do_setup(context)
+ mock_client_init.assert_called_once_with(**CONNECTION_INFO)
+ mock_super_do_setup.assert_called_once_with(context)
+
+ @mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
+ def test_check_for_setup_error(self, mock_super_check_for_setup_error):
+ self._driver.zapi_client.get_ontapi_version.return_value = (1, 20)
+ self.assertIsNone(self._driver.check_for_setup_error())
+ mock_super_check_for_setup_error.assert_called_once_with()
+
+ def test_check_for_setup_error_old_version(self):
+ self._driver.zapi_client.get_ontapi_version.return_value = (1, 8)
self.assertRaises(exception.VolumeBackendAPIException,
- drv.check_for_setup_error)
+ self._driver.check_for_setup_error)
- drv._client.set_api_version(1, 8)
-
- # check exception raises when not supported version
+ def test_check_for_setup_error_no_version(self):
+ self._driver.zapi_client.get_ontapi_version.return_value = None
self.assertRaises(exception.VolumeBackendAPIException,
- drv.check_for_setup_error)
-
- def test_check_for_setup_error(self):
- mox = self.mox
- drv = self._driver
- drv._client = api.NaServer("127.0.0.1")
- drv._client.set_api_version(1, 9)
- required_flags = [
- 'netapp_transport_type',
- 'netapp_login',
- 'netapp_password',
- 'netapp_server_hostname',
- 'netapp_server_port']
-
- # set required flags
- for flag in required_flags:
- setattr(drv.configuration, flag, None)
- # check exception raises when flags are not set
- self.assertRaises(exception.CinderException,
- drv.check_for_setup_error)
-
- # set required flags
- for flag in required_flags:
- setattr(drv.configuration, flag, 'val')
-
- mox.ReplayAll()
-
- drv.check_for_setup_error()
-
- mox.VerifyAll()
-
- # restore initial FLAGS
- for flag in required_flags:
- delattr(drv.configuration, flag)
-
- def test_do_setup(self):
- mox = self.mox
- drv = self._driver
- mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
- mox.StubOutWithMock(drv, '_get_client')
- mox.StubOutWithMock(drv, '_do_custom_setup')
- netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
- drv._get_client()
- drv._do_custom_setup(IgnoreArg())
-
- mox.ReplayAll()
-
- drv.do_setup(IsA(context.RequestContext))
-
- mox.VerifyAll()
+ self._driver.check_for_setup_error)
def _prepare_clone_mock(self, status):
drv = self._driver
self.assertEqual(pool, 'fake-share')
def _set_config(self, configuration):
- super(NetappDirect7modeNfsDriverTestCase, self)._set_config(
+ super(NetApp7modeNfsDriverTestCase, self)._set_config(
configuration)
configuration.netapp_storage_family = 'ontap_7mode'
return configuration
-
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
from cinder import exception
from cinder import test
-from cinder.volume.drivers.netapp import api
-from cinder.volume.drivers.netapp import ssc_utils
+from cinder.volume.drivers.netapp.dataontap.client import api
+from cinder.volume.drivers.netapp.dataontap import ssc_cmode
class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def createNetAppVolume(**kwargs):
- vol = ssc_utils.NetAppVolume(kwargs['name'], kwargs['vs'])
+ vol = ssc_cmode.NetAppVolume(kwargs['name'], kwargs['vs'])
vol.state['vserver_root'] = kwargs.get('vs_root')
vol.state['status'] = kwargs.get('status')
vol.state['junction_active'] = kwargs.get('junc_active')
'rel_type': 'data_protection',
'mirr_state': 'broken'}]}
- self.mox.StubOutWithMock(ssc_utils, 'query_cluster_vols_for_ssc')
- self.mox.StubOutWithMock(ssc_utils, 'get_sis_vol_dict')
- self.mox.StubOutWithMock(ssc_utils, 'get_snapmirror_vol_dict')
- self.mox.StubOutWithMock(ssc_utils, 'query_aggr_options')
- self.mox.StubOutWithMock(ssc_utils, 'query_aggr_storage_disk')
- ssc_utils.query_cluster_vols_for_ssc(
+ self.mox.StubOutWithMock(ssc_cmode, 'query_cluster_vols_for_ssc')
+ self.mox.StubOutWithMock(ssc_cmode, 'get_sis_vol_dict')
+ self.mox.StubOutWithMock(ssc_cmode, 'get_snapmirror_vol_dict')
+ self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_options')
+ self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_storage_disk')
+ ssc_cmode.query_cluster_vols_for_ssc(
na_server, vserver, None).AndReturn(test_vols)
- ssc_utils.get_sis_vol_dict(na_server, vserver, None).AndReturn(sis)
- ssc_utils.get_snapmirror_vol_dict(na_server, vserver, None).AndReturn(
+ ssc_cmode.get_sis_vol_dict(na_server, vserver, None).AndReturn(sis)
+ ssc_cmode.get_snapmirror_vol_dict(na_server, vserver, None).AndReturn(
mirrored)
raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
- ssc_utils.query_aggr_options(
+ ssc_cmode.query_aggr_options(
na_server, IgnoreArg()).AndReturn(raiddp)
- ssc_utils.query_aggr_storage_disk(
+ ssc_cmode.query_aggr_storage_disk(
na_server, IgnoreArg()).AndReturn('SSD')
raid4 = {'ha_policy': 'cfo', 'raid_type': 'raid4'}
- ssc_utils.query_aggr_options(
+ ssc_cmode.query_aggr_options(
na_server, IgnoreArg()).AndReturn(raid4)
- ssc_utils.query_aggr_storage_disk(
+ ssc_cmode.query_aggr_storage_disk(
na_server, IgnoreArg()).AndReturn('SAS')
self.mox.ReplayAll()
- res_vols = ssc_utils.get_cluster_vols_with_ssc(
+ res_vols = ssc_cmode.get_cluster_vols_with_ssc(
na_server, vserver, volume=None)
self.mox.VerifyAll()
'rel_type': 'data_protection',
'mirr_state': 'snapmirrored'}]}
- self.mox.StubOutWithMock(ssc_utils, 'query_cluster_vols_for_ssc')
- self.mox.StubOutWithMock(ssc_utils, 'get_sis_vol_dict')
- self.mox.StubOutWithMock(ssc_utils, 'get_snapmirror_vol_dict')
- self.mox.StubOutWithMock(ssc_utils, 'query_aggr_options')
- self.mox.StubOutWithMock(ssc_utils, 'query_aggr_storage_disk')
- ssc_utils.query_cluster_vols_for_ssc(
+ self.mox.StubOutWithMock(ssc_cmode, 'query_cluster_vols_for_ssc')
+ self.mox.StubOutWithMock(ssc_cmode, 'get_sis_vol_dict')
+ self.mox.StubOutWithMock(ssc_cmode, 'get_snapmirror_vol_dict')
+ self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_options')
+ self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_storage_disk')
+ ssc_cmode.query_cluster_vols_for_ssc(
na_server, vserver, 'vola').AndReturn(test_vols)
- ssc_utils.get_sis_vol_dict(
+ ssc_cmode.get_sis_vol_dict(
na_server, vserver, 'vola').AndReturn(sis)
- ssc_utils.get_snapmirror_vol_dict(
+ ssc_cmode.get_snapmirror_vol_dict(
na_server, vserver, 'vola').AndReturn(mirrored)
raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
- ssc_utils.query_aggr_options(
+ ssc_cmode.query_aggr_options(
na_server, 'aggr1').AndReturn(raiddp)
- ssc_utils.query_aggr_storage_disk(na_server, 'aggr1').AndReturn('SSD')
+ ssc_cmode.query_aggr_storage_disk(na_server, 'aggr1').AndReturn('SSD')
self.mox.ReplayAll()
- res_vols = ssc_utils.get_cluster_vols_with_ssc(
+ res_vols = ssc_cmode.get_cluster_vols_with_ssc(
na_server, vserver, volume='vola')
self.mox.VerifyAll()
test_vols = set(
[self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
- self.mox.StubOutWithMock(ssc_utils, 'get_cluster_vols_with_ssc')
- ssc_utils.get_cluster_vols_with_ssc(
+ self.mox.StubOutWithMock(ssc_cmode, 'get_cluster_vols_with_ssc')
+ ssc_cmode.get_cluster_vols_with_ssc(
na_server, vserver).AndReturn(test_vols)
self.mox.ReplayAll()
- res_map = ssc_utils.get_cluster_ssc(na_server, vserver)
+ res_map = ssc_cmode.get_cluster_ssc(na_server, vserver)
self.mox.VerifyAll()
self.assertEqual(len(res_map['mirrored']), 1)
for type in test_map.keys():
# type
extra_specs = {test_map[type][0]: 'true'}
- res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+ res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs)
self.assertEqual(len(res), len(ssc_map[type]))
# opposite type
extra_specs = {test_map[type][1]: 'true'}
- res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+ res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs)
self.assertEqual(len(res), len(ssc_map['all'] - ssc_map[type]))
# both types
extra_specs =\
{test_map[type][0]: 'true', test_map[type][1]: 'true'}
- res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+ res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs)
self.assertEqual(len(res), len(ssc_map['all']))
def test_vols_for_optional_specs(self):
extra_specs =\
{'netapp_dedup': 'true',
'netapp:raid_type': 'raid4', 'netapp:disk_type': 'SSD'}
- res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+ res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs)
self.assertEqual(len(res), 1)
def test_query_cl_vols_for_ssc(self):
na_server = api.NaServer('127.0.0.1')
na_server.set_api_version(1, 15)
- vols = ssc_utils.query_cluster_vols_for_ssc(na_server, 'Openstack')
+ vols = ssc_cmode.query_cluster_vols_for_ssc(na_server, 'Openstack')
self.assertEqual(len(vols), 2)
for vol in vols:
if vol.id['name'] != 'iscsi' or vol.id['name'] != 'nfsvol':
def test_query_aggr_options(self):
na_server = api.NaServer('127.0.0.1')
- aggr_attribs = ssc_utils.query_aggr_options(na_server, 'aggr0')
+ aggr_attribs = ssc_cmode.query_aggr_options(na_server, 'aggr0')
if aggr_attribs:
self.assertEqual(aggr_attribs['ha_policy'], 'cfo')
self.assertEqual(aggr_attribs['raid_type'], 'raid_dp')
def test_query_aggr_storage_disk(self):
na_server = api.NaServer('127.0.0.1')
- eff_disk_type = ssc_utils.query_aggr_storage_disk(na_server, 'aggr0')
+ eff_disk_type = ssc_cmode.query_aggr_storage_disk(na_server, 'aggr0')
self.assertEqual(eff_disk_type, 'SATA')
+++ /dev/null
-# Copyright 2014 Tom Barron. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import platform
-
-import mock
-from oslo.concurrency import processutils as putils
-
-from cinder import test
-from cinder import version
-from cinder.volume.drivers.netapp import utils as na_utils
-
-
-class OpenstackInfoTestCase(test.TestCase):
-
- UNKNOWN_VERSION = 'unknown version'
- UNKNOWN_RELEASE = 'unknown release'
- UNKNOWN_VENDOR = 'unknown vendor'
- UNKNOWN_PLATFORM = 'unknown platform'
- VERSION_STRING_RET_VAL = 'fake_version_1'
- RELEASE_STRING_RET_VAL = 'fake_release_1'
- PLATFORM_RET_VAL = 'fake_platform_1'
- VERSION_INFO_VERSION = 'fake_version_2'
- VERSION_INFO_RELEASE = 'fake_release_2'
- RPM_INFO_VERSION = 'fake_version_3'
- RPM_INFO_RELEASE = 'fake_release_3'
- RPM_INFO_VENDOR = 'fake vendor 3'
- PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '')
- NO_PKG_FOUND = ('', 'whatever')
- PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '')
- DEB_RLS = 'upstream_version-debian_revision'
- DEB_VENDOR = 'debian_revision'
-
- def setUp(self):
- super(OpenstackInfoTestCase, self).setUp()
-
- def test_openstack_info_init(self):
- info = na_utils.OpenStackInfo()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(version.version_info, 'version_string',
- mock.Mock(return_value=VERSION_STRING_RET_VAL))
- def test_update_version_from_version_string(self):
- info = na_utils.OpenStackInfo()
- info._update_version_from_version_string()
-
- self.assertEqual(self.VERSION_STRING_RET_VAL, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(version.version_info, 'version_string',
- mock.Mock(side_effect=Exception))
- def test_xcption_in_update_version_from_version_string(self):
- info = na_utils.OpenStackInfo()
- info._update_version_from_version_string()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(version.version_info, 'release_string',
- mock.Mock(return_value=RELEASE_STRING_RET_VAL))
- def test_update_release_from_release_string(self):
- info = na_utils.OpenStackInfo()
- info._update_release_from_release_string()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(version.version_info, 'release_string',
- mock.Mock(side_effect=Exception))
- def test_xcption_in_update_release_from_release_string(self):
- info = na_utils.OpenStackInfo()
- info._update_release_from_release_string()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(platform, 'platform',
- mock.Mock(return_value=PLATFORM_RET_VAL))
- def test_update_platform(self):
- info = na_utils.OpenStackInfo()
- info._update_platform()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.PLATFORM_RET_VAL, info._platform)
-
- @mock.patch.object(platform, 'platform',
- mock.Mock(side_effect=Exception))
- def test_xcption_in_update_platform(self):
- info = na_utils.OpenStackInfo()
- info._update_platform()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version',
- mock.Mock(return_value=VERSION_INFO_VERSION))
- @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release',
- mock.Mock(return_value=VERSION_INFO_RELEASE))
- def test_update_info_from_version_info(self):
- info = na_utils.OpenStackInfo()
- info._update_info_from_version_info()
-
- self.assertEqual(self.VERSION_INFO_VERSION, info._version)
- self.assertEqual(self.VERSION_INFO_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version',
- mock.Mock(return_value=''))
- @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release',
- mock.Mock(return_value=None))
- def test_no_info_from_version_info(self):
- info = na_utils.OpenStackInfo()
- info._update_info_from_version_info()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version',
- mock.Mock(return_value=VERSION_INFO_VERSION))
- @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release',
- mock.Mock(side_effect=Exception))
- def test_xcption_in_info_from_version_info(self):
- info = na_utils.OpenStackInfo()
- info._update_info_from_version_info()
-
- self.assertEqual(self.VERSION_INFO_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
-
- @mock.patch.object(putils, 'execute',
- mock.Mock(return_value=PUTILS_RPM_RET_VAL))
- def test_update_info_from_rpm(self):
- info = na_utils.OpenStackInfo()
- found_package = info._update_info_from_rpm()
-
- self.assertEqual(self.RPM_INFO_VERSION, info._version)
- self.assertEqual(self.RPM_INFO_RELEASE, info._release)
- self.assertEqual(self.RPM_INFO_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
- self.assertTrue(found_package)
-
- @mock.patch.object(putils, 'execute',
- mock.Mock(return_value=NO_PKG_FOUND))
- def test_update_info_from_rpm_no_pkg_found(self):
- info = na_utils.OpenStackInfo()
- found_package = info._update_info_from_rpm()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
- self.assertFalse(found_package)
-
- @mock.patch.object(putils, 'execute',
- mock.Mock(side_effect=Exception))
- def test_xcption_in_update_info_from_rpm(self):
- info = na_utils.OpenStackInfo()
- found_package = info._update_info_from_rpm()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
- self.assertFalse(found_package)
-
- @mock.patch.object(putils, 'execute',
- mock.Mock(return_value=PUTILS_DPKG_RET_VAL))
- def test_update_info_from_dpkg(self):
- info = na_utils.OpenStackInfo()
- found_package = info._update_info_from_dpkg()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.DEB_RLS, info._release)
- self.assertEqual(self.DEB_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
- self.assertTrue(found_package)
-
- @mock.patch.object(putils, 'execute',
- mock.Mock(return_value=NO_PKG_FOUND))
- def test_update_info_from_dpkg_no_pkg_found(self):
- info = na_utils.OpenStackInfo()
- found_package = info._update_info_from_dpkg()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
- self.assertFalse(found_package)
-
- @mock.patch.object(putils, 'execute',
- mock.Mock(side_effect=Exception))
- def test_xcption_in_update_info_from_dpkg(self):
- info = na_utils.OpenStackInfo()
- found_package = info._update_info_from_dpkg()
-
- self.assertEqual(self.UNKNOWN_VERSION, info._version)
- self.assertEqual(self.UNKNOWN_RELEASE, info._release)
- self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
- self.assertFalse(found_package)
-
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_version_from_version_string', mock.Mock())
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_release_from_release_string', mock.Mock())
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_platform', mock.Mock())
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_info_from_version_info', mock.Mock())
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_info_from_rpm', mock.Mock(return_value=True))
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_info_from_dpkg')
- def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg):
- info = na_utils.OpenStackInfo()
- info._update_openstack_info()
-
- self.assertFalse(mock_updt_from_dpkg.called)
-
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_version_from_version_string', mock.Mock())
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_release_from_release_string', mock.Mock())
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_platform', mock.Mock())
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_info_from_version_info', mock.Mock())
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_info_from_rpm', mock.Mock(return_value=False))
- @mock.patch.object(na_utils.OpenStackInfo,
- '_update_info_from_dpkg')
- def test_update_openstack_info_rpm_pkg_not_found(self,
- mock_updt_from_dpkg):
- info = na_utils.OpenStackInfo()
- info._update_openstack_info()
-
- self.assertTrue(mock_updt_from_dpkg.called)
--- /dev/null
+# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Bob Callaway. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for NetApp API layer
+"""
+
+
+from cinder.i18n import _
+from cinder import test
+from cinder.volume.drivers.netapp.dataontap.client.api import NaElement
+from cinder.volume.drivers.netapp.dataontap.client.api import NaServer
+
+
+class NetAppApiElementTransTests(test.TestCase):
+ """Test case for NetApp API element translations."""
+
+ def setUp(self):
+ super(NetAppApiElementTransTests, self).setUp()
+
+ def test_translate_struct_dict_unique_key(self):
+ """Tests if dict gets properly converted to NaElements."""
+ root = NaElement('root')
+ child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}
+ root.translate_struct(child)
+ self.assertEqual(len(root.get_children()), 3)
+ self.assertEqual(root.get_child_content('e1'), 'v1')
+ self.assertEqual(root.get_child_content('e2'), 'v2')
+ self.assertEqual(root.get_child_content('e3'), 'v3')
+
+ def test_translate_struct_dict_nonunique_key(self):
+ """Tests if list/dict gets properly converted to NaElements."""
+ root = NaElement('root')
+ child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]
+ root.translate_struct(child)
+ self.assertEqual(len(root.get_children()), 3)
+ children = root.get_children()
+ for c in children:
+ if c.get_name() == 'e1':
+ self.assertIn(c.get_content(), ['v1', 'v3'])
+ else:
+ self.assertEqual(c.get_content(), 'v2')
+
+ def test_translate_struct_list(self):
+ """Tests if list gets properly converted to NaElements."""
+ root = NaElement('root')
+ child = ['e1', 'e2']
+ root.translate_struct(child)
+ self.assertEqual(len(root.get_children()), 2)
+ self.assertIsNone(root.get_child_content('e1'))
+ self.assertIsNone(root.get_child_content('e2'))
+
+ def test_translate_struct_tuple(self):
+ """Tests if tuple gets properly converted to NaElements."""
+ root = NaElement('root')
+ child = ('e1', 'e2')
+ root.translate_struct(child)
+ self.assertEqual(len(root.get_children()), 2)
+ self.assertIsNone(root.get_child_content('e1'))
+ self.assertIsNone(root.get_child_content('e2'))
+
+ def test_translate_invalid_struct(self):
+ """Tests if invalid data structure raises exception."""
+ root = NaElement('root')
+ child = 'random child element'
+ self.assertRaises(ValueError, root.translate_struct, child)
+
+ def test_setter_builtin_types(self):
+ """Tests str, int, float get converted to NaElement."""
+ root = NaElement('root')
+ root['e1'] = 'v1'
+ root['e2'] = 1
+ root['e3'] = 2.0
+ root['e4'] = 8l
+ self.assertEqual(len(root.get_children()), 4)
+ self.assertEqual(root.get_child_content('e1'), 'v1')
+ self.assertEqual(root.get_child_content('e2'), '1')
+ self.assertEqual(root.get_child_content('e3'), '2.0')
+ self.assertEqual(root.get_child_content('e4'), '8')
+
+ def test_setter_na_element(self):
+ """Tests na_element gets appended as child."""
+ root = NaElement('root')
+ root['e1'] = NaElement('nested')
+ self.assertEqual(len(root.get_children()), 1)
+ e1 = root.get_child_by_name('e1')
+ self.assertIsInstance(e1, NaElement)
+ self.assertIsInstance(e1.get_child_by_name('nested'), NaElement)
+
+ def test_setter_child_dict(self):
+ """Tests dict is appended as child to root."""
+ root = NaElement('root')
+ root['d'] = {'e1': 'v1', 'e2': 'v2'}
+ e1 = root.get_child_by_name('d')
+ self.assertIsInstance(e1, NaElement)
+ sub_ch = e1.get_children()
+ self.assertEqual(len(sub_ch), 2)
+ for c in sub_ch:
+ self.assertIn(c.get_name(), ['e1', 'e2'])
+ if c.get_name() == 'e1':
+ self.assertEqual(c.get_content(), 'v1')
+ else:
+ self.assertEqual(c.get_content(), 'v2')
+
+ def test_setter_child_list_tuple(self):
+ """Tests list/tuple are appended as child to root."""
+ root = NaElement('root')
+ root['l'] = ['l1', 'l2']
+ root['t'] = ('t1', 't2')
+ l = root.get_child_by_name('l')
+ self.assertIsInstance(l, NaElement)
+ t = root.get_child_by_name('t')
+ self.assertIsInstance(t, NaElement)
+ for le in l.get_children():
+ self.assertIn(le.get_name(), ['l1', 'l2'])
+ for te in t.get_children():
+ self.assertIn(te.get_name(), ['t1', 't2'])
+
+ def test_setter_no_value(self):
+ """Tests key with None value."""
+ root = NaElement('root')
+ root['k'] = None
+ self.assertIsNone(root.get_child_content('k'))
+
+ def test_setter_invalid_value(self):
+ """Tests invalid value raises exception."""
+ root = NaElement('root')
+ try:
+ root['k'] = NaServer('localhost')
+ except Exception as e:
+ if not isinstance(e, TypeError):
+ self.fail(_('Error not a TypeError.'))
+
+ def test_setter_invalid_key(self):
+ """Tests invalid value raises exception."""
+ root = NaElement('root')
+ try:
+ root[None] = 'value'
+ except Exception as e:
+ if not isinstance(e, KeyError):
+ self.fail(_('Error not a KeyError.'))
-# Copyright (c) - 2014, Alex Meade. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
import six
from cinder import test
-from cinder.volume.drivers.netapp import api as netapp_api
-from cinder.volume.drivers.netapp.client import seven_mode
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import client_7mode
+
+
+CONNECTION_INFO = {'hostname': 'hostname',
+ 'transport_type': 'https',
+ 'port': 443,
+ 'username': 'admin',
+ 'password': 'passw0rd'}
class NetApp7modeClientTestCase(test.TestCase):
def setUp(self):
super(NetApp7modeClientTestCase, self).setUp()
- self.connection = mock.MagicMock()
+
self.fake_volume = six.text_type(uuid.uuid4())
- self.client = seven_mode.Client(self.connection, [self.fake_volume])
+
+ with mock.patch.object(client_7mode.Client,
+ 'get_ontapi_version',
+ return_value=(1, 20)):
+ self.client = client_7mode.Client([self.fake_volume],
+ **CONNECTION_INFO)
+
+ self.client.connection = mock.MagicMock()
+ self.connection = self.client.connection
self.fake_lun = six.text_type(uuid.uuid4())
def tearDown(self):
-# Copyright (c) - 2014, Alex Meade. All rights reserved.
-# All Rights Reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
import six
from cinder import test
-from cinder.volume.drivers.netapp import api as netapp_api
-from cinder.volume.drivers.netapp.client import base
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import client_base
+
+
+CONNECTION_INFO = {'hostname': 'hostname',
+ 'transport_type': 'https',
+ 'port': 443,
+ 'username': 'admin',
+ 'password': 'passw0rd'}
class NetAppBaseClientTestCase(test.TestCase):
def setUp(self):
super(NetAppBaseClientTestCase, self).setUp()
- self.connection = mock.MagicMock()
- self.client = base.Client(self.connection)
+ self.client = client_base.Client(**CONNECTION_INFO)
+ self.client.connection = mock.MagicMock()
+ self.connection = self.client.connection
self.fake_volume = six.text_type(uuid.uuid4())
self.fake_lun = six.text_type(uuid.uuid4())
self.fake_size = '1024'
- self.fake_metadata = {
- 'OsType': 'linux',
- 'SpaceReserved': 'true',
- }
+ self.fake_metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
def tearDown(self):
super(NetAppBaseClientTestCase, self).tearDown()
</results>"""))
self.connection.invoke_successfully.return_value = version_response
- major, minor = self.client.get_ontapi_version()
+ major, minor = self.client.get_ontapi_version(cached=False)
self.assertEqual('1', major)
self.assertEqual('19', minor)
+ def test_get_ontapi_version_cached(self):
+
+ self.connection.get_api_version.return_value = (1, 20)
+ major, minor = self.client.get_ontapi_version()
+ self.assertEqual(1, self.connection.get_api_version.call_count)
+ self.assertEqual(1, major)
+ self.assertEqual(20, minor)
+
+ def test_check_is_naelement(self):
+
+ element = netapp_api.NaElement('name')
+ self.assertIsNone(self.client.check_is_naelement(element))
+ self.assertRaises(ValueError, self.client.check_is_naelement, None)
+
def test_create_lun(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
-# Copyright (c) - 2014, Alex Meade. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
from cinder import exception
from cinder import test
-from cinder.volume.drivers.netapp import api as netapp_api
-from cinder.volume.drivers.netapp.client import cmode
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+
+
+CONNECTION_INFO = {'hostname': 'hostname',
+ 'transport_type': 'https',
+ 'port': 443,
+ 'username': 'admin',
+ 'password': 'passw0rd',
+ 'vserver': 'fake_vserver'}
class NetAppCmodeClientTestCase(test.TestCase):
def setUp(self):
super(NetAppCmodeClientTestCase, self).setUp()
- self.connection = mock.MagicMock()
- self.vserver = 'fake_vserver'
- self.client = cmode.Client(self.connection, self.vserver)
+
+ with mock.patch.object(client_cmode.Client,
+ 'get_ontapi_version',
+ return_value=(1, 20)):
+ self.client = client_cmode.Client(**CONNECTION_INFO)
+
+ self.client.connection = mock.MagicMock()
+ self.connection = self.client.connection
+ self.vserver = CONNECTION_INFO['vserver']
self.fake_volume = six.text_type(uuid.uuid4())
self.fake_lun = six.text_type(uuid.uuid4())
--- /dev/null
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Mock unit tests for the NetApp block storage 7-mode library
+"""
+
+import uuid
+
+import mock
+import six
+
+from cinder import test
+from cinder.volume.drivers.netapp.dataontap import block_7mode
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+
+FAKE_VOLUME = six.text_type(uuid.uuid4())
+FAKE_LUN = six.text_type(uuid.uuid4())
+FAKE_SIZE = '1024'
+FAKE_METADATA = {'OsType': 'linux', 'SpaceReserved': 'true'}
+
+
+class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
+ """Test case for NetApp's 7-Mode iSCSI library."""
+
+ def setUp(self):
+ super(NetAppBlockStorage7modeLibraryTestCase, self).setUp()
+
+ kwargs = {'configuration': mock.Mock()}
+ self.library = block_7mode.NetAppBlockStorage7modeLibrary('driver',
+ 'protocol',
+ **kwargs)
+
+ self.library.zapi_client = mock.Mock()
+ self.library.vfiler = mock.Mock()
+
+ def tearDown(self):
+ super(NetAppBlockStorage7modeLibraryTestCase, self).tearDown()
+
+ def test_clone_lun_zero_block_count(self):
+ """Test for when clone lun is not passed a block count."""
+
+ lun = netapp_api.NaElement.create_node_with_children(
+ 'lun-info',
+ **{'alignment': 'indeterminate',
+ 'block-size': '512',
+ 'comment': '',
+ 'creation-timestamp': '1354536362',
+ 'is-space-alloc-enabled': 'false',
+ 'is-space-reservation-enabled': 'true',
+ 'mapped': 'false',
+ 'multiprotocol-type': 'linux',
+ 'online': 'true',
+ 'path': '/vol/fakeLUN/fakeLUN',
+ 'prefix-size': '0',
+ 'qtree': '',
+ 'read-only': 'false',
+ 'serial-number': '2FfGI$APyN68',
+ 'share-state': 'none',
+ 'size': '20971520',
+ 'size-used': '0',
+ 'staging': 'false',
+ 'suffix-size': '0',
+ 'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
+ 'volume': 'fakeLUN',
+ 'vserver': 'fake_vserver'})
+ self.library._get_lun_attr = mock.Mock(return_value={
+ 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
+ self.library.zapi_client = mock.Mock()
+ self.library.zapi_client.get_lun_by_args.return_value = [lun]
+ self.library._add_lun_to_table = mock.Mock()
+
+ self.library._clone_lun('fakeLUN', 'newFakeLUN')
+
+ self.library.zapi_client.clone_lun.assert_called_once_with(
+ '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
+ 'newFakeLUN', 'true', block_count=0, dest_block=0, src_block=0)
+
+ @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
+ '_refresh_volume_info', mock.Mock())
+ @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
+ '_get_pool_stats', mock.Mock())
+ def test_vol_stats_calls_provide_ems(self):
+ self.library.zapi_client.provide_ems = mock.Mock()
+ self.library.get_volume_stats(refresh=True)
+ self.assertEqual(self.library.zapi_client.provide_ems.call_count, 1)
+
+ def test_create_lun(self):
+ self.library.vol_refresh_voluntary = False
+
+ self.library._create_lun(FAKE_VOLUME, FAKE_LUN, FAKE_SIZE,
+ FAKE_METADATA)
+
+ self.library.zapi_client.create_lun.assert_called_once_with(
+ FAKE_VOLUME, FAKE_LUN, FAKE_SIZE, FAKE_METADATA, None)
+
+ self.assertTrue(self.library.vol_refresh_voluntary)
--- /dev/null
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Mock unit tests for the NetApp block storage library
+"""
+
+import uuid
+
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder.volume.drivers.netapp.dataontap import block_base
+from cinder.volume.drivers.netapp import utils as na_utils
+
+
+class NetAppBlockStorageLibraryTestCase(test.TestCase):
+
+ def setUp(self):
+ super(NetAppBlockStorageLibraryTestCase, self).setUp()
+
+ kwargs = {'configuration': mock.Mock()}
+ self.library = block_base.NetAppBlockStorageLibrary('driver',
+ 'protocol',
+ **kwargs)
+ self.library.zapi_client = mock.Mock()
+ self.mock_request = mock.Mock()
+
+ def tearDown(self):
+ super(NetAppBlockStorageLibraryTestCase, self).tearDown()
+
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr',
+ mock.Mock(return_value={'Volume': 'vol1'}))
+ def test_get_pool(self):
+ pool = self.library.get_pool({'name': 'volume-fake-uuid'})
+ self.assertEqual(pool, 'vol1')
+
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr',
+ mock.Mock(return_value=None))
+ def test_get_pool_no_metadata(self):
+ pool = self.library.get_pool({'name': 'volume-fake-uuid'})
+ self.assertEqual(pool, None)
+
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr',
+ mock.Mock(return_value=dict()))
+ def test_get_pool_volume_unknown(self):
+ pool = self.library.get_pool({'name': 'volume-fake-uuid'})
+ self.assertEqual(pool, None)
+
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_create_lun',
+ mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun_handle',
+ mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_add_lun_to_table',
+ mock.Mock())
+ @mock.patch.object(na_utils, 'get_volume_extra_specs',
+ mock.Mock(return_value=None))
+ @mock.patch.object(block_base, 'LOG',
+ mock.Mock())
+ def test_create_volume(self):
+ self.library.create_volume({'name': 'lun1', 'size': 100,
+ 'id': uuid.uuid4(),
+ 'host': 'hostname@backend#vol1'})
+ self.library._create_lun.assert_called_once_with(
+ 'vol1', 'lun1', 107374182400, mock.ANY, None)
+ self.assertEqual(0, block_base.LOG.warn.call_count)
+
+ def test_create_volume_no_pool_provided_by_scheduler(self):
+ self.assertRaises(exception.InvalidHost, self.library.create_volume,
+ {'name': 'lun1', 'size': 100,
+ 'id': uuid.uuid4(),
+ 'host': 'hostname@backend'}) # missing pool
+
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun_handle', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_add_lun_to_table', mock.Mock())
+ @mock.patch.object(na_utils, 'LOG', mock.Mock())
+ @mock.patch.object(na_utils, 'get_volume_extra_specs',
+ mock.Mock(return_value={'netapp:raid_type': 'raid4'}))
+ def test_create_volume_obsolete_extra_spec(self):
+
+ self.library.create_volume({'name': 'lun1', 'size': 100,
+ 'id': uuid.uuid4(),
+ 'host': 'hostname@backend#vol1'})
+ warn_msg = 'Extra spec netapp:raid_type is obsolete. ' \
+ 'Use netapp_raid_type instead.'
+ na_utils.LOG.warn.assert_called_once_with(warn_msg)
+
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun_handle', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_add_lun_to_table', mock.Mock())
+ @mock.patch.object(na_utils, 'LOG', mock.Mock())
+ @mock.patch.object(na_utils, 'get_volume_extra_specs',
+ mock.Mock(return_value={'netapp_thick_provisioned':
+ 'true'}))
+ def test_create_volume_deprecated_extra_spec(self):
+
+ self.library.create_volume({'name': 'lun1', 'size': 100,
+ 'id': uuid.uuid4(),
+ 'host': 'hostname@backend#vol1'})
+ warn_msg = 'Extra spec netapp_thick_provisioned is deprecated. ' \
+ 'Use netapp_thin_provisioned instead.'
+ na_utils.LOG.warn.assert_called_once_with(warn_msg)
--- /dev/null
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Mock unit tests for the NetApp block storage C-mode library
+"""
+
+import uuid
+
+import mock
+import six
+
+from cinder import test
+from cinder.volume.drivers.netapp.dataontap import block_cmode
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap import ssc_cmode
+
+FAKE_VOLUME = six.text_type(uuid.uuid4())
+FAKE_LUN = six.text_type(uuid.uuid4())
+FAKE_SIZE = '1024'
+FAKE_METADATA = {'OsType': 'linux', 'SpaceReserved': 'true'}
+
+
+class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
+ """Test case for NetApp's C-Mode iSCSI library."""
+
+ def setUp(self):
+ super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp()
+
+ kwargs = {'configuration': mock.Mock()}
+ self.library = block_cmode.NetAppBlockStorageCmodeLibrary('driver',
+ 'protocol',
+ **kwargs)
+ self.library.zapi_client = mock.Mock()
+ self.library.vserver = mock.Mock()
+ self.library.ssc_vols = None
+
+ def tearDown(self):
+ super(NetAppBlockStorageCmodeLibraryTestCase, self).tearDown()
+
+ def test_clone_lun_zero_block_count(self):
+ """Test for when clone lun is not passed a block count."""
+
+ self.library._get_lun_attr = mock.Mock(return_value={'Volume':
+ 'fakeLUN'})
+ self.library.zapi_client = mock.Mock()
+ self.library.zapi_client.get_lun_by_args.return_value = [
+ mock.Mock(spec=netapp_api.NaElement)]
+ lun = netapp_api.NaElement.create_node_with_children(
+ 'lun-info',
+ **{'alignment': 'indeterminate',
+ 'block-size': '512',
+ 'comment': '',
+ 'creation-timestamp': '1354536362',
+ 'is-space-alloc-enabled': 'false',
+ 'is-space-reservation-enabled': 'true',
+ 'mapped': 'false',
+ 'multiprotocol-type': 'linux',
+ 'online': 'true',
+ 'path': '/vol/fakeLUN/lun1',
+ 'prefix-size': '0',
+ 'qtree': '',
+ 'read-only': 'false',
+ 'serial-number': '2FfGI$APyN68',
+ 'share-state': 'none',
+ 'size': '20971520',
+ 'size-used': '0',
+ 'staging': 'false',
+ 'suffix-size': '0',
+ 'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
+ 'volume': 'fakeLUN',
+ 'vserver': 'fake_vserver'})
+ self.library._get_lun_by_args = mock.Mock(return_value=[lun])
+ self.library._add_lun_to_table = mock.Mock()
+ self.library._update_stale_vols = mock.Mock()
+
+ self.library._clone_lun('fakeLUN', 'newFakeLUN')
+
+ self.library.zapi_client.clone_lun.assert_called_once_with(
+ 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'true', block_count=0,
+ dest_block=0, src_block=0)
+
+ @mock.patch.object(ssc_cmode, 'refresh_cluster_ssc', mock.Mock())
+ @mock.patch.object(block_cmode.NetAppBlockStorageCmodeLibrary,
+ '_get_pool_stats', mock.Mock())
+ def test_vol_stats_calls_provide_ems(self):
+ self.library.zapi_client.provide_ems = mock.Mock()
+ self.library.get_volume_stats(refresh=True)
+ self.assertEqual(self.library.zapi_client.provide_ems.call_count, 1)
+
+ def test_create_lun(self):
+ self.library._update_stale_vols = mock.Mock()
+
+ self.library._create_lun(FAKE_VOLUME,
+ FAKE_LUN,
+ FAKE_SIZE,
+ FAKE_METADATA)
+
+ self.library.zapi_client.create_lun.assert_called_once_with(
+ FAKE_VOLUME, FAKE_LUN, FAKE_SIZE,
+ FAKE_METADATA, None)
+
+ self.assertEqual(1, self.library._update_stale_vols.call_count)
--- /dev/null
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Mock unit tests for the NetApp E-series driver utility module
+"""
+
+import six
+
+from cinder import test
+from cinder.volume.drivers.netapp.eseries import utils
+
+
+class NetAppEseriesDriverUtilsTestCase(test.TestCase):
+
+ def test_convert_uuid_to_es_fmt(self):
+ value = 'e67e931a-b2ed-4890-938b-3acc6a517fac'
+ result = utils.convert_uuid_to_es_fmt(value)
+ self.assertEqual(result, '4Z7JGGVS5VEJBE4LHLGGUUL7VQ')
+
+ def test_convert_es_fmt_to_uuid(self):
+ value = '4Z7JGGVS5VEJBE4LHLGGUUL7VQ'
+ result = six.text_type(utils.convert_es_fmt_to_uuid(value))
+ self.assertEqual(result, 'e67e931a-b2ed-4890-938b-3acc6a517fac')
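# A minimal sketch, consistent with the two expected values above, of how such
# a UUID <-> E-Series name conversion can be implemented (illustrative only;
# not a claim about the exact eseries utils code):
import base64
import uuid as uuid_lib

def uuid_to_es_fmt(value):
    # Base32-encode the 16 raw UUID bytes and strip the '=' padding.
    return base64.b32encode(uuid_lib.UUID(value).bytes).decode().rstrip('=')

def es_fmt_to_uuid(value):
    # Restore padding to a multiple of 8 characters before decoding.
    padded = value + '=' * ((8 - len(value) % 8) % 8)
    return uuid_lib.UUID(bytes=base64.b32decode(padded))

assert uuid_to_es_fmt('e67e931a-b2ed-4890-938b-3acc6a517fac') == \
    '4Z7JGGVS5VEJBE4LHLGGUUL7VQ'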
+++ /dev/null
-# Copyright (c) - 2014, Alex Meade. All rights reserved.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Mock unit tests for the NetApp iSCSI driver
-"""
-
-import uuid
-
-import mock
-import six
-
-from cinder import exception
-from cinder.i18n import _
-from cinder import test
-from cinder.tests.test_netapp import create_configuration
-import cinder.volume.drivers.netapp.api as ntapi
-import cinder.volume.drivers.netapp.iscsi as ntap_iscsi
-from cinder.volume.drivers.netapp.iscsi import NetAppDirect7modeISCSIDriver \
- as iscsi7modeDriver
-from cinder.volume.drivers.netapp.iscsi import NetAppDirectCmodeISCSIDriver \
- as iscsiCmodeDriver
-from cinder.volume.drivers.netapp.iscsi import NetAppDirectISCSIDriver \
- as iscsiDriver
-import cinder.volume.drivers.netapp.ssc_utils as ssc_utils
-import cinder.volume.drivers.netapp.utils as na_utils
-
-
-FAKE_VOLUME = six.text_type(uuid.uuid4())
-FAKE_LUN = six.text_type(uuid.uuid4())
-FAKE_SIZE = '1024'
-FAKE_METADATA = {'OsType': 'linux', 'SpaceReserved': 'true'}
-
-
-class NetAppDirectISCSIDriverTestCase(test.TestCase):
-
- def setUp(self):
- super(NetAppDirectISCSIDriverTestCase, self).setUp()
- configuration = self._set_config(create_configuration())
- self.driver = ntap_iscsi.NetAppDirectISCSIDriver(
- configuration=configuration)
- self.driver.client = mock.Mock()
- self.driver.zapi_client = mock.Mock()
- self.mock_request = mock.Mock()
-
- def _set_config(self, configuration):
- configuration.netapp_storage_protocol = 'iscsi'
- configuration.netapp_login = 'admin'
- configuration.netapp_password = 'pass'
- configuration.netapp_server_hostname = '127.0.0.1'
- configuration.netapp_transport_type = 'http'
- configuration.netapp_server_port = '80'
- return configuration
-
- def tearDown(self):
- super(NetAppDirectISCSIDriverTestCase, self).tearDown()
-
- @mock.patch.object(iscsiDriver, '_get_lun_attr',
- mock.Mock(return_value={'Volume': 'vol1'}))
- def test_get_pool(self):
- pool = self.driver.get_pool({'name': 'volume-fake-uuid'})
- self.assertEqual(pool, 'vol1')
-
- @mock.patch.object(iscsiDriver, '_get_lun_attr',
- mock.Mock(return_value=None))
- def test_get_pool_no_metadata(self):
- pool = self.driver.get_pool({'name': 'volume-fake-uuid'})
- self.assertEqual(pool, None)
-
- @mock.patch.object(iscsiDriver, '_get_lun_attr',
- mock.Mock(return_value=dict()))
- def test_get_pool_volume_unknown(self):
- pool = self.driver.get_pool({'name': 'volume-fake-uuid'})
- self.assertEqual(pool, None)
-
- @mock.patch.object(iscsiDriver, 'create_lun', mock.Mock())
- @mock.patch.object(iscsiDriver, '_create_lun_handle', mock.Mock())
- @mock.patch.object(iscsiDriver, '_add_lun_to_table', mock.Mock())
- @mock.patch.object(ntap_iscsi, 'LOG', mock.Mock())
- @mock.patch.object(ntap_iscsi, 'get_volume_extra_specs',
- mock.Mock(return_value=None))
- def test_create_volume(self):
- self.driver.create_volume({'name': 'lun1', 'size': 100,
- 'id': uuid.uuid4(),
- 'host': 'hostname@backend#vol1'})
- self.driver.create_lun.assert_called_once_with(
- 'vol1', 'lun1', 107374182400, mock.ANY, None)
- self.assertEqual(0, ntap_iscsi.LOG.warn.call_count)
-
- def test_create_volume_no_pool_provided_by_scheduler(self):
- self.assertRaises(exception.InvalidHost, self.driver.create_volume,
- {'name': 'lun1', 'size': 100,
- 'id': uuid.uuid4(),
- 'host': 'hostname@backend'}) # missing pool
-
- @mock.patch.object(iscsiDriver, 'create_lun', mock.Mock())
- @mock.patch.object(iscsiDriver, '_create_lun_handle', mock.Mock())
- @mock.patch.object(iscsiDriver, '_add_lun_to_table', mock.Mock())
- @mock.patch.object(na_utils, 'LOG', mock.Mock())
- @mock.patch.object(ntap_iscsi, 'get_volume_extra_specs',
- mock.Mock(return_value={'netapp:raid_type': 'raid4'}))
- def test_create_volume_obsolete_extra_spec(self):
-
- self.driver.create_volume({'name': 'lun1', 'size': 100,
- 'id': uuid.uuid4(),
- 'host': 'hostname@backend#vol1'})
- warn_msg = 'Extra spec netapp:raid_type is obsolete. ' \
- 'Use netapp_raid_type instead.'
- na_utils.LOG.warning.assert_called_once_with(warn_msg)
-
- @mock.patch.object(iscsiDriver, 'create_lun', mock.Mock())
- @mock.patch.object(iscsiDriver, '_create_lun_handle', mock.Mock())
- @mock.patch.object(iscsiDriver, '_add_lun_to_table', mock.Mock())
- @mock.patch.object(na_utils, 'LOG', mock.Mock())
- @mock.patch.object(ntap_iscsi, 'get_volume_extra_specs',
- mock.Mock(return_value={'netapp_thick_provisioned':
- 'true'}))
- def test_create_volume_deprecated_extra_spec(self):
-
- self.driver.create_volume({'name': 'lun1', 'size': 100,
- 'id': uuid.uuid4(),
- 'host': 'hostname@backend#vol1'})
- warn_msg = 'Extra spec netapp_thick_provisioned is deprecated. ' \
- 'Use netapp_thin_provisioned instead.'
- na_utils.LOG.warning.assert_called_once_with(warn_msg)
-
- def test_update_volume_stats_is_abstract(self):
- self.assertRaises(NotImplementedError,
- self.driver._update_volume_stats)
-
- def test_initialize_connection_no_target_details_found(self):
- fake_volume = {'name': 'mock-vol'}
- fake_connector = {'initiator': 'iqn.mock'}
- self.driver._map_lun = mock.Mock(return_value='mocked-lun-id')
- self.driver.zapi_client.get_iscsi_service_details = mock.Mock(
- return_value='mocked-iqn')
- self.driver.zapi_client.get_target_details = mock.Mock(return_value=[])
- expected = (_('No iscsi target details were found for LUN %s')
- % fake_volume['name'])
- try:
- self.driver.initialize_connection(fake_volume, fake_connector)
- except exception.VolumeBackendAPIException as exc:
- if expected not in six.text_type(exc):
- self.fail(_('Expected exception message is missing'))
- else:
- self.fail(_('VolumeBackendAPIException not raised'))
-
-
-class NetAppiSCSICModeTestCase(test.TestCase):
- """Test case for NetApp's C-Mode iSCSI driver."""
-
- def setUp(self):
- super(NetAppiSCSICModeTestCase, self).setUp()
- self.driver = ntap_iscsi.NetAppDirectCmodeISCSIDriver(
- configuration=mock.Mock())
- self.driver.client = mock.Mock()
- self.driver.zapi_client = mock.Mock()
- self.driver.vserver = mock.Mock()
- self.driver.ssc_vols = None
-
- def tearDown(self):
- super(NetAppiSCSICModeTestCase, self).tearDown()
-
- def test_clone_lun_zero_block_count(self):
- """Test for when clone lun is not passed a block count."""
-
- self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
- 'fakeLUN'})
- self.driver.zapi_client = mock.Mock()
- self.driver.zapi_client.get_lun_by_args.return_value = [
- mock.Mock(spec=ntapi.NaElement)]
- lun = ntapi.NaElement.create_node_with_children(
- 'lun-info',
- **{'alignment': 'indeterminate',
- 'block-size': '512',
- 'comment': '',
- 'creation-timestamp': '1354536362',
- 'is-space-alloc-enabled': 'false',
- 'is-space-reservation-enabled': 'true',
- 'mapped': 'false',
- 'multiprotocol-type': 'linux',
- 'online': 'true',
- 'path': '/vol/fakeLUN/lun1',
- 'prefix-size': '0',
- 'qtree': '',
- 'read-only': 'false',
- 'serial-number': '2FfGI$APyN68',
- 'share-state': 'none',
- 'size': '20971520',
- 'size-used': '0',
- 'staging': 'false',
- 'suffix-size': '0',
- 'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
- 'volume': 'fakeLUN',
- 'vserver': 'fake_vserver'})
- self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
- self.driver._add_lun_to_table = mock.Mock()
- self.driver._update_stale_vols = mock.Mock()
-
- self.driver._clone_lun('fakeLUN', 'newFakeLUN')
-
- self.driver.zapi_client.clone_lun.assert_called_once_with(
- 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'true', block_count=0,
- dest_block=0, src_block=0)
-
- @mock.patch.object(ssc_utils, 'refresh_cluster_ssc', mock.Mock())
- @mock.patch.object(iscsiCmodeDriver, '_get_pool_stats', mock.Mock())
- @mock.patch.object(na_utils, 'provide_ems', mock.Mock())
- def test_vol_stats_calls_provide_ems(self):
- self.driver.get_volume_stats(refresh=True)
- self.assertEqual(na_utils.provide_ems.call_count, 1)
-
- def test_create_lun(self):
- self.driver._update_stale_vols = mock.Mock()
-
- self.driver.create_lun(FAKE_VOLUME,
- FAKE_LUN,
- FAKE_SIZE,
- FAKE_METADATA)
-
- self.driver.zapi_client.create_lun.assert_called_once_with(
- FAKE_VOLUME, FAKE_LUN, FAKE_SIZE,
- FAKE_METADATA, None)
-
- self.assertEqual(1, self.driver._update_stale_vols.call_count)
-
-
-class NetAppiSCSI7ModeTestCase(test.TestCase):
- """Test case for NetApp's 7-Mode iSCSI driver."""
-
- def setUp(self):
- super(NetAppiSCSI7ModeTestCase, self).setUp()
- self.driver = ntap_iscsi.NetAppDirect7modeISCSIDriver(
- configuration=mock.Mock())
- self.driver.client = mock.Mock()
- self.driver.zapi_client = mock.Mock()
- self.driver.vfiler = mock.Mock()
-
- def tearDown(self):
- super(NetAppiSCSI7ModeTestCase, self).tearDown()
-
- def test_clone_lun_zero_block_count(self):
- """Test for when clone lun is not passed a block count."""
-
- lun = ntapi.NaElement.create_node_with_children(
- 'lun-info',
- **{'alignment': 'indeterminate',
- 'block-size': '512',
- 'comment': '',
- 'creation-timestamp': '1354536362',
- 'is-space-alloc-enabled': 'false',
- 'is-space-reservation-enabled': 'true',
- 'mapped': 'false',
- 'multiprotocol-type': 'linux',
- 'online': 'true',
- 'path': '/vol/fakeLUN/fakeLUN',
- 'prefix-size': '0',
- 'qtree': '',
- 'read-only': 'false',
- 'serial-number': '2FfGI$APyN68',
- 'share-state': 'none',
- 'size': '20971520',
- 'size-used': '0',
- 'staging': 'false',
- 'suffix-size': '0',
- 'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
- 'volume': 'fakeLUN',
- 'vserver': 'fake_vserver'})
- self.driver._get_lun_attr = mock.Mock(return_value={
- 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
- self.driver.zapi_client = mock.Mock()
- self.driver.zapi_client.get_lun_by_args.return_value = [lun]
- self.driver._add_lun_to_table = mock.Mock()
-
- self.driver._clone_lun('fakeLUN', 'newFakeLUN')
-
- self.driver.zapi_client.clone_lun.assert_called_once_with(
- '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
- 'newFakeLUN', 'true', block_count=0, dest_block=0, src_block=0)
-
- @mock.patch.object(iscsi7modeDriver, '_refresh_volume_info', mock.Mock())
- @mock.patch.object(iscsi7modeDriver, '_get_pool_stats', mock.Mock())
- @mock.patch.object(na_utils, 'provide_ems', mock.Mock())
- def test_vol_stats_calls_provide_ems(self):
- self.driver.get_volume_stats(refresh=True)
- self.assertEqual(na_utils.provide_ems.call_count, 1)
-
- def test_create_lun(self):
- self.driver.vol_refresh_voluntary = False
-
- self.driver.create_lun(FAKE_VOLUME,
- FAKE_LUN,
- FAKE_SIZE,
- FAKE_METADATA)
-
- self.driver.zapi_client.create_lun.assert_called_once_with(
- FAKE_VOLUME, FAKE_LUN, FAKE_SIZE,
- FAKE_METADATA, None)
-
- self.assertTrue(self.driver.vol_refresh_voluntary)
-# Copyright (c) Clinton Knight
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2014 Tom Barron. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
Mock unit tests for the NetApp driver utility module
"""
-import six
+import platform
+
+import mock
+from oslo.concurrency import processutils as putils
+
+from cinder import exception
from cinder import test
+from cinder import version
import cinder.volume.drivers.netapp.utils as na_utils
class NetAppDriverUtilsTestCase(test.TestCase):
+ @mock.patch.object(na_utils, 'LOG', mock.Mock())
+ def test_validate_instantiation_proxy(self):
+ kwargs = {'netapp_mode': 'proxy'}
+ na_utils.validate_instantiation(**kwargs)
+ self.assertEqual(na_utils.LOG.warning.call_count, 0)
+
+ @mock.patch.object(na_utils, 'LOG', mock.Mock())
+ def test_validate_instantiation_no_proxy(self):
+ kwargs = {'netapp_mode': 'asdf'}
+ na_utils.validate_instantiation(**kwargs)
+ self.assertEqual(na_utils.LOG.warning.call_count, 1)
+
+ def test_check_flags(self):
+
+ class TestClass(object):
+ pass
+
+ required_flags = ['flag1', 'flag2']
+ configuration = TestClass()
+ setattr(configuration, 'flag1', 'value1')
+ setattr(configuration, 'flag3', 'value3')
+ self.assertRaises(exception.InvalidInput, na_utils.check_flags,
+ required_flags, configuration)
+
+ setattr(configuration, 'flag2', 'value2')
+ self.assertIsNone(na_utils.check_flags(required_flags, configuration))
+
def test_to_bool(self):
self.assertTrue(na_utils.to_bool(True))
self.assertTrue(na_utils.to_bool('true'))
self.assertFalse(na_utils.to_bool(2))
self.assertFalse(na_utils.to_bool('2'))
- def test_convert_uuid_to_es_fmt(self):
- value = 'e67e931a-b2ed-4890-938b-3acc6a517fac'
- result = na_utils.convert_uuid_to_es_fmt(value)
- self.assertEqual(result, '4Z7JGGVS5VEJBE4LHLGGUUL7VQ')
+ def test_set_safe_attr(self):
- def test_convert_es_fmt_to_uuid(self):
- value = '4Z7JGGVS5VEJBE4LHLGGUUL7VQ'
- result = six.text_type(na_utils.convert_es_fmt_to_uuid(value))
- self.assertEqual(result, 'e67e931a-b2ed-4890-938b-3acc6a517fac')
+ fake_object = mock.Mock()
+ fake_object.fake_attr = None
+
+ # test initial checks
+ self.assertFalse(na_utils.set_safe_attr(None, fake_object, None))
+ self.assertFalse(na_utils.set_safe_attr(fake_object, None, None))
+ self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr',
+ None))
+
+ # test value isn't changed if it shouldn't be and retval is False
+ fake_object.fake_attr = 'fake_value'
+ self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr',
+ 'fake_value'))
+ self.assertEqual(fake_object.fake_attr, 'fake_value')
+
+ # test value is changed if it should be and retval is True
+ self.assertTrue(na_utils.set_safe_attr(fake_object, 'fake_attr',
+ 'new_fake_value'))
+ self.assertEqual(fake_object.fake_attr, 'new_fake_value')
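# A minimal sketch of an attribute setter consistent with the behaviour the
# assertions above pin down (illustrative only; not the actual na_utils code):
def set_safe_attr_sketch(instance, attr, val):
    """Set attr on instance only if it actually changes; report success."""
    if not instance or not attr:
        return False
    old_val = getattr(instance, attr, None)
    if val is None and old_val is None:
        return False
    if val == old_val:
        return False
    setattr(instance, attr, val)
    return True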
def test_round_down(self):
self.assertAlmostEqual(na_utils.round_down(5.567, '0.00'), 5.56)
self.assertAlmostEqual(na_utils.round_down(-5.567, '0.00'), -5.56)
self.assertAlmostEqual(na_utils.round_down(-5.567, '0.0'), -5.5)
self.assertAlmostEqual(na_utils.round_down(-5.567, '0'), -5)
+
+
+class OpenStackInfoTestCase(test.TestCase):
+
+ UNKNOWN_VERSION = 'unknown version'
+ UNKNOWN_RELEASE = 'unknown release'
+ UNKNOWN_VENDOR = 'unknown vendor'
+ UNKNOWN_PLATFORM = 'unknown platform'
+ VERSION_STRING_RET_VAL = 'fake_version_1'
+ RELEASE_STRING_RET_VAL = 'fake_release_1'
+ PLATFORM_RET_VAL = 'fake_platform_1'
+ VERSION_INFO_VERSION = 'fake_version_2'
+ VERSION_INFO_RELEASE = 'fake_release_2'
+ RPM_INFO_VERSION = 'fake_version_3'
+ RPM_INFO_RELEASE = 'fake_release_3'
+ RPM_INFO_VENDOR = 'fake vendor 3'
+ PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '')
+ NO_PKG_FOUND = ('', 'whatever')
+ PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '')
+ DEB_RLS = 'upstream_version-debian_revision'
+ DEB_VENDOR = 'debian_revision'
+
+ def setUp(self):
+ super(OpenStackInfoTestCase, self).setUp()
+
+ def test_openstack_info_init(self):
+ info = na_utils.OpenStackInfo()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(version.version_info, 'version_string',
+ mock.Mock(return_value=VERSION_STRING_RET_VAL))
+ def test_update_version_from_version_string(self):
+ info = na_utils.OpenStackInfo()
+ info._update_version_from_version_string()
+
+ self.assertEqual(self.VERSION_STRING_RET_VAL, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(version.version_info, 'version_string',
+ mock.Mock(side_effect=Exception))
+ def test_xcption_in_update_version_from_version_string(self):
+ info = na_utils.OpenStackInfo()
+ info._update_version_from_version_string()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(version.version_info, 'release_string',
+ mock.Mock(return_value=RELEASE_STRING_RET_VAL))
+ def test_update_release_from_release_string(self):
+ info = na_utils.OpenStackInfo()
+ info._update_release_from_release_string()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(version.version_info, 'release_string',
+ mock.Mock(side_effect=Exception))
+ def test_xcption_in_update_release_from_release_string(self):
+ info = na_utils.OpenStackInfo()
+ info._update_release_from_release_string()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(platform, 'platform',
+ mock.Mock(return_value=PLATFORM_RET_VAL))
+ def test_update_platform(self):
+ info = na_utils.OpenStackInfo()
+ info._update_platform()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.PLATFORM_RET_VAL, info._platform)
+
+ @mock.patch.object(platform, 'platform',
+ mock.Mock(side_effect=Exception))
+ def test_xcption_in_update_platform(self):
+ info = na_utils.OpenStackInfo()
+ info._update_platform()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version',
+ mock.Mock(return_value=VERSION_INFO_VERSION))
+ @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release',
+ mock.Mock(return_value=VERSION_INFO_RELEASE))
+ def test_update_info_from_version_info(self):
+ info = na_utils.OpenStackInfo()
+ info._update_info_from_version_info()
+
+ self.assertEqual(self.VERSION_INFO_VERSION, info._version)
+ self.assertEqual(self.VERSION_INFO_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version',
+ mock.Mock(return_value=''))
+ @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release',
+ mock.Mock(return_value=None))
+ def test_no_info_from_version_info(self):
+ info = na_utils.OpenStackInfo()
+ info._update_info_from_version_info()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version',
+ mock.Mock(return_value=VERSION_INFO_VERSION))
+ @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release',
+ mock.Mock(side_effect=Exception))
+ def test_xcption_in_info_from_version_info(self):
+ info = na_utils.OpenStackInfo()
+ info._update_info_from_version_info()
+
+ self.assertEqual(self.VERSION_INFO_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+
+ @mock.patch.object(putils, 'execute',
+ mock.Mock(return_value=PUTILS_RPM_RET_VAL))
+ def test_update_info_from_rpm(self):
+ info = na_utils.OpenStackInfo()
+ found_package = info._update_info_from_rpm()
+
+ self.assertEqual(self.RPM_INFO_VERSION, info._version)
+ self.assertEqual(self.RPM_INFO_RELEASE, info._release)
+ self.assertEqual(self.RPM_INFO_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+ self.assertTrue(found_package)
+
+ @mock.patch.object(putils, 'execute',
+ mock.Mock(return_value=NO_PKG_FOUND))
+ def test_update_info_from_rpm_no_pkg_found(self):
+ info = na_utils.OpenStackInfo()
+ found_package = info._update_info_from_rpm()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+ self.assertFalse(found_package)
+
+ @mock.patch.object(putils, 'execute',
+ mock.Mock(side_effect=Exception))
+ def test_xcption_in_update_info_from_rpm(self):
+ info = na_utils.OpenStackInfo()
+ found_package = info._update_info_from_rpm()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+ self.assertFalse(found_package)
+
+ @mock.patch.object(putils, 'execute',
+ mock.Mock(return_value=PUTILS_DPKG_RET_VAL))
+ def test_update_info_from_dpkg(self):
+ info = na_utils.OpenStackInfo()
+ found_package = info._update_info_from_dpkg()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.DEB_RLS, info._release)
+ self.assertEqual(self.DEB_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+ self.assertTrue(found_package)
+
+ @mock.patch.object(putils, 'execute',
+ mock.Mock(return_value=NO_PKG_FOUND))
+ def test_update_info_from_dpkg_no_pkg_found(self):
+ info = na_utils.OpenStackInfo()
+ found_package = info._update_info_from_dpkg()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+ self.assertFalse(found_package)
+
+ @mock.patch.object(putils, 'execute',
+ mock.Mock(side_effect=Exception))
+ def test_xcption_in_update_info_from_dpkg(self):
+ info = na_utils.OpenStackInfo()
+ found_package = info._update_info_from_dpkg()
+
+ self.assertEqual(self.UNKNOWN_VERSION, info._version)
+ self.assertEqual(self.UNKNOWN_RELEASE, info._release)
+ self.assertEqual(self.UNKNOWN_VENDOR, info._vendor)
+ self.assertEqual(self.UNKNOWN_PLATFORM, info._platform)
+ self.assertFalse(found_package)
+
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_version_from_version_string', mock.Mock())
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_release_from_release_string', mock.Mock())
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_platform', mock.Mock())
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_info_from_version_info', mock.Mock())
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_info_from_rpm', mock.Mock(return_value=True))
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_info_from_dpkg')
+ def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg):
+ info = na_utils.OpenStackInfo()
+ info._update_openstack_info()
+
+ self.assertFalse(mock_updt_from_dpkg.called)
+
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_version_from_version_string', mock.Mock())
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_release_from_release_string', mock.Mock())
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_platform', mock.Mock())
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_info_from_version_info', mock.Mock())
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_info_from_rpm', mock.Mock(return_value=False))
+ @mock.patch.object(na_utils.OpenStackInfo,
+ '_update_info_from_dpkg')
+ def test_update_openstack_info_rpm_pkg_not_found(self,
+ mock_updt_from_dpkg):
+ info = na_utils.OpenStackInfo()
+ info._update_openstack_info()
+
+ self.assertTrue(mock_updt_from_dpkg.called)
-# Copyright (c) 2012 NetApp, Inc.
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
from oslo.utils import importutils
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.options import netapp_proxy_opts
-from cinder.volume.drivers.netapp import utils
+from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
# NOTE(singn): Holds family:{protocol:driver} registration information.
# Plug in new families and protocols to support new drivers.
# No other code modification required.
+
+DATAONTAP_PATH = 'cinder.volume.drivers.netapp.dataontap'
+ESERIES_PATH = 'cinder.volume.drivers.netapp.eseries'
+
netapp_unified_plugin_registry =\
{'ontap_cluster':
{
- 'iscsi':
- 'cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver',
- 'nfs': 'cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver'
+ 'iscsi': DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver',
+ 'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver'
},
'ontap_7mode':
{
- 'iscsi':
- 'cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver',
- 'nfs':
- 'cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver'
+ 'iscsi': DATAONTAP_PATH + '.iscsi_7mode.NetApp7modeISCSIDriver',
+ 'nfs': DATAONTAP_PATH + '.nfs_7mode.NetApp7modeNfsDriver'
},
'eseries':
{
- 'iscsi':
- 'cinder.volume.drivers.netapp.eseries.iscsi.Driver'
+ 'iscsi': ESERIES_PATH + '.iscsi.NetAppEseriesISCSIDriver'
},
}
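# As the NOTE above states, supporting a new backend only requires an entry in
# this registry; the factory below needs no other change. A hedged
# illustration (the 'fc' protocol and its module path are hypothetical and not
# part of this change):
EXAMPLE_REGISTRY_ENTRY = {
    'ontap_cluster': {
        'iscsi': DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver',
        'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver',
        # one additional line is all a new protocol driver would need:
        'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver',
    },
}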
-# NOTE(singn): Holds family:protocol information.
-# Protocol represents the default protocol driver option
-# in case no protocol is specified by the user in configuration.
-netapp_family_default =\
- {
- 'ontap_cluster': 'nfs',
- 'ontap_7mode': 'nfs',
- 'eseries': 'iscsi'
- }
-
class NetAppDriver(object):
""""NetApp unified block storage driver.
Override the proxy driver method by adding method in this driver.
"""
+ REQUIRED_FLAGS = ['netapp_storage_family', 'netapp_storage_protocol']
+
def __init__(self, *args, **kwargs):
super(NetAppDriver, self).__init__()
- app_version = utils.OpenStackInfo().info()
- LOG.info(_('OpenStack OS Version Info: %(info)s') % {
+
+ app_version = na_utils.OpenStackInfo().info()
+ LOG.info(_LI('OpenStack OS Version Info: %(info)s') % {
'info': app_version})
+
self.configuration = kwargs.get('configuration', None)
- if self.configuration:
- self.configuration.append_config_values(netapp_proxy_opts)
- else:
+ if not self.configuration:
raise exception.InvalidInput(
reason=_("Required configuration not found"))
+
+ self.configuration.append_config_values(netapp_proxy_opts)
+ na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
+
kwargs['app_version'] = app_version
+
self.driver = NetAppDriverFactory.create_driver(
self.configuration.netapp_storage_family,
self.configuration.netapp_storage_protocol,
"""Factory to instantiate appropriate NetApp driver."""
@staticmethod
- def create_driver(
- storage_family, storage_protocol, *args, **kwargs):
+ def create_driver(storage_family, storage_protocol, *args, **kwargs):
""""Creates an appropriate driver based on family and protocol."""
- fmt = {'storage_family': storage_family,
- 'storage_protocol': storage_protocol}
- LOG.info(_('Requested unified config: %(storage_family)s and '
- '%(storage_protocol)s') % fmt)
- storage_family = storage_family.lower()
+
+        storage_family = storage_family.lower()
+        storage_protocol = storage_protocol.lower()
+        fmt = {'storage_family': storage_family,
+               'storage_protocol': storage_protocol}
+ LOG.info(_LI('Requested unified config: %(storage_family)s and '
+ '%(storage_protocol)s') % fmt)
+
family_meta = netapp_unified_plugin_registry.get(storage_family)
if family_meta is None:
raise exception.InvalidInput(
reason=_('Storage family %s is not supported')
% storage_family)
- if storage_protocol is None:
- storage_protocol = netapp_family_default.get(storage_family)
- fmt['storage_protocol'] = storage_protocol
- if storage_protocol is None:
- raise exception.InvalidInput(
- reason=_('No default storage protocol found'
- ' for storage family %(storage_family)s')
- % fmt)
- storage_protocol = storage_protocol.lower()
+
driver_loc = family_meta.get(storage_protocol)
if driver_loc is None:
raise exception.InvalidInput(
reason=_('Protocol %(storage_protocol)s is not supported'
' for storage family %(storage_family)s')
% fmt)
+
NetAppDriverFactory.check_netapp_driver(driver_loc)
kwargs = kwargs or {}
kwargs['netapp_mode'] = 'proxy'
driver = importutils.import_object(driver_loc, *args, **kwargs)
- LOG.info(_('NetApp driver of family %(storage_family)s and protocol'
- ' %(storage_protocol)s loaded') % fmt)
+ LOG.info(_LI('NetApp driver of family %(storage_family)s and protocol'
+ ' %(storage_protocol)s loaded') % fmt)
return driver
@staticmethod
--- /dev/null
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Andrew Kerr. All rights reserved.
+# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver library for NetApp 7-mode block storage systems.
+"""
+
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+
+from cinder import exception
+from cinder.i18n import _, _LW
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.netapp.dataontap import block_base
+from cinder.volume.drivers.netapp.dataontap.client import client_7mode
+from cinder.volume.drivers.netapp import options as na_opts
+from cinder.volume.drivers.netapp import utils as na_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
+ """NetApp block storage library for Data ONTAP (7-mode)."""
+
+ def __init__(self, driver_name, driver_protocol, **kwargs):
+ super(NetAppBlockStorage7modeLibrary, self).__init__(driver_name,
+ driver_protocol,
+ **kwargs)
+ self.configuration.append_config_values(na_opts.netapp_7mode_opts)
+ self.driver_mode = '7mode'
+
+ def do_setup(self, context):
+ super(NetAppBlockStorage7modeLibrary, self).do_setup(context)
+
+ self.volume_list = self.configuration.netapp_volume_list
+ if self.volume_list:
+ self.volume_list = self.volume_list.split(',')
+ self.volume_list = [el.strip() for el in self.volume_list]
+
+ self.vfiler = self.configuration.netapp_vfiler
+
+ self.zapi_client = client_7mode.Client(
+ self.volume_list,
+ transport_type=self.configuration.netapp_transport_type,
+ username=self.configuration.netapp_login,
+ password=self.configuration.netapp_password,
+ hostname=self.configuration.netapp_server_hostname,
+ port=self.configuration.netapp_server_port,
+ vfiler=self.vfiler)
+
+ self.vol_refresh_time = None
+ self.vol_refresh_interval = 1800
+ self.vol_refresh_running = False
+ self.vol_refresh_voluntary = False
+ self.root_volume_name = self._get_root_volume_name()
+
+ def check_for_setup_error(self):
+ """Check that the driver is working and can communicate."""
+ api_version = self.zapi_client.get_ontapi_version()
+ if api_version:
+ major, minor = api_version
+ if major == 1 and minor < 9:
+ msg = _("Unsupported Data ONTAP version."
+ " Data ONTAP version 7.3.1 and above is supported.")
+ raise exception.VolumeBackendAPIException(data=msg)
+ else:
+ msg = _("API version could not be determined.")
+ raise exception.VolumeBackendAPIException(data=msg)
+ super(NetAppBlockStorage7modeLibrary, self).check_for_setup_error()
+
+ def _create_lun(self, volume_name, lun_name, size,
+ metadata, qos_policy_group=None):
+ """Creates a LUN, handling Data ONTAP differences as needed."""
+
+ self.zapi_client.create_lun(
+ volume_name, lun_name, size, metadata, qos_policy_group)
+
+ self.vol_refresh_voluntary = True
+
+ def _get_root_volume_name(self):
+ # switch to volume-get-root-name API when possible
+ vols = self.zapi_client.get_filer_volumes()
+ for vol in vols:
+ volume_name = vol.get_child_content('name')
+ if self._get_vol_option(volume_name, 'root') == 'true':
+ return volume_name
+ LOG.warning(_LW('Could not determine root volume name '
+ 'on %s.') % self._get_owner())
+ return None
+
+ def _get_owner(self):
+ if self.vfiler:
+ owner = '%s:%s' % (self.configuration.netapp_server_hostname,
+ self.vfiler)
+ else:
+ owner = self.configuration.netapp_server_hostname
+ return owner
+
+ def _create_lun_handle(self, metadata):
+ """Returns LUN handle based on filer type."""
+ owner = self._get_owner()
+ return '%s:%s' % (owner, metadata['Path'])
+
+ def _find_mapped_lun_igroup(self, path, initiator, os=None):
+ """Find the igroup for mapped LUN with initiator."""
+ igroup = None
+ lun_id = None
+ result = self.zapi_client.get_lun_map(path)
+ igroups = result.get_child_by_name('initiator-groups')
+ if igroups:
+ found = False
+ igroup_infs = igroups.get_children()
+ for ig in igroup_infs:
+ initiators = ig.get_child_by_name('initiators')
+ init_infs = initiators.get_children()
+ for info in init_infs:
+ if info.get_child_content('initiator-name') == initiator:
+ found = True
+ igroup = ig.get_child_content('initiator-group-name')
+ lun_id = ig.get_child_content('lun-id')
+ break
+ if found:
+ break
+ return igroup, lun_id
+
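# Hedged sketch of the reply shape that _find_mapped_lun_igroup() above walks.
# The child element names come from the get_child_by_name()/get_child_content()
# calls in the loop; the wrapper element names and literal values are assumed
# for illustration.
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api

igroups = netapp_api.NaElement('initiator-groups')
ig = netapp_api.NaElement('initiator-group-info')            # name assumed
ig.add_new_child('initiator-group-name', 'openstack-ig1')
ig.add_new_child('lun-id', '0')
initiators = netapp_api.NaElement('initiators')
init = netapp_api.NaElement('initiator-info')                # name assumed
init.add_new_child('initiator-name', 'iqn.1993-08.org.debian:01:abc')
initiators.add_child_elem(init)
ig.add_child_elem(initiators)
igroups.add_child_elem(ig)
result = netapp_api.NaElement('results')                     # name assumed
result.add_child_elem(igroups)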
+ def _clone_lun(self, name, new_name, space_reserved='true',
+ src_block=0, dest_block=0, block_count=0):
+ """Clone LUN with the given handle to the new name."""
+ metadata = self._get_lun_attr(name, 'metadata')
+ path = metadata['Path']
+ (parent, _splitter, name) = path.rpartition('/')
+ clone_path = '%s/%s' % (parent, new_name)
+
+        self.zapi_client.clone_lun(path, clone_path, name, new_name,
+                                   space_reserved, src_block=src_block,
+                                   dest_block=dest_block,
+                                   block_count=block_count)
+
+ self.vol_refresh_voluntary = True
+ luns = self.zapi_client.get_lun_by_args(path=clone_path)
+ cloned_lun = luns[0]
+ self.zapi_client.set_space_reserve(clone_path, space_reserved)
+ clone_meta = self._create_lun_meta(cloned_lun)
+ handle = self._create_lun_handle(clone_meta)
+ self._add_lun_to_table(
+ block_base.NetAppLun(handle, new_name,
+ cloned_lun.get_child_content('size'),
+ clone_meta))
+
+ def _create_lun_meta(self, lun):
+ """Creates LUN metadata dictionary."""
+ self.zapi_client.check_is_naelement(lun)
+ meta_dict = {}
+ meta_dict['Path'] = lun.get_child_content('path')
+ meta_dict['Volume'] = lun.get_child_content('path').split('/')[2]
+ meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
+ meta_dict['SpaceReserved'] = lun.get_child_content(
+ 'is-space-reservation-enabled')
+ return meta_dict
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from filer."""
+
+ # ensure we get current data
+ self.vol_refresh_voluntary = True
+ self._refresh_volume_info()
+
+ LOG.debug('Updating volume stats')
+ data = {}
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ data['volume_backend_name'] = backend_name or self.driver_name
+ data['vendor_name'] = 'NetApp'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = self.driver_protocol
+ data['pools'] = self._get_pool_stats()
+
+ self.zapi_client.provide_ems(self, self.driver_name, self.app_version,
+ server_type=self.driver_mode)
+ self._stats = data
+
+ def _get_pool_stats(self):
+ """Retrieve pool (i.e. Data ONTAP volume) stats info from volumes."""
+
+ pools = []
+ if not self.vols:
+ return pools
+
+ for vol in self.vols:
+
+ # omit volumes not specified in the config
+ volume_name = vol.get_child_content('name')
+ if self.volume_list and volume_name not in self.volume_list:
+ continue
+
+ # omit root volume
+ if volume_name == self.root_volume_name:
+ continue
+
+ # ensure good volume state
+ state = vol.get_child_content('state')
+ inconsistent = vol.get_child_content('is-inconsistent')
+ invalid = vol.get_child_content('is-invalid')
+ if (state != 'online' or
+ inconsistent != 'false' or
+ invalid != 'false'):
+ continue
+
+ pool = dict()
+ pool['pool_name'] = volume_name
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ # convert sizes to GB and de-rate by NetApp multiplier
+ total = float(vol.get_child_content('size-total') or 0)
+ total /= self.configuration.netapp_size_multiplier
+ total /= units.Gi
+ pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
+
+ free = float(vol.get_child_content('size-available') or 0)
+ free /= self.configuration.netapp_size_multiplier
+ free /= units.Gi
+ pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
+
+ pools.append(pool)
+
+ return pools
+
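# Worked example of the capacity conversion in _get_pool_stats() above,
# assuming the documented default netapp_size_multiplier of 1.2 (all values
# illustrative): a volume whose size-total is 100 GiB is published as roughly
# 83.33 GB of total capacity.
total_bytes = 100 * 1024 ** 3
netapp_size_multiplier = 1.2
total_gb = total_bytes / netapp_size_multiplier / 1024 ** 3
print(round(total_gb, 2))  # 83.33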
+ def _get_lun_block_count(self, path):
+ """Gets block counts for the LUN."""
+ bs = super(NetAppBlockStorage7modeLibrary,
+ self)._get_lun_block_count(path)
+ api_version = self.zapi_client.get_ontapi_version()
+ if api_version:
+ major = api_version[0]
+ minor = api_version[1]
+ if major == 1 and minor < 15:
+ bs -= 1
+ return bs
+
+ def _refresh_volume_info(self):
+ """Saves the volume information for the filer."""
+
+ if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
+ timeutils.is_newer_than(self.vol_refresh_time,
+ self.vol_refresh_interval)):
+ try:
+ job_set = na_utils.set_safe_attr(self, 'vol_refresh_running',
+ True)
+ if not job_set:
+ LOG.warning(_LW("Volume refresh job already running. "
+ "Returning..."))
+ return
+ self.vol_refresh_voluntary = False
+ self.vols = self.zapi_client.get_filer_volumes()
+ self.vol_refresh_time = timeutils.utcnow()
+ except Exception as e:
+ LOG.warning(_LW("Error refreshing volume info. Message: %s"),
+ six.text_type(e))
+ finally:
+ na_utils.set_safe_attr(self, 'vol_refresh_running', False)
+
+ def delete_volume(self, volume):
+ """Driver entry point for destroying existing volumes."""
+ super(NetAppBlockStorage7modeLibrary, self).delete_volume(volume)
+ self.vol_refresh_voluntary = True
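# Hedged sketch of how a protocol driver is expected to compose this library
# (the unit tests in this change exercise 'driver.library'; the class and
# method names below are illustrative, not the real iscsi_7mode module):
from cinder.volume.drivers.netapp.dataontap import block_7mode

class Example7modeISCSIDriver(object):
    """Thin proxy that delegates volume operations to the 7-mode library."""

    def __init__(self, *args, **kwargs):
        self.library = block_7mode.NetAppBlockStorage7modeLibrary(
            'Example_iSCSI_7mode', 'iSCSI', **kwargs)

    def do_setup(self, context):
        self.library.do_setup(context)

    def create_volume(self, volume):
        self.library.create_volume(volume)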
--- /dev/null
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Andrew Kerr. All rights reserved.
+# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver library for NetApp 7/C-mode block storage systems.
+"""
+
+import sys
+import uuid
+
+from oslo.utils import excutils
+from oslo.utils import units
+import six
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
+from cinder.volume.drivers.netapp import options as na_opts
+from cinder.volume.drivers.netapp import utils as na_utils
+from cinder.volume import utils as volume_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetAppLun(object):
+ """Represents a LUN on NetApp storage."""
+
+ def __init__(self, handle, name, size, metadata_dict):
+ self.handle = handle
+ self.name = name
+ self.size = size
+ self.metadata = metadata_dict or {}
+
+ def get_metadata_property(self, prop):
+ """Get the metadata property of a LUN."""
+ if prop in self.metadata:
+ return self.metadata[prop]
+ name = self.name
+ msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
+ msg_fmt = {'prop': prop, 'name': name}
+ LOG.debug(msg % msg_fmt)
+
+ def __str__(self, *args, **kwargs):
+ return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
+ % (self.handle, self.name, self.size, self.metadata)
+
+
+class NetAppBlockStorageLibrary(object):
+ """NetApp block storage library for Data ONTAP."""
+
+ # do not increment this as it may be used in volume type definitions
+ VERSION = "1.0.0"
+ IGROUP_PREFIX = 'openstack-'
+ REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
+ 'netapp_server_hostname']
+
+ def __init__(self, driver_name, driver_protocol, **kwargs):
+
+ na_utils.validate_instantiation(**kwargs)
+
+ self.driver_name = driver_name
+ self.driver_protocol = driver_protocol
+ self.zapi_client = None
+ self._stats = {}
+ self.lun_table = {}
+ self.app_version = kwargs.get("app_version", "unknown")
+
+ self.configuration = kwargs['configuration']
+ self.configuration.append_config_values(na_opts.netapp_connection_opts)
+ self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
+ self.configuration.append_config_values(na_opts.netapp_transport_opts)
+ self.configuration.append_config_values(
+ na_opts.netapp_provisioning_opts)
+
+ def do_setup(self, context):
+ na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
+
+ def check_for_setup_error(self):
+ """Check that the driver is working and can communicate.
+
+ Discovers the LUNs on the NetApp server.
+ """
+
+ lun_list = self.zapi_client.get_lun_list()
+ self._extract_and_populate_luns(lun_list)
+ LOG.debug("Success getting list of LUNs from server.")
+
+ def get_pool(self, volume):
+ """Return pool name where volume resides.
+
+ :param volume: The volume hosted by the driver.
+ :return: Name of the pool where given volume is hosted.
+ """
+ name = volume['name']
+ metadata = self._get_lun_attr(name, 'metadata') or dict()
+ return metadata.get('Volume', None)
+
+ def create_volume(self, volume):
+ """Driver entry point for creating a new volume (Data ONTAP LUN)."""
+
+ LOG.debug('create_volume on %s' % volume['host'])
+
+ # get Data ONTAP volume name as pool name
+ ontap_volume_name = volume_utils.extract_host(volume['host'],
+ level='pool')
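+ # e.g. a host value of 'cinder@netapp_iscsi#vol1' (illustrative)
+ # yields the pool name 'vol1'.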
+
+ if ontap_volume_name is None:
+ msg = _("Pool is not available in the volume host field.")
+ raise exception.InvalidHost(reason=msg)
+
+ lun_name = volume['name']
+
+ # start with default size, get requested size
+ default_size = units.Mi * 100 # 100 MB
+ size = (default_size if not int(volume['size'])
+ else int(volume['size']) * units.Gi)
+
+ metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
+
+ extra_specs = na_utils.get_volume_extra_specs(volume)
+ qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
+ if extra_specs else None
+
+ # warn on obsolete extra specs
+ na_utils.log_extra_spec_warnings(extra_specs)
+
+ self._create_lun(ontap_volume_name, lun_name, size,
+ metadata, qos_policy_group)
+ LOG.debug('Created LUN with name %s' % lun_name)
+
+ metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
+ metadata['Volume'] = ontap_volume_name
+ metadata['Qtree'] = None
+
+ handle = self._create_lun_handle(metadata)
+ self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
+
+ def delete_volume(self, volume):
+ """Driver entry point for destroying existing volumes."""
+ name = volume['name']
+ metadata = self._get_lun_attr(name, 'metadata')
+ if not metadata:
+ msg = _LW("No entry in LUN table for volume/snapshot %(name)s.")
+ msg_fmt = {'name': name}
+ LOG.warning(msg % msg_fmt)
+ return
+ self.zapi_client.destroy_lun(metadata['Path'])
+ self.lun_table.pop(name)
+
+ def ensure_export(self, context, volume):
+ """Driver entry point to get the export info for an existing volume."""
+ handle = self._get_lun_attr(volume['name'], 'handle')
+ return {'provider_location': handle}
+
+ def create_export(self, context, volume):
+ """Driver entry point to get the export info for a new volume."""
+ handle = self._get_lun_attr(volume['name'], 'handle')
+ return {'provider_location': handle}
+
+ def remove_export(self, context, volume):
+ """Driver entry point to remove an export for a volume.
+
+ Since exporting is idempotent in this driver, we have nothing
+ to do for unexporting.
+ """
+
+ pass
+
+ def create_snapshot(self, snapshot):
+ """Driver entry point for creating a snapshot.
+
+ This driver implements snapshots by using efficient single-file
+ (LUN) cloning.
+ """
+
+ vol_name = snapshot['volume_name']
+ snapshot_name = snapshot['name']
+ lun = self._get_lun_from_table(vol_name)
+ self._clone_lun(lun.name, snapshot_name, 'false')
+
+ def delete_snapshot(self, snapshot):
+ """Driver entry point for deleting a snapshot."""
+ self.delete_volume(snapshot)
+ LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Driver entry point for creating a new volume from a snapshot.
+
+ Many would call this "cloning" and in fact we use cloning to implement
+ this feature.
+ """
+
+ vol_size = volume['size']
+ snap_size = snapshot['volume_size']
+ snapshot_name = snapshot['name']
+ new_name = volume['name']
+ self._clone_lun(snapshot_name, new_name, 'true')
+ if vol_size != snap_size:
+ try:
+ self.extend_volume(volume, volume['size'])
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(
+ _LE("Resizing %s failed. Cleaning volume."), new_name)
+ self.delete_volume(volume)
+
+ def _create_lun(self, volume_name, lun_name, size,
+ metadata, qos_policy_group=None):
+ """Creates a LUN, handling Data ONTAP differences as needed."""
+ raise NotImplementedError()
+
+ def _create_lun_handle(self, metadata):
+ """Returns LUN handle based on filer type."""
+ raise NotImplementedError()
+
+ def _extract_and_populate_luns(self, api_luns):
+ """Extracts the LUNs from API.
+
+ Populates the LUN table.
+ """
+
+ for lun in api_luns:
+ meta_dict = self._create_lun_meta(lun)
+ path = lun.get_child_content('path')
+ (_rest, _splitter, name) = path.rpartition('/')
+ handle = self._create_lun_handle(meta_dict)
+ size = lun.get_child_content('size')
+ discovered_lun = NetAppLun(handle, name, size, meta_dict)
+ self._add_lun_to_table(discovered_lun)
+
+ def _map_lun(self, name, initiator, initiator_type='iscsi', lun_id=None):
+ """Maps LUN to the initiator and returns LUN id assigned."""
+ metadata = self._get_lun_attr(name, 'metadata')
+ os = metadata['OsType']
+ path = metadata['Path']
+ if not self._check_allowed_os(os):
+ os = 'default'
+ igroup_name = self._get_or_create_igroup(initiator,
+ initiator_type, os)
+ try:
+ return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
+ except NaApiError:
+ exc_info = sys.exc_info()
+ (_igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
+ if lun_id is not None:
+ return lun_id
+ else:
+ six.reraise(*exc_info)
+
+ def _unmap_lun(self, path, initiator):
+ """Unmaps a LUN from given initiator."""
+ (igroup_name, _lun_id) = self._find_mapped_lun_igroup(path, initiator)
+ self.zapi_client.unmap_lun(path, igroup_name)
+
+ def _find_mapped_lun_igroup(self, path, initiator, os=None):
+ """Find the igroup for mapped LUN with initiator."""
+ raise NotImplementedError()
+
+ def _get_or_create_igroup(self, initiator, initiator_type='iscsi',
+ os='default'):
+ """Checks for an igroup for an initiator.
+
+ Creates igroup if not found.
+ """
+
+ igroups = self.zapi_client.get_igroup_by_initiator(initiator=initiator)
+ igroup_name = None
+ for igroup in igroups:
+ if igroup['initiator-group-os-type'] == os:
+ if igroup['initiator-group-type'] == initiator_type or \
+ igroup['initiator-group-type'] == 'mixed':
+ if igroup['initiator-group-name'].startswith(
+ self.IGROUP_PREFIX):
+ igroup_name = igroup['initiator-group-name']
+ break
+ if not igroup_name:
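+ # No suitable igroup found; create one named 'openstack-<uuid4>'.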
+ igroup_name = self.IGROUP_PREFIX + six.text_type(uuid.uuid4())
+ self.zapi_client.create_igroup(igroup_name, initiator_type, os)
+ self.zapi_client.add_igroup_initiator(igroup_name, initiator)
+ return igroup_name
+
+ def _check_allowed_os(self, os):
+ """Checks if the os type supplied is NetApp supported."""
+ if os in ['linux', 'aix', 'hpux', 'windows', 'solaris',
+ 'netware', 'vmware', 'openvms', 'xen', 'hyper_v']:
+ return True
+ else:
+ return False
+
+ def _add_lun_to_table(self, lun):
+ """Adds LUN to cache table."""
+ if not isinstance(lun, NetAppLun):
+ msg = _("Object is not a NetApp LUN.")
+ raise exception.VolumeBackendAPIException(data=msg)
+ self.lun_table[lun.name] = lun
+
+ def _get_lun_from_table(self, name):
+ """Gets LUN from cache table.
+
+ Refreshes cache if LUN not found in cache.
+ """
+ lun = self.lun_table.get(name)
+ if lun is None:
+ lun_list = self.zapi_client.get_lun_list()
+ self._extract_and_populate_luns(lun_list)
+ lun = self.lun_table.get(name)
+ if lun is None:
+ raise exception.VolumeNotFound(volume_id=name)
+ return lun
+
+ def _clone_lun(self, name, new_name, space_reserved='true',
+ src_block=0, dest_block=0, block_count=0):
+ """Clone LUN with the given name to the new name."""
+ raise NotImplementedError()
+
+ def _get_lun_attr(self, name, attr):
+ """Get the LUN attribute if found else None."""
+ try:
+ attr = getattr(self._get_lun_from_table(name), attr)
+ return attr
+ except exception.VolumeNotFound as e:
+ LOG.error(_LE("Message: %s"), e.msg)
+ except Exception as e:
+ LOG.error(_LE("Error getting LUN attribute. Exception: %s"),
+ e.__str__())
+ return None
+
+ def _create_lun_meta(self, lun):
+ raise NotImplementedError()
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Creates a clone of the specified volume."""
+ vol_size = volume['size']
+ src_vol = self._get_lun_from_table(src_vref['name'])
+ src_vol_size = src_vref['size']
+ new_name = volume['name']
+ self._clone_lun(src_vol.name, new_name, 'true')
+ if vol_size != src_vol_size:
+ try:
+ self.extend_volume(volume, volume['size'])
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(
+ _LE("Resizing %s failed. Cleaning volume."), new_name)
+ self.delete_volume(volume)
+
+ def get_volume_stats(self, refresh=False):
+ """Get volume stats.
+
+ If 'refresh' is True, update the stats first.
+ """
+
+ if refresh:
+ self._update_volume_stats()
+
+ return self._stats
+
+ def _update_volume_stats(self):
+ raise NotImplementedError()
+
+ def extend_volume(self, volume, new_size):
+ """Extend an existing volume to the new size."""
+ name = volume['name']
+ lun = self._get_lun_from_table(name)
+ path = lun.metadata['Path']
+ curr_size_bytes = six.text_type(lun.size)
+ new_size_bytes = six.text_type(int(new_size) * units.Gi)
+ # extend_volume is also reused by the clone paths, hence the
+ # comparison against the stored LUN size.
+ if curr_size_bytes != new_size_bytes:
+ lun_geometry = self.zapi_client.get_lun_geometry(path)
+ if (lun_geometry and lun_geometry.get("max_resize")
+ and int(lun_geometry.get("max_resize")) >=
+ int(new_size_bytes)):
+ self.zapi_client.do_direct_resize(path, new_size_bytes)
+ else:
+ self._do_sub_clone_resize(path, new_size_bytes)
+ self.lun_table[name].size = new_size_bytes
+ else:
+ LOG.info(_LI("No need to extend volume %s"
+ " as it is already the requested new size."), name)
+
+ def _get_vol_option(self, volume_name, option_name):
+ """Get the value for the volume option."""
+ value = None
+ options = self.zapi_client.get_volume_options(volume_name)
+ for opt in options:
+ if opt.get_child_content('name') == option_name:
+ value = opt.get_child_content('value')
+ break
+ return value
+
+ def _do_sub_clone_resize(self, path, new_size_bytes):
+ """Does sub LUN clone after verification.
+
+ Clones the block ranges, swaps the LUNs, and deletes the older LUN
+ after a successful clone.
+ """
+ seg = path.split("/")
+ LOG.info(_LI("Resizing LUN %s to new size using clone operation."),
+ seg[-1])
+ name = seg[-1]
+ vol_name = seg[2]
+ lun = self._get_lun_from_table(name)
+ metadata = lun.metadata
+ compression = self._get_vol_option(vol_name, 'compression')
+ if compression == "on":
+ msg = _('%s cannot be resized using clone operation'
+ ' as it is hosted on compressed volume')
+ raise exception.VolumeBackendAPIException(data=msg % name)
+ else:
+ block_count = self._get_lun_block_count(path)
+ if block_count == 0:
+ msg = _('%s cannot be resized using clone operation'
+ ' as it contains no blocks.')
+ raise exception.VolumeBackendAPIException(data=msg % name)
+ new_lun = 'new-%s' % name
+ self.zapi_client.create_lun(vol_name, new_lun, new_size_bytes,
+ metadata)
+ try:
+ self._clone_lun(name, new_lun, block_count=block_count)
+ self._post_sub_clone_resize(path)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ new_path = '/vol/%s/%s' % (vol_name, new_lun)
+ self.zapi_client.destroy_lun(new_path)
+
+ def _post_sub_clone_resize(self, path):
+ """Try post sub clone resize in a transactional manner."""
+ st_tm_mv, st_nw_mv, st_del_old = None, None, None
+ seg = path.split("/")
+ LOG.info(_LI("Post clone resize LUN %s"), seg[-1])
+ new_lun = 'new-%s' % (seg[-1])
+ tmp_lun = 'tmp-%s' % (seg[-1])
+ tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
+ new_path = "/vol/%s/%s" % (seg[2], new_lun)
+ try:
+ st_tm_mv = self.zapi_client.move_lun(path, tmp_path)
+ st_nw_mv = self.zapi_client.move_lun(new_path, path)
+ st_del_old = self.zapi_client.destroy_lun(tmp_path)
+ except Exception as e:
+ if st_tm_mv is None:
+ msg = _("Failure staging LUN %s to tmp.")
+ raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
+ else:
+ if st_nw_mv is None:
+ self.zapi_client.move_lun(tmp_path, path)
+ msg = _("Failure moving new cloned LUN to %s.")
+ raise exception.VolumeBackendAPIException(
+ data=msg % (seg[-1]))
+ elif st_del_old is None:
+ LOG.error(_LE("Failure deleting staged tmp LUN %s."),
+ tmp_lun)
+ else:
+ LOG.error(_LE("Unknown exception in"
+ " post clone resize LUN %s."), seg[-1])
+ LOG.error(_LE("Exception details: %s") % (e.__str__()))
+
+ def _get_lun_block_count(self, path):
+ """Gets block counts for the LUN."""
+ LOG.debug("Getting LUN block count.")
+ lun_infos = self.zapi_client.get_lun_by_args(path=path)
+ if not lun_infos:
+ seg = path.split('/')
+ msg = _('Failure getting LUN info for %s.')
+ raise exception.VolumeBackendAPIException(data=msg % seg[-1])
+ lun_info = lun_infos[-1]
+ bs = int(lun_info.get_child_content('block-size'))
+ ls = int(lun_info.get_child_content('size'))
+ block_count = ls / bs
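+ # e.g. a 1 GiB LUN (1073741824 bytes) with 512-byte blocks
+ # yields 2097152 blocks.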
+ return block_count
+
+ def initialize_connection_iscsi(self, volume, connector):
+ """Driver entry point to attach a volume to an instance.
+
+ Do the LUN masking on the storage system so the initiator can access
+ the LUN on the target. Also return the iSCSI properties so the
+ initiator can find the LUN. This implementation does not call
+ _get_iscsi_properties() to get the properties because we cannot store the
+ LUN number in the database. We only find out what the LUN number will
+ be during this method call so we construct the properties dictionary
+ ourselves.
+ """
+
+ initiator_name = connector['initiator']
+ name = volume['name']
+ lun_id = self._map_lun(name, initiator_name, 'iscsi', None)
+ msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s")
+ msg_fmt = {'name': name, 'initiator_name': initiator_name}
+ LOG.debug(msg % msg_fmt)
+ iqn = self.zapi_client.get_iscsi_service_details()
+ target_details_list = self.zapi_client.get_target_details()
+ msg = _("Successfully fetched target details for LUN %(name)s and "
+ "initiator %(initiator_name)s")
+ msg_fmt = {'name': name, 'initiator_name': initiator_name}
+ LOG.debug(msg % msg_fmt)
+
+ if not target_details_list:
+ msg = _('Failed to get LUN target details for the LUN %s')
+ raise exception.VolumeBackendAPIException(data=msg % name)
+ target_details = None
+ for tgt_detail in target_details_list:
+ if tgt_detail.get('interface-enabled', 'true') == 'true':
+ target_details = tgt_detail
+ break
+ if not target_details:
+ target_details = target_details_list[0]
+
+ if not (target_details['address'] and target_details['port']):
+ msg = _('Failed to get target portal for the LUN %s')
+ raise exception.VolumeBackendAPIException(data=msg % name)
+ if not iqn:
+ msg = _('Failed to get target IQN for the LUN %s')
+ raise exception.VolumeBackendAPIException(data=msg % name)
+
+ properties = {}
+ properties['target_discovered'] = False
+ (address, port) = (target_details['address'], target_details['port'])
+ properties['target_portal'] = '%s:%s' % (address, port)
+ properties['target_iqn'] = iqn
+ properties['target_lun'] = lun_id
+ properties['volume_id'] = volume['id']
+
+ auth = volume['provider_auth']
+ if auth:
+ (auth_method, auth_username, auth_secret) = auth.split()
+ properties['auth_method'] = auth_method
+ properties['auth_username'] = auth_username
+ properties['auth_password'] = auth_secret
+
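+ # Shape of the returned structure (all values illustrative):
+ # {'driver_volume_type': 'iscsi',
+ # 'data': {'target_portal': '10.63.10.5:3260',
+ # 'target_iqn': 'iqn.1992-08.com.netapp:sn.12345',
+ # 'target_lun': 0, 'target_discovered': False,
+ # 'volume_id': volume['id']}}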
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': properties,
+ }
+
+ def terminate_connection_iscsi(self, volume, connector, **kwargs):
+ """Driver entry point to unattach a volume from an instance.
+
+ Unmask the LUN on the storage system so the given initiator can no
+ longer access it.
+ """
+
+ initiator_name = connector['initiator']
+ name = volume['name']
+ metadata = self._get_lun_attr(name, 'metadata')
+ path = metadata['Path']
+ self._unmap_lun(path, initiator_name)
+ msg = _("Unmapped LUN %(name)s from the initiator %(initiator_name)s")
+ msg_fmt = {'name': name, 'initiator_name': initiator_name}
+ LOG.debug(msg % msg_fmt)
--- /dev/null
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Andrew Kerr. All rights reserved.
+# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver library for NetApp C-mode block storage systems.
+"""
+
+import copy
+
+from oslo.utils import units
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder.volume.drivers.netapp.dataontap import block_base
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap import ssc_cmode
+from cinder.volume.drivers.netapp import options as na_opts
+from cinder.volume.drivers.netapp import utils as na_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
+ """NetApp block storage library for Data ONTAP (Cluster-mode)."""
+
+ REQUIRED_CMODE_FLAGS = ['netapp_vserver']
+
+ def __init__(self, driver_name, driver_protocol, **kwargs):
+ super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name,
+ driver_protocol,
+ **kwargs)
+ self.configuration.append_config_values(na_opts.netapp_cluster_opts)
+ self.driver_mode = 'cluster'
+
+ def do_setup(self, context):
+ super(NetAppBlockStorageCmodeLibrary, self).do_setup(context)
+ na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
+
+ self.vserver = self.configuration.netapp_vserver
+
+ self.zapi_client = client_cmode.Client(
+ transport_type=self.configuration.netapp_transport_type,
+ username=self.configuration.netapp_login,
+ password=self.configuration.netapp_password,
+ hostname=self.configuration.netapp_server_hostname,
+ port=self.configuration.netapp_server_port,
+ vserver=self.vserver)
+
+ self.ssc_vols = None
+ self.stale_vols = set()
+
+ def check_for_setup_error(self):
+ """Check that the driver is working and can communicate."""
+ ssc_cmode.check_ssc_api_permissions(self.zapi_client)
+ super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
+
+ def _create_lun(self, volume_name, lun_name, size,
+ metadata, qos_policy_group=None):
+ """Creates a LUN, handling Data ONTAP differences as needed."""
+
+ self.zapi_client.create_lun(
+ volume_name, lun_name, size, metadata, qos_policy_group)
+
+ self._update_stale_vols(
+ volume=ssc_cmode.NetAppVolume(volume_name, self.vserver))
+
+ def _create_lun_handle(self, metadata):
+ """Returns LUN handle based on filer type."""
+ return '%s:%s' % (self.vserver, metadata['Path'])
+
+ def _find_mapped_lun_igroup(self, path, initiator, os=None):
+ """Find the igroup for mapped LUN with initiator."""
+ initiator_igroups = self.zapi_client.get_igroup_by_initiator(
+ initiator=initiator)
+ lun_maps = self.zapi_client.get_lun_map(path)
+ if initiator_igroups and lun_maps:
+ for igroup in initiator_igroups:
+ igroup_name = igroup['initiator-group-name']
+ if igroup_name.startswith(self.IGROUP_PREFIX):
+ for lun_map in lun_maps:
+ if lun_map['initiator-group'] == igroup_name:
+ return igroup_name, lun_map['lun-id']
+ return None, None
+
+ def _clone_lun(self, name, new_name, space_reserved='true',
+ src_block=0, dest_block=0, block_count=0):
+ """Clone LUN with the given handle to the new name."""
+ metadata = self._get_lun_attr(name, 'metadata')
+ volume = metadata['Volume']
+ self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
+ src_block=src_block, dest_block=dest_block,
+ block_count=block_count)
+ LOG.debug("Cloned LUN with new name %s" % new_name)
+ lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
+ path='/vol/%s/%s'
+ % (volume, new_name))
+ if len(lun) == 0:
+ msg = _("No cloned LUN named %s found on the filer")
+ raise exception.VolumeBackendAPIException(data=msg % new_name)
+ clone_meta = self._create_lun_meta(lun[0])
+ self._add_lun_to_table(
+ block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'],
+ clone_meta['Path']),
+ new_name,
+ lun[0].get_child_content('size'),
+ clone_meta))
+ self._update_stale_vols(
+ volume=ssc_cmode.NetAppVolume(volume, self.vserver))
+
+ def _create_lun_meta(self, lun):
+ """Creates LUN metadata dictionary."""
+ self.zapi_client.check_is_naelement(lun)
+ meta_dict = {}
+ meta_dict['Vserver'] = lun.get_child_content('vserver')
+ meta_dict['Volume'] = lun.get_child_content('volume')
+ meta_dict['Qtree'] = lun.get_child_content('qtree')
+ meta_dict['Path'] = lun.get_child_content('path')
+ meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
+ meta_dict['SpaceReserved'] = \
+ lun.get_child_content('is-space-reservation-enabled')
+ return meta_dict
+
+ def _configure_tunneling(self, do_tunneling=False):
+ """Configures tunneling for Data ONTAP cluster."""
+ if do_tunneling:
+ self.zapi_client.set_vserver(self.vserver)
+ else:
+ self.zapi_client.set_vserver(None)
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from vserver."""
+
+ sync = self.ssc_vols is None
+ ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.get_connection(),
+ self.vserver, synchronous=sync)
+
+ LOG.debug('Updating volume stats')
+ data = {}
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ data['volume_backend_name'] = backend_name or self.driver_name
+ data['vendor_name'] = 'NetApp'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = self.driver_protocol
+ data['pools'] = self._get_pool_stats()
+
+ self.zapi_client.provide_ems(self, self.driver_name, self.app_version)
+ self._stats = data
+
+ def _get_pool_stats(self):
+ """Retrieve pool (Data ONTAP volume) stats info from SSC volumes."""
+
+ pools = []
+ if not self.ssc_vols:
+ return pools
+
+ for vol in self.ssc_vols['all']:
+ pool = dict()
+ pool['pool_name'] = vol.id['name']
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ # convert sizes to GB and de-rate by NetApp multiplier
+ total = float(vol.space['size_total_bytes'])
+ total /= self.configuration.netapp_size_multiplier
+ total /= units.Gi
+ pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
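+ # e.g. with netapp_size_multiplier=1.2 (example value), a volume
+ # reporting 1200 GiB raw is surfaced as total_capacity_gb 1000.0.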
+
+ free = float(vol.space['size_avl_bytes'])
+ free /= self.configuration.netapp_size_multiplier
+ free /= units.Gi
+ pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
+
+ pool['netapp_raid_type'] = vol.aggr['raid_type']
+ pool['netapp_disk_type'] = vol.aggr['disk_type']
+
+ mirrored = vol in self.ssc_vols['mirrored']
+ pool['netapp_mirrored'] = six.text_type(mirrored).lower()
+ pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
+
+ dedup = vol in self.ssc_vols['dedup']
+ pool['netapp_dedup'] = six.text_type(dedup).lower()
+ pool['netapp_nodedup'] = six.text_type(not dedup).lower()
+
+ compression = vol in self.ssc_vols['compression']
+ pool['netapp_compression'] = six.text_type(compression).lower()
+ pool['netapp_nocompression'] = six.text_type(
+ not compression).lower()
+
+ thin = vol in self.ssc_vols['thin']
+ pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
+ pool['netapp_thick_provisioned'] = six.text_type(not thin).lower()
+
+ pools.append(pool)
+
+ return pools
+
+ @utils.synchronized('update_stale')
+ def _update_stale_vols(self, volume=None, reset=False):
+ """Populates stale vols with vol and returns set copy if reset."""
+ if volume:
+ self.stale_vols.add(volume)
+ if reset:
+ set_copy = copy.deepcopy(self.stale_vols)
+ self.stale_vols.clear()
+ return set_copy
+
+ @utils.synchronized("refresh_ssc_vols")
+ def refresh_ssc_vols(self, vols):
+ """Refreshes ssc_vols with latest entries."""
+ self.ssc_vols = vols
+
+ def delete_volume(self, volume):
+ """Driver entry point for destroying existing volumes."""
+ lun = self.lun_table.get(volume['name'])
+ netapp_vol = None
+ if lun:
+ netapp_vol = lun.get_metadata_property('Volume')
+ super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
+ if netapp_vol:
+ self._update_stale_vols(
+ volume=ssc_cmode.NetAppVolume(netapp_vol, self.vserver))
-# Copyright (c) 2012 NetApp, Inc.
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Glenn Gobeli. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# License for the specific language governing permissions and limitations
# under the License.
"""
-NetApp api for ONTAP and OnCommand DFM.
+NetApp API for Data ONTAP and OnCommand DFM.
-Contains classes required to issue api calls to ONTAP and OnCommand DFM.
+Contains classes required to issue API calls to Data ONTAP and OnCommand DFM.
"""
+import copy
import urllib2
from lxml import etree
import six
+from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
def __init__(self, host, server_type=SERVER_TYPE_FILER,
transport_type=TRANSPORT_TYPE_HTTP,
style=STYLE_LOGIN_PASSWORD, username=None,
- password=None):
+ password=None, port=None):
self._host = host
self.set_server_type(server_type)
self.set_transport_type(transport_type)
self.set_style(style)
+ if port:
+ self.set_port(port)
self._username = username
self._password = password
self._refresh_conn = True
+ LOG.debug('Using NetApp controller: %s' % self._host)
+
def get_transport_type(self):
"""Get the transport type protocol."""
return self._protocol
def set_transport_type(self, transport_type):
- """Set the transport type protocol for api.
+ """Set the transport type protocol for API.
Supports http and https transport types.
"""
self._refresh_conn = True
def set_api_version(self, major, minor):
- """Set the api version."""
+ """Set the API version."""
try:
self._api_major_version = int(major)
self._api_minor_version = int(minor)
self._refresh_conn = True
def get_api_version(self):
- """Gets the api version tuple."""
+ """Gets the API version tuple."""
if hasattr(self, '_api_version'):
return (self._api_major_version, self._api_minor_version)
return None
self._refresh_conn = True
def invoke_elem(self, na_element, enable_tunneling=False):
- """Invoke the api on the server."""
+ """Invoke the API on the server."""
if na_element and not isinstance(na_element, NaElement):
- ValueError('NaElement must be supplied to invoke api')
+ raise ValueError('NaElement must be supplied to invoke API')
request = self._create_request(na_element, enable_tunneling)
if not hasattr(self, '_opener') or not self._opener \
or self._refresh_conn:
return self._get_result(xml)
def invoke_successfully(self, na_element, enable_tunneling=False):
- """Invokes api and checks execution status as success.
+ """Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
This helps to use same connection instance to enable or disable
class NaElement(object):
- """Class wraps basic building block for NetApp api request."""
+ """Class wraps basic building block for NetApp API request."""
def __init__(self, name):
"""Name of the element or etree.Element."""
class NaApiError(Exception):
- """Base exception class for NetApp api errors."""
+ """Base exception class for NetApp API errors."""
def __init__(self, code='unknown', message='unknown'):
self.code = code
self.message = message
def __str__(self, *args, **kwargs):
- return 'NetApp api failed. Reason - %s:%s' % (self.code, self.message)
+ return 'NetApp API failed. Reason - %s:%s' % (self.code, self.message)
NaErrors = {'API_NOT_FOUND': NaApiError('13005', 'Unable to find API'),
'INSUFFICIENT_PRIVS': NaApiError('13003',
'Insufficient privileges')}
+
+
+def invoke_api(na_server, api_name, api_family='cm', query=None,
+ des_result=None, additional_elems=None,
+ is_iter=False, records=0, tag=None,
+ timeout=0, tunnel=None):
+ """Invokes any given API call to a NetApp server.
+
+ :param na_server: na_server instance
+ :param api_name: API name string
+ :param api_family: cm or 7m
+ :param query: API query as dict
+ :param des_result: desired result as dict
+ :param additional_elems: dict other than query and des_result
+ :param is_iter: is iterator API
+ :param records: limit for records, 0 for infinite
+ :param timeout: timeout seconds
+ :param tunnel: tunnel entity, vserver or vfiler name
+ """
+ record_step = 50
+ if not (na_server and isinstance(na_server, NaServer)):
+ msg = _("Requires an NaServer instance.")
+ raise exception.InvalidInput(reason=msg)
+ server = copy.copy(na_server)
+ if api_family == 'cm':
+ server.set_vserver(tunnel)
+ else:
+ server.set_vfiler(tunnel)
+ if timeout > 0:
+ server.set_timeout(timeout)
+ iter_records = 0
+ cond = True
+ while cond:
+ na_element = create_api_request(
+ api_name, query, des_result, additional_elems,
+ is_iter, record_step, tag)
+ result = server.invoke_successfully(na_element, True)
+ if is_iter:
+ if records > 0:
+ iter_records = iter_records + record_step
+ if iter_records >= records:
+ cond = False
+ tag_el = result.get_child_by_name('next-tag')
+ tag = tag_el.get_content() if tag_el else None
+ if not tag:
+ cond = False
+ else:
+ cond = False
+ yield result
+
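+# Illustrative usage (names are examples only): iterate over LUNs on a
+# cluster vserver, fetching 50 records per underlying call:
+#
+# for result in invoke_api(na_server, 'lun-get-iter', api_family='cm',
+# is_iter=True, tunnel='vs1'):
+# attr_list = result.get_child_by_name('attributes-list')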
+
+def create_api_request(api_name, query=None, des_result=None,
+ additional_elems=None, is_iter=False,
+ record_step=50, tag=None):
+ """Creates a NetApp API request.
+
+ :param api_name: API name string
+ :param query: API query as dict
+ :param des_result: desired result as dict
+ :param additional_elems: dict other than query and des_result
+ :param is_iter: is iterator API
+ :param record_step: records at a time for iter API
+ :param tag: next tag for iter API
+ """
+ api_el = NaElement(api_name)
+ if query:
+ query_el = NaElement('query')
+ query_el.translate_struct(query)
+ api_el.add_child_elem(query_el)
+ if des_result:
+ res_el = NaElement('desired-attributes')
+ res_el.translate_struct(des_result)
+ api_el.add_child_elem(res_el)
+ if additional_elems:
+ api_el.translate_struct(additional_elems)
+ if is_iter:
+ api_el.add_new_child('max-records', six.text_type(record_step))
+ if tag:
+ api_el.add_new_child('tag', tag, True)
+ return api_el
-# Copyright (c) - 2014, Alex Meade. All rights reserved.
-# All Rights Reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
from cinder import exception
from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
-from cinder.volume.drivers.netapp import api as netapp_api
-from cinder.volume.drivers.netapp.client import base
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import client_base
LOG = logging.getLogger(__name__)
-class Client(base.Client):
+class Client(client_base.Client):
+
+ def __init__(self, volume_list=None, **kwargs):
+ super(Client, self).__init__(**kwargs)
+ vfiler = kwargs.get('vfiler', None)
+ self.connection.set_vfiler(vfiler)
+
+ (major, minor) = self.get_ontapi_version(cached=False)
+ self.connection.set_api_version(major, minor)
- def __init__(self, connection, volume_list=None):
- super(Client, self).__init__(connection)
self.volume_list = volume_list
def _invoke_vfiler_api(self, na_element, vfiler):
return result.get_child_content('node-name')
def get_lun_list(self):
- """Gets the list of luns on filer."""
+ """Gets the list of LUNs on filer."""
lun_list = []
if self.volume_list:
for vol in self.volume_list:
if luns:
lun_list.extend(luns)
except netapp_api.NaApiError:
- LOG.warning(_LW("Error finding luns for volume %s."
- " Verify volume exists.") % (vol))
+ LOG.warning(_LW("Error finding LUNs for volume %s."
+ " Verify volume exists.") % vol)
else:
luns = self._get_vol_luns(None)
lun_list.extend(luns)
return lun_list
def _get_vol_luns(self, vol_name):
- """Gets the luns for a volume."""
+ """Gets the LUNs for a volume."""
api = netapp_api.NaElement('lun-list-info')
if vol_name:
api.add_new_child('volume-name', vol_name)
zbc = block_count
if z_calls == 0:
z_calls = 1
- for call in range(0, z_calls):
+ for _call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
bc_limit = 2 ** 24 # 8GB
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
- for segment in range(0, segments):
+ for _segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
clone_ops_info.get_child_content('reason'))
def get_lun_by_args(self, **args):
- """Retrieves luns with specified args."""
+ """Retrieves LUNs with specified args."""
lun_info = netapp_api.NaElement.create_node_with_children(
'lun-list-info', **args)
result = self.connection.invoke_successfully(lun_info, True)
return luns.get_children()
def get_filer_volumes(self, volume=None):
- """Returns list of filer volumes in api format."""
+ """Returns list of filer volumes in API format."""
vol_request = netapp_api.NaElement('volume-list-info')
res = self.connection.invoke_successfully(vol_request, True)
volumes = res.get_child_by_name('volumes')
-# Copyright (c) - 2014, Alex Meade. All rights reserved.
-# All Rights Reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+import socket
import sys
from oslo.utils import excutils
+from oslo.utils import timeutils
import six
from cinder.i18n import _LE, _LW, _LI
from cinder.openstack.common import log as logging
-from cinder.volume.drivers.netapp import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
+from cinder.volume.drivers.netapp.dataontap.client.api import NaElement
+from cinder.volume.drivers.netapp.dataontap.client.api import NaServer
LOG = logging.getLogger(__name__)
class Client(object):
- def __init__(self, connection):
- self.connection = connection
+ def __init__(self, **kwargs):
+ self.connection = NaServer(host=kwargs['hostname'],
+ transport_type=kwargs['transport_type'],
+ port=kwargs['port'],
+ username=kwargs['username'],
+ password=kwargs['password'])
- def get_ontapi_version(self):
+ def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
+
+ if cached:
+ return self.connection.get_api_version()
+
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
- return (major, minor)
+ return major, minor
+
+ def get_connection(self):
+ return self.connection
+
+ def check_is_naelement(self, elem):
+ """Checks if object is instance of NaElement."""
+ if not isinstance(elem, NaElement):
+ raise ValueError('Expects NaElement')
def create_lun(self, volume_name, lun_name, size, metadata,
qos_policy_group=None):
LOG.error(msg % msg_args)
def destroy_lun(self, path, force=True):
- """Destroys the lun at the path."""
+ """Destroys the LUN at the path."""
lun_destroy = netapp_api.NaElement.create_node_with_children(
'lun-destroy',
**{'path': path})
LOG.debug("Destroyed LUN %s" % seg[-1])
def map_lun(self, path, igroup_name, lun_id=None):
- """Maps lun to the initiator and returns lun id assigned."""
+ """Maps LUN to the initiator and returns LUN id assigned."""
lun_map = netapp_api.NaElement.create_node_with_children(
'lun-map', **{'path': path,
'initiator-group': igroup_name})
except netapp_api.NaApiError as e:
code = e.code
message = e.message
- msg = _LW('Error mapping lun. Code :%(code)s, Message:%(message)s')
+ msg = _LW('Error mapping LUN. Code :%(code)s, Message:%(message)s')
msg_fmt = {'code': code, 'message': message}
LOG.warning(msg % msg_fmt)
raise
def unmap_lun(self, path, igroup_name):
- """Unmaps a lun from given initiator."""
+ """Unmaps a LUN from given initiator."""
lun_unmap = netapp_api.NaElement.create_node_with_children(
'lun-unmap',
**{'path': path, 'initiator-group': igroup_name})
try:
self.connection.invoke_successfully(lun_unmap, True)
except netapp_api.NaApiError as e:
- msg = _LW("Error unmapping lun. Code :%(code)s,"
+ msg = _LW("Error unmapping LUN. Code :%(code)s,"
" Message:%(message)s")
msg_fmt = {'code': e.code, 'message': e.message}
exc_info = sys.exc_info()
LOG.warning(msg % msg_fmt)
- # if the lun is already unmapped
+ # if the LUN is already unmapped
if e.code == '13115' or e.code == '9016':
pass
else:
self.connection.invoke_successfully(igroup_add, True)
def do_direct_resize(self, path, new_size_bytes, force=True):
- """Resize the lun."""
+ """Resize the LUN."""
seg = path.split("/")
- LOG.info(_LI("Resizing lun %s directly to new size."), seg[-1])
+ LOG.info(_LI("Resizing LUN %s directly to new size."), seg[-1])
lun_resize = netapp_api.NaElement.create_node_with_children(
'lun-resize',
**{'path': path,
self.connection.invoke_successfully(lun_resize, True)
def get_lun_geometry(self, path):
- """Gets the lun geometry."""
+ """Gets the LUN geometry."""
geometry = {}
lun_geo = netapp_api.NaElement("lun-get-geometry")
lun_geo.add_new_child('path', path)
geometry['max_resize'] =\
result.get_child_content("max-resize-size")
except Exception as e:
- LOG.error(_LE("Lun %(path)s geometry failed. Message - %(msg)s")
+ LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s")
% {'path': path, 'msg': e.message})
return geometry
return opts
def move_lun(self, path, new_path):
- """Moves the lun at path to new path."""
+ """Moves the LUN at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
- LOG.debug("Moving lun %(name)s to %(new_name)s."
+ LOG.debug("Moving LUN %(name)s to %(new_name)s."
% {'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = netapp_api.NaElement("lun-move")
lun_move.add_new_child("path", path)
raise NotImplementedError()
def get_lun_list(self):
- """Gets the list of luns on filer."""
+ """Gets the list of LUNs on filer."""
raise NotImplementedError()
def get_igroup_by_initiator(self, initiator):
raise NotImplementedError()
def get_lun_by_args(self, **args):
- """Retrieves luns with specified args."""
+ """Retrieves LUNs with specified args."""
raise NotImplementedError()
+
+ def provide_ems(self, requester, netapp_backend, app_version,
+ server_type="cluster"):
+ """Provide ems with volume stats for the requester.
+
+ :param server_type: cluster or 7mode.
+ """
+ def _create_ems(netapp_backend, app_version, server_type):
+ """Create ems API request."""
+ ems_log = NaElement('ems-autosupport-log')
+ host = socket.getfqdn() or 'Cinder_node'
+ if server_type == "cluster":
+ dest = "cluster node"
+ else:
+ dest = "7 mode controller"
+ ems_log.add_new_child('computer-name', host)
+ ems_log.add_new_child('event-id', '0')
+ ems_log.add_new_child('event-source',
+ 'Cinder driver %s' % netapp_backend)
+ ems_log.add_new_child('app-version', app_version)
+ ems_log.add_new_child('category', 'provisioning')
+ ems_log.add_new_child('event-description',
+ 'OpenStack Cinder connected to %s' % dest)
+ ems_log.add_new_child('log-level', '6')
+ ems_log.add_new_child('auto-support', 'false')
+ return ems_log
+
+ def _create_vs_get():
+ """Create vs_get API request."""
+ vs_get = NaElement('vserver-get-iter')
+ vs_get.add_new_child('max-records', '1')
+ query = NaElement('query')
+ query.add_node_with_children('vserver-info',
+ **{'vserver-type': 'node'})
+ vs_get.add_child_elem(query)
+ desired = NaElement('desired-attributes')
+ desired.add_node_with_children(
+ 'vserver-info', **{'vserver-name': '', 'vserver-type': ''})
+ vs_get.add_child_elem(desired)
+ return vs_get
+
+ def _get_cluster_node(na_server):
+ """Get the cluster node for ems."""
+ na_server.set_vserver(None)
+ vs_get = _create_vs_get()
+ res = na_server.invoke_successfully(vs_get)
+ if (res.get_child_content('num-records') and
+ int(res.get_child_content('num-records')) > 0):
+ attr_list = res.get_child_by_name('attributes-list')
+ vs_info = attr_list.get_child_by_name('vserver-info')
+ vs_name = vs_info.get_child_content('vserver-name')
+ return vs_name
+ return None
+
+ do_ems = True
+ if hasattr(requester, 'last_ems'):
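+ # Throttle: send at most one EMS message roughly every hour.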
+ sec_limit = 3559
+ if not (timeutils.is_older_than(requester.last_ems, sec_limit)):
+ do_ems = False
+ if do_ems:
+ na_server = copy.copy(self.connection)
+ na_server.set_timeout(25)
+ ems = _create_ems(netapp_backend, app_version, server_type)
+ try:
+ if server_type == "cluster":
+ api_version = na_server.get_api_version()
+ if api_version:
+ major, minor = api_version
+ else:
+ raise NaApiError(code='Not found',
+ message='No API version found')
+ if major == 1 and minor > 15:
+ node = getattr(requester, 'vserver', None)
+ else:
+ node = _get_cluster_node(na_server)
+ if node is None:
+ raise NaApiError(code='Not found',
+ message='No vserver found')
+ na_server.set_vserver(node)
+ else:
+ na_server.set_vfiler(None)
+ na_server.invoke_successfully(ems, True)
+ LOG.debug("ems executed successfully.")
+ except NaApiError as e:
+ LOG.warning(_LW("Failed to invoke ems. Message : %s") % e)
+ finally:
+ requester.last_ems = timeutils.utcnow()
-# Copyright (c) - 2014, Alex Meade. All rights reserved.
-# All Rights Reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
-from cinder.volume.drivers.netapp import api as netapp_api
-from cinder.volume.drivers.netapp.client import base
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
-class Client(base.Client):
+class Client(client_base.Client):
- def __init__(self, connection, vserver):
- super(Client, self).__init__(connection)
- self.vserver = vserver
+ def __init__(self, **kwargs):
+ super(Client, self).__init__(**kwargs)
+ self.vserver = kwargs.get('vserver', None)
+ self.connection.set_vserver(self.vserver)
+
+ # Default API version used to bootstrap the first API call.
+ self.connection.set_api_version(1, 15)
+ (major, minor) = self.get_ontapi_version(cached=False)
+ self.connection.set_api_version(major, minor)
def _invoke_vserver_api(self, na_element, vserver):
server = copy.copy(self.connection)
result = server.invoke_successfully(na_element, True)
return result
+ def set_vserver(self, vserver):
+ self.connection.set_vserver(vserver)
+
def get_target_details(self):
"""Gets the target portal details."""
iscsi_if_iter = netapp_api.NaElement('iscsi-interface-get-iter')
return None
def get_lun_list(self):
- """Gets the list of luns on filer.
+ """Gets the list of LUNs on filer.
- Gets the luns from cluster with vserver.
+ Gets the LUNs from cluster with vserver.
"""
luns = []
return luns
def get_lun_map(self, path):
- """Gets the lun map by lun path."""
+ """Gets the LUN map by LUN path."""
tag = None
map_list = []
while True:
zbc = block_count
if z_calls == 0:
z_calls = 1
- for call in range(0, z_calls):
+ for _call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
block_ranges = netapp_api.NaElement("block-ranges")
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
- for segment in range(0, segments):
+ for _segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
self.connection.invoke_successfully(clone_create, True)
def get_lun_by_args(self, **args):
- """Retrieves lun with specified args."""
+ """Retrieves LUN with specified args."""
lun_iter = netapp_api.NaElement('lun-get-iter')
lun_iter.add_new_child('max-records', '100')
query = netapp_api.NaElement('query')
return attr_list.get_children()
def file_assign_qos(self, flex_vol, qos_policy_group, file_path):
- """Retrieves lun with specified args."""
+ """Retrieves LUN with specified args."""
file_assign_qos = netapp_api.NaElement.create_node_with_children(
'file-assign-qos',
**{'volume': flex_vol,
query = netapp_api.NaElement('query')
net_if_iter.add_child_elem(query)
query.add_node_with_children(
- 'net-interface-info', **{'address': na_utils.resolve_hostname(ip)})
+ 'net-interface-info',
+ **{'address': na_utils.resolve_hostname(ip)})
result = self.connection.invoke_successfully(net_if_iter, True)
num_records = result.get_child_content('num-records')
if num_records and int(num_records) >= 1:
LOG.debug('file-usage for path %(path)s is %(bytes)s'
% {'path': path, 'bytes': unique_bytes})
return unique_bytes
+
+ def get_vserver_ips(self, vserver):
+ """Get ips for the vserver."""
+ result = netapp_api.invoke_api(
+ self.connection, api_name='net-interface-get-iter',
+ is_iter=True, tunnel=vserver)
+ if_list = []
+ for res in result:
+ records = res.get_child_content('num-records')
+ if records and int(records) > 0:
+ attr_list = res['attributes-list']
+ ifs = attr_list.get_children()
+ if_list.extend(ifs)
+ return if_list
+
+ def check_apis_on_cluster(self, api_list=None):
+ """Checks API availability and permissions on cluster.
+
+ Checks API availability and permissions for executing user.
+ Returns a list of failed apis.
+ """
+ api_list = api_list or []
+ failed_apis = []
+ if api_list:
+ api_version = self.connection.get_api_version()
+ if api_version:
+ major, minor = api_version
+ if major == 1 and minor < 20:
+ for api_name in api_list:
+ na_el = netapp_api.NaElement(api_name)
+ try:
+ self.connection.invoke_successfully(na_el)
+ except Exception as e:
+ if isinstance(e, netapp_api.NaApiError):
+ if e.code in (netapp_api.NaErrors['API_NOT_FOUND'].code,
+ netapp_api.NaErrors['INSUFFICIENT_PRIVS'].code):
+ failed_apis.append(api_name)
+ elif major == 1 and minor >= 20:
+ failed_apis = copy.copy(api_list)
+ result = netapp_api.invoke_api(
+ self.connection,
+ api_name='system-user-capability-get-iter',
+ api_family='cm',
+ additional_elems=None,
+ is_iter=True)
+ for res in result:
+ attr_list = res.get_child_by_name('attributes-list')
+ if attr_list:
+ capabilities = attr_list.get_children()
+ for capability in capabilities:
+ op_list = capability.get_child_by_name(
+ 'operation-list')
+ if op_list:
+ ops = op_list.get_children()
+ for op in ops:
+ apis = op.get_child_content(
+ 'api-name')
+ if apis:
+ api_list = apis.split(',')
+ for api_name in api_list:
+ if (api_name and
+ api_name.strip()
+ in failed_apis):
+ failed_apis.remove(
+ api_name)
+ else:
+ continue
+ else:
+ msg = _("Unsupported Clustered Data ONTAP version.")
+ raise exception.VolumeBackendAPIException(data=msg)
+ else:
+ msg = _("Data ONTAP API version could not be determined.")
+ raise exception.VolumeBackendAPIException(data=msg)
+ return failed_apis
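+
+# Illustrative use (API names are examples only):
+#
+# failed = client.check_apis_on_cluster(['storage-disk-get-iter',
+# 'snapmirror-get-iter'])
+# if failed:
+# LOG.warning("APIs unavailable or not permitted: %s", failed)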
--- /dev/null
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for NetApp Data ONTAP (7-mode) iSCSI storage systems.
+"""
+
+from cinder.openstack.common import log as logging
+from cinder.volume import driver
+from cinder.volume.drivers.netapp.dataontap.block_7mode import \
+ NetAppBlockStorage7modeLibrary as lib_7mode
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetApp7modeISCSIDriver(driver.ISCSIDriver):
+ """NetApp 7-mode iSCSI volume driver."""
+
+ DRIVER_NAME = 'NetApp_iSCSI_7mode_direct'
+
+ def __init__(self, *args, **kwargs):
+ super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs)
+ self.library = lib_7mode(self.DRIVER_NAME, 'iSCSI', **kwargs)
+
+ def do_setup(self, context):
+ self.library.do_setup(context)
+
+ def check_for_setup_error(self):
+ self.library.check_for_setup_error()
+
+ def create_volume(self, volume):
+ self.library.create_volume(volume)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ self.library.create_volume_from_snapshot(volume, snapshot)
+
+ def create_cloned_volume(self, volume, src_vref):
+ self.library.create_cloned_volume(volume, src_vref)
+
+ def delete_volume(self, volume):
+ self.library.delete_volume(volume)
+
+ def create_snapshot(self, snapshot):
+ self.library.create_snapshot(snapshot)
+
+ def delete_snapshot(self, snapshot):
+ self.library.delete_snapshot(snapshot)
+
+ def get_volume_stats(self, refresh=False):
+ return self.library.get_volume_stats(refresh)
+
+ def extend_volume(self, volume, new_size):
+ self.library.extend_volume(volume, new_size)
+
+ def ensure_export(self, context, volume):
+ return self.library.ensure_export(context, volume)
+
+ def create_export(self, context, volume):
+ return self.library.create_export(context, volume)
+
+ def remove_export(self, context, volume):
+ self.library.remove_export(context, volume)
+
+ def initialize_connection(self, volume, connector):
+ return self.library.initialize_connection_iscsi(volume, connector)
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ return self.library.terminate_connection_iscsi(volume, connector,
+ **kwargs)
+
+ def get_pool(self, volume):
+ return self.library.get_pool(volume)
--- /dev/null
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for NetApp Data ONTAP (C-mode) iSCSI storage systems.
+"""
+
+from cinder.openstack.common import log as logging
+from cinder.volume import driver
+from cinder.volume.drivers.netapp.dataontap.block_cmode import \
+ NetAppBlockStorageCmodeLibrary as lib_cmode
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
+ """NetApp C-mode iSCSI volume driver."""
+
+ DRIVER_NAME = 'NetApp_iSCSI_Cluster_direct'
+
+ def __init__(self, *args, **kwargs):
+ super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs)
+ self.library = lib_cmode(self.DRIVER_NAME, 'iSCSI', **kwargs)
+
+ def do_setup(self, context):
+ self.library.do_setup(context)
+
+ def check_for_setup_error(self):
+ self.library.check_for_setup_error()
+
+ def create_volume(self, volume):
+ self.library.create_volume(volume)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ self.library.create_volume_from_snapshot(volume, snapshot)
+
+ def create_cloned_volume(self, volume, src_vref):
+ self.library.create_cloned_volume(volume, src_vref)
+
+ def delete_volume(self, volume):
+ self.library.delete_volume(volume)
+
+ def create_snapshot(self, snapshot):
+ self.library.create_snapshot(snapshot)
+
+ def delete_snapshot(self, snapshot):
+ self.library.delete_snapshot(snapshot)
+
+ def get_volume_stats(self, refresh=False):
+ return self.library.get_volume_stats(refresh)
+
+ def extend_volume(self, volume, new_size):
+ self.library.extend_volume(volume, new_size)
+
+ def ensure_export(self, context, volume):
+ return self.library.ensure_export(context, volume)
+
+ def create_export(self, context, volume):
+ return self.library.create_export(context, volume)
+
+ def remove_export(self, context, volume):
+ self.library.remove_export(context, volume)
+
+ def initialize_connection(self, volume, connector):
+ return self.library.initialize_connection_iscsi(volume, connector)
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ return self.library.terminate_connection_iscsi(volume, connector,
+ **kwargs)
+
+ def get_pool(self, volume):
+ return self.library.get_pool(volume)
--- /dev/null
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Bob Callaway. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for NetApp NFS storage.
+"""
+
+from oslo.utils import units
+import six
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.netapp.dataontap.client import client_7mode
+from cinder.volume.drivers.netapp.dataontap import nfs_base
+from cinder.volume.drivers.netapp import utils as na_utils
+from cinder.volume import utils as volume_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
+ """NetApp NFS driver for Data ONTAP (7-mode)."""
+
+ def __init__(self, *args, **kwargs):
+ super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs)
+
+ def do_setup(self, context):
+ """Do the customized set up on client if any for 7 mode."""
+ super(NetApp7modeNfsDriver, self).do_setup(context)
+
+ self.zapi_client = client_7mode.Client(
+ transport_type=self.configuration.netapp_transport_type,
+ username=self.configuration.netapp_login,
+ password=self.configuration.netapp_password,
+ hostname=self.configuration.netapp_server_hostname,
+ port=self.configuration.netapp_server_port)
+
+ def check_for_setup_error(self):
+ """Checks if setup occurred properly."""
+ api_version = self.zapi_client.get_ontapi_version()
+ if api_version:
+ major, minor = api_version
+ if major == 1 and minor < 9:
+ msg = _("Unsupported Data ONTAP version."
+ " Data ONTAP version 7.3.1 and above is supported.")
+ raise exception.VolumeBackendAPIException(data=msg)
+ else:
+ msg = _("Data ONTAP API version could not be determined.")
+ raise exception.VolumeBackendAPIException(data=msg)
+ super(NetApp7modeNfsDriver, self).check_for_setup_error()
+
+ def create_volume(self, volume):
+ """Creates a volume.
+
+ :param volume: volume reference
+ """
+ LOG.debug('create_volume on %s' % volume['host'])
+ self._ensure_shares_mounted()
+
+ # get share as pool name
+ share = volume_utils.extract_host(volume['host'], level='pool')
+
+ if share is None:
+ msg = _("Pool is not available in the volume host field.")
+ raise exception.InvalidHost(reason=msg)
+
+ volume['provider_location'] = share
+ LOG.info(_LI('Creating volume at location %s')
+ % volume['provider_location'])
+
+ try:
+ self._do_create_volume(volume)
+ except Exception as ex:
+ LOG.error(_LE("Exception creating vol %(name)s on "
+ "share %(share)s. Details: %(ex)s")
+ % {'name': volume['name'],
+ 'share': volume['provider_location'],
+ 'ex': six.text_type(ex)})
+ msg = _("Volume %s could not be created on shares.")
+ raise exception.VolumeBackendAPIException(
+ data=msg % (volume['name']))
+
+ return {'provider_location': volume['provider_location']}
+
+ def _clone_volume(self, volume_name, clone_name,
+ volume_id, share=None):
+ """Clones mounted volume with NetApp filer."""
+ (_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
+ storage_path = self.zapi_client.get_actual_path_for_export(export_path)
+ target_path = '%s/%s' % (storage_path, clone_name)
+ self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
+ target_path)
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from vserver."""
+
+ self._ensure_shares_mounted()
+
+ LOG.debug('Updating volume stats')
+ data = {}
+ netapp_backend = 'NetApp_NFS_7mode_direct'
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ data['volume_backend_name'] = backend_name or netapp_backend
+ data['vendor_name'] = 'NetApp'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = 'nfs'
+ data['pools'] = self._get_pool_stats()
+
+ self._spawn_clean_cache_job()
+ self.zapi_client.provide_ems(self, netapp_backend, self._app_version,
+ server_type="7mode")
+ self._stats = data
+
+ def _get_pool_stats(self):
+ """Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
+
+ pools = []
+
+ for nfs_share in self._mounted_shares:
+
+ capacity = self._get_extended_capacity_info(nfs_share)
+
+ pool = dict()
+ pool['pool_name'] = nfs_share
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ # Report pool as reserved when over the configured used_ratio
+ if capacity['used_ratio'] > self.configuration.nfs_used_ratio:
+ pool['reserved_percentage'] = 100
+
+ # Report pool as reserved when over the subscribed ratio
+ if capacity['subscribed_ratio'] >=\
+ self.configuration.nfs_oversub_ratio:
+ pool['reserved_percentage'] = 100
+
+ # convert sizes to GB
+ total = float(capacity['apparent_size']) / units.Gi
+ pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
+
+ free = float(capacity['apparent_available']) / units.Gi
+ pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
+
+ pools.append(pool)
+
+ return pools
+
+ def _shortlist_del_eligible_files(self, share, old_files):
+ """Prepares list of eligible files to be deleted from cache."""
+ file_list = []
+ exp_volume = self.zapi_client.get_actual_path_for_export(share)
+ for file in old_files:
+ path = '/vol/%s/%s' % (exp_volume, file)
+ u_bytes = self.zapi_client.get_file_usage(path)
+ file_list.append((file, u_bytes))
+ LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
+ return file_list
+
+ def _is_filer_ip(self, ip):
+ """Checks whether ip is on the same filer."""
+ try:
+ ifconfig = self.zapi_client.get_ifconfig()
+ if_info = ifconfig.get_child_by_name('interface-config-info')
+ if if_info:
+ ifs = if_info.get_children()
+ for intf in ifs:
+ v4_addr = intf.get_child_by_name('v4-primary-address')
+ if v4_addr:
+ ip_info = v4_addr.get_child_by_name('ip-address-info')
+ if ip_info:
+ address = ip_info.get_child_content('address')
+ if ip == address:
+ return True
+ else:
+ continue
+ except Exception:
+ return False
+ return False
+
+ def _share_match_for_ip(self, ip, shares):
+ """Returns the share that is served by ip.
+
+        Multiple shares can have the same directory path but be served by
+        different IPs. This finds the share served by the given IP on the
+        same NFS server.
+ """
+ if self._is_filer_ip(ip) and shares:
+ for share in shares:
+ ip_sh = share.split(':')[0]
+ if self._is_filer_ip(ip_sh):
+ LOG.debug('Share match found for ip %s', ip)
+ return share
+ LOG.debug('No share match found for ip %s', ip)
+ return None
+
+ def _is_share_vol_compatible(self, volume, share):
+ """Checks if share is compatible with volume to host it."""
+ return self._is_share_eligible(share, volume['size'])
--- /dev/null
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Bob Callaway. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for NetApp NFS storage.
+"""
+
+import os
+import re
+from threading import Timer
+import time
+
+from oslo.concurrency import processutils
+from oslo.utils import excutils
+from oslo.utils import units
+import six.moves.urllib.parse as urlparse
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder.image import image_utils
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder.volume.drivers.netapp import options as na_opts
+from cinder.volume.drivers.netapp import utils as na_utils
+from cinder.volume.drivers import nfs
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetAppNfsDriver(nfs.NfsDriver):
+ """Base class for NetApp NFS driver for Data ONTAP."""
+
+ # do not increment this as it may be used in volume type definitions
+ VERSION = "1.0.0"
+ REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
+ 'netapp_server_hostname']
+
+ def __init__(self, *args, **kwargs):
+ na_utils.validate_instantiation(**kwargs)
+ self._execute = None
+ self._context = None
+ self._app_version = kwargs.pop("app_version", "unknown")
+ super(NetAppNfsDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(na_opts.netapp_connection_opts)
+ self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
+ self.configuration.append_config_values(na_opts.netapp_transport_opts)
+ self.configuration.append_config_values(na_opts.netapp_img_cache_opts)
+
+ def set_execute(self, execute):
+ self._execute = execute
+
+ def do_setup(self, context):
+ super(NetAppNfsDriver, self).do_setup(context)
+ self._context = context
+ na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
+
+ def check_for_setup_error(self):
+ """Returns an error if prerequisites aren't met."""
+ super(NetAppNfsDriver, self).check_for_setup_error()
+
+ def get_pool(self, volume):
+ """Return pool name where volume resides.
+
+ :param volume: The volume hosted by the driver.
+ :return: Name of the pool where given volume is hosted.
+ """
+ return volume['provider_location']
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot."""
+ vol_size = volume.size
+ snap_size = snapshot.volume_size
+
+ self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
+ share = self._get_volume_location(snapshot.volume_id)
+ volume['provider_location'] = share
+ path = self.local_path(volume)
+ run_as_root = self._execute_as_root
+
+ if self._discover_file_till_timeout(path):
+ self._set_rw_permissions(path)
+ if vol_size != snap_size:
+ try:
+ self.extend_volume(volume, vol_size)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(
+ _LE("Resizing %s failed. Cleaning volume."),
+ volume.name)
+ self._execute('rm', path, run_as_root=run_as_root)
+ else:
+ raise exception.CinderException(
+ _("NFS file %s not discovered.") % volume['name'])
+
+ return {'provider_location': volume['provider_location']}
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot."""
+ self._clone_volume(snapshot['volume_name'],
+ snapshot['name'],
+ snapshot['volume_id'])
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot."""
+ nfs_mount = self._get_provider_location(snapshot.volume_id)
+
+ if self._volume_not_present(nfs_mount, snapshot.name):
+ return True
+
+ self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
+ run_as_root=self._execute_as_root)
+
+ def _get_volume_location(self, volume_id):
+ """Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>."""
+ nfs_server_ip = self._get_host_ip(volume_id)
+ export_path = self._get_export_path(volume_id)
+ return nfs_server_ip + ':' + export_path
+
+ def _clone_volume(self, volume_name, clone_name, volume_id, share=None):
+ """Clones mounted volume using NetApp API."""
+ raise NotImplementedError()
+
+ def _get_provider_location(self, volume_id):
+ """Returns provider location for given volume."""
+ volume = self.db.volume_get(self._context, volume_id)
+ return volume.provider_location
+
+ def _get_host_ip(self, volume_id):
+ """Returns IP address for the given volume."""
+ return self._get_provider_location(volume_id).split(':')[0]
+
+ def _get_export_path(self, volume_id):
+ """Returns NFS export path for the given volume."""
+ return self._get_provider_location(volume_id).split(':')[1]
+
+ def _volume_not_present(self, nfs_mount, volume_name):
+ """Check if volume exists."""
+ try:
+ self._try_execute('ls', self._get_volume_path(nfs_mount,
+ volume_name))
+ except processutils.ProcessExecutionError:
+ # If the volume isn't present
+ return True
+ return False
+
+ def _try_execute(self, *command, **kwargs):
+ # NOTE(vish): Volume commands can partially fail due to timing, but
+ # running them a second time on failure will usually
+ # recover nicely.
+ tries = 0
+ while True:
+ try:
+ self._execute(*command, **kwargs)
+ return True
+ except processutils.ProcessExecutionError:
+ tries += 1
+ if tries >= self.configuration.num_shell_tries:
+ raise
+ LOG.exception(_LE("Recovering from a failed execute. "
+ "Try number %s"), tries)
+ time.sleep(tries ** 2)
+
+ def _get_volume_path(self, nfs_share, volume_name):
+ """Get volume path (local fs path) for given volume name on given nfs
+ share.
+
+ @param nfs_share string, example 172.18.194.100:/var/nfs
+ @param volume_name string,
+ example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
+ """
+
+ return os.path.join(self._get_mount_point_for_share(nfs_share),
+ volume_name)
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Creates a clone of the specified volume."""
+ vol_size = volume.size
+ src_vol_size = src_vref.size
+ self._clone_volume(src_vref.name, volume.name, src_vref.id)
+ share = self._get_volume_location(src_vref.id)
+ volume['provider_location'] = share
+ path = self.local_path(volume)
+
+ if self._discover_file_till_timeout(path):
+ self._set_rw_permissions(path)
+ if vol_size != src_vol_size:
+ try:
+ self.extend_volume(volume, vol_size)
+ except Exception as e:
+ LOG.error(
+ _LE("Resizing %s failed. Cleaning volume."),
+ volume.name)
+ self._execute('rm', path,
+ run_as_root=self._execute_as_root)
+ raise e
+ else:
+ raise exception.CinderException(
+ _("NFS file %s not discovered.") % volume['name'])
+
+ return {'provider_location': volume['provider_location']}
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from volume group."""
+ raise NotImplementedError()
+
+ def copy_image_to_volume(self, context, volume, image_service, image_id):
+ """Fetch the image from image_service and write it to the volume."""
+ super(NetAppNfsDriver, self).copy_image_to_volume(
+ context, volume, image_service, image_id)
+ LOG.info(_LI('Copied image to volume %s using regular download.'),
+ volume['name'])
+ self._register_image_in_cache(volume, image_id)
+
+ def _register_image_in_cache(self, volume, image_id):
+ """Stores image in the cache."""
+ file_name = 'img-cache-%s' % image_id
+ LOG.info(_LI("Registering image in cache %s"), file_name)
+ try:
+ self._do_clone_rel_img_cache(
+ volume['name'], file_name,
+ volume['provider_location'], file_name)
+ except Exception as e:
+ LOG.warning(_LW('Exception while registering image %(image_id)s'
+ ' in cache. Exception: %(exc)s')
+ % {'image_id': image_id, 'exc': e.__str__()})
+
+ def _find_image_in_cache(self, image_id):
+ """Finds image in cache and returns list of shares with file name."""
+ result = []
+ if getattr(self, '_mounted_shares', None):
+ for share in self._mounted_shares:
+ dir = self._get_mount_point_for_share(share)
+ file_name = 'img-cache-%s' % image_id
+ file_path = '%s/%s' % (dir, file_name)
+ if os.path.exists(file_path):
+ LOG.debug('Found cache file for image %(image_id)s'
+ ' on share %(share)s'
+ % {'image_id': image_id, 'share': share})
+ result.append((share, file_name))
+ return result
+
+ def _do_clone_rel_img_cache(self, src, dst, share, cache_file):
+ """Do clone operation w.r.t image cache file."""
+ @utils.synchronized(cache_file, external=True)
+ def _do_clone():
+ dir = self._get_mount_point_for_share(share)
+ file_path = '%s/%s' % (dir, dst)
+ if not os.path.exists(file_path):
+ LOG.info(_LI('Cloning from cache to destination %s'), dst)
+ self._clone_volume(src, dst, volume_id=None, share=share)
+ _do_clone()
+
+ @utils.synchronized('clean_cache')
+ def _spawn_clean_cache_job(self):
+ """Spawns a clean task if not running."""
+ if getattr(self, 'cleaning', None):
+ LOG.debug('Image cache cleaning in progress. Returning... ')
+ return
+ else:
+ # Set cleaning to True
+ self.cleaning = True
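+            # A zero-delay Timer runs the cache cleanup in a background thread.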
+ t = Timer(0, self._clean_image_cache)
+ t.start()
+
+ def _clean_image_cache(self):
+ """Clean the image cache files in cache of space crunch."""
+ try:
+ LOG.debug('Image cache cleaning in progress.')
+ thres_size_perc_start =\
+ self.configuration.thres_avl_size_perc_start
+ thres_size_perc_stop = \
+ self.configuration.thres_avl_size_perc_stop
+ for share in getattr(self, '_mounted_shares', []):
+ try:
+ total_size, total_avl, _total_alc = \
+ self._get_capacity_info(share)
+ avl_percent = int((total_avl / total_size) * 100)
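+                    # Cleaning starts once free space falls to the start
+                    # threshold and frees files until the stop threshold of
+                    # space is available again.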
+ if avl_percent <= thres_size_perc_start:
+ LOG.info(_LI('Cleaning cache for share %s.'), share)
+ eligible_files = self._find_old_cache_files(share)
+ threshold_size = int(
+ (thres_size_perc_stop * total_size) / 100)
+ bytes_to_free = int(threshold_size - total_avl)
+ LOG.debug('Files to be queued for deletion %s',
+ eligible_files)
+ self._delete_files_till_bytes_free(
+ eligible_files, share, bytes_to_free)
+ else:
+ continue
+ except Exception as e:
+ LOG.warning(_LW('Exception during cache cleaning'
+ ' %(share)s. Message - %(ex)s')
+ % {'share': share, 'ex': e.__str__()})
+ continue
+ finally:
+ LOG.debug('Image cache cleaning done.')
+ self.cleaning = False
+
+ def _shortlist_del_eligible_files(self, share, old_files):
+ """Prepares list of eligible files to be deleted from cache."""
+ raise NotImplementedError()
+
+ def _find_old_cache_files(self, share):
+ """Finds the old files in cache."""
+ mount_fs = self._get_mount_point_for_share(share)
+ threshold_minutes = self.configuration.expiry_thres_minutes
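+        # 'find ... -amin +N' matches files last accessed over N minutes ago.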
+ cmd = ['find', mount_fs, '-maxdepth', '1', '-name',
+ 'img-cache*', '-amin', '+%s' % threshold_minutes]
+ res, _err = self._execute(*cmd, run_as_root=self._execute_as_root)
+ if res:
+ old_file_paths = res.strip('\n').split('\n')
+ mount_fs_len = len(mount_fs)
+ old_files = [x[mount_fs_len + 1:] for x in old_file_paths]
+ eligible_files = self._shortlist_del_eligible_files(
+ share, old_files)
+ return eligible_files
+ return []
+
+ def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0):
+ """Delete files from disk till bytes are freed or list exhausted."""
+ LOG.debug('Bytes to free %s', bytes_to_free)
+ if file_list and bytes_to_free > 0:
+ sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True)
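+            # Delete the largest cache files first to free space quickly.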
+ mount_fs = self._get_mount_point_for_share(share)
+ for f in sorted_files:
+ if f:
+ file_path = '%s/%s' % (mount_fs, f[0])
+ LOG.debug('Delete file path %s', file_path)
+
+ @utils.synchronized(f[0], external=True)
+ def _do_delete():
+ if self._delete_file(file_path):
+ return True
+ return False
+
+ if _do_delete():
+ bytes_to_free -= int(f[1])
+ if bytes_to_free <= 0:
+ return
+
+ def _delete_file(self, path):
+ """Delete file from disk and return result as boolean."""
+ try:
+ LOG.debug('Deleting file at path %s', path)
+ cmd = ['rm', '-f', path]
+ self._execute(*cmd, run_as_root=self._execute_as_root)
+ return True
+ except Exception as ex:
+ LOG.warning(_LW('Exception during deleting %s'), ex.__str__())
+ return False
+
+ def clone_image(self, volume, image_location, image_id, image_meta):
+ """Create a volume efficiently from an existing image.
+
+ image_location is a string whose format depends on the
+ image service backend in use. The driver should use it
+ to determine whether cloning is possible.
+
+ image_id is a string which represents id of the image.
+ It can be used by the driver to introspect internal
+ stores or registry to do an efficient image clone.
+
+ Returns a dict of volume properties eg. provider_location,
+ boolean indicating whether cloning occurred.
+ """
+
+ cloned = False
+ post_clone = False
+ try:
+ cache_result = self._find_image_in_cache(image_id)
+ if cache_result:
+ cloned = self._clone_from_cache(volume, image_id, cache_result)
+ else:
+ cloned = self._direct_nfs_clone(volume, image_location,
+ image_id)
+ if cloned:
+ post_clone = self._post_clone_image(volume)
+ except Exception as e:
+ msg = e.msg if getattr(e, 'msg', None) else e.__str__()
+ LOG.info(_LI('Image cloning unsuccessful for image'
+ ' %(image_id)s. Message: %(msg)s')
+ % {'image_id': image_id, 'msg': msg})
+ vol_path = self.local_path(volume)
+ volume['provider_location'] = None
+ if os.path.exists(vol_path):
+ self._delete_file(vol_path)
+ finally:
+ cloned = cloned and post_clone
+ share = volume['provider_location'] if cloned else None
+ bootable = True if cloned else False
+ return {'provider_location': share, 'bootable': bootable}, cloned
+
+ def _clone_from_cache(self, volume, image_id, cache_result):
+ """Clones a copy from image cache."""
+ cloned = False
+ LOG.info(_LI('Cloning image %s from cache'), image_id)
+ for res in cache_result:
+ # Repeat tries in other shares if failed in some
+ (share, file_name) = res
+ LOG.debug('Cache share: %s', share)
+ if (share and
+ self._is_share_vol_compatible(volume, share)):
+ try:
+ self._do_clone_rel_img_cache(
+ file_name, volume['name'], share, file_name)
+ cloned = True
+ volume['provider_location'] = share
+ break
+ except Exception:
+ LOG.warning(_LW('Unexpected exception during'
+ ' image cloning in share %s'), share)
+ return cloned
+
+ def _direct_nfs_clone(self, volume, image_location, image_id):
+ """Clone directly in nfs share."""
+ LOG.info(_LI('Checking image clone %s from glance share.'), image_id)
+ cloned = False
+ image_location = self._construct_image_nfs_url(image_location)
+ share = self._is_cloneable_share(image_location)
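+        # Cloning is only possible when the glance NFS location maps onto one
+        # of this backend's mounted shares.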
+ run_as_root = self._execute_as_root
+
+ if share and self._is_share_vol_compatible(volume, share):
+ LOG.debug('Share is cloneable %s', share)
+ volume['provider_location'] = share
+ (__, ___, img_file) = image_location.rpartition('/')
+ dir_path = self._get_mount_point_for_share(share)
+ img_path = '%s/%s' % (dir_path, img_file)
+ img_info = image_utils.qemu_img_info(img_path,
+ run_as_root=run_as_root)
+ if img_info.file_format == 'raw':
+ LOG.debug('Image is raw %s', image_id)
+ self._clone_volume(
+ img_file, volume['name'],
+ volume_id=None, share=share)
+ cloned = True
+ else:
+ LOG.info(
+ _LI('Image will locally be converted to raw %s'),
+ image_id)
+ dst = '%s/%s' % (dir_path, volume['name'])
+ image_utils.convert_image(img_path, dst, 'raw',
+ run_as_root=run_as_root)
+ data = image_utils.qemu_img_info(dst, run_as_root=run_as_root)
+ if data.file_format != "raw":
+ raise exception.InvalidResults(
+ _("Converted to raw, but"
+ " format is now %s") % data.file_format)
+ else:
+ cloned = True
+ self._register_image_in_cache(
+ volume, image_id)
+ return cloned
+
+ def _post_clone_image(self, volume):
+ """Do operations post image cloning."""
+ LOG.info(_LI('Performing post clone for %s'), volume['name'])
+ vol_path = self.local_path(volume)
+ if self._discover_file_till_timeout(vol_path):
+ self._set_rw_permissions(vol_path)
+ self._resize_image_file(vol_path, volume['size'])
+ return True
+ raise exception.InvalidResults(
+ _("NFS file could not be discovered."))
+
+ def _resize_image_file(self, path, new_size):
+ """Resize the image file on share to new size."""
+ LOG.debug('Checking file for resize')
+ if self._is_file_size_equal(path, new_size):
+ return
+ else:
+ LOG.info(_LI('Resizing file to %sG'), new_size)
+ image_utils.resize_image(path, new_size,
+ run_as_root=self._execute_as_root)
+ if self._is_file_size_equal(path, new_size):
+ return
+ else:
+ raise exception.InvalidResults(
+ _('Resizing image file failed.'))
+
+ def _is_file_size_equal(self, path, size):
+ """Checks if file size at path is equal to size."""
+ data = image_utils.qemu_img_info(path,
+ run_as_root=self._execute_as_root)
+ virt_size = data.virtual_size / units.Gi
+ if virt_size == size:
+ return True
+ else:
+ return False
+
+ def _discover_file_till_timeout(self, path, timeout=45):
+ """Checks if file size at path is equal to size."""
+ # Sometimes nfs takes time to discover file
+ # Retrying in case any unexpected situation occurs
+ retry_seconds = timeout
+ sleep_interval = 2
+ while True:
+ if os.path.exists(path):
+ return True
+ else:
+ if retry_seconds <= 0:
+ LOG.warning(_LW('Discover file retries exhausted.'))
+ return False
+ else:
+ time.sleep(sleep_interval)
+ retry_seconds -= sleep_interval
+
+ def _is_cloneable_share(self, image_location):
+ """Finds if the image at location is cloneable."""
+ conn, dr = self._check_get_nfs_path_segs(image_location)
+ return self._check_share_in_use(conn, dr)
+
+ def _check_get_nfs_path_segs(self, image_location):
+ """Checks if the nfs path format is matched.
+
+        The WebNFS URL format with a relative path is supported. All
+        characters are accepted in path names and later checked against the
+        mounted shares, which contain only allowed path segments. Returns
+        connection and directory details.
+ """
+ conn, dr = None, None
+ if image_location:
+ nfs_loc_pattern = \
+ ('^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)'
+ '*(/[^\/\\\\]+)$)')
+ matched = re.match(nfs_loc_pattern, image_location, flags=0)
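+            # group(2) captures the host (optionally host:port) and group(3)
+            # captures the directory portion of the export path.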
+ if not matched:
+ LOG.debug('Image location not in the'
+ ' expected format %s', image_location)
+ else:
+ conn = matched.group(2)
+ dr = matched.group(3) or '/'
+ return conn, dr
+
+ def _share_match_for_ip(self, ip, shares):
+ """Returns the share that is served by ip.
+
+        Multiple shares can have the same directory path but be served by
+        different IPs. This finds the share served by the given IP on the
+        same NFS server.
+ """
+ raise NotImplementedError()
+
+ def _check_share_in_use(self, conn, dir):
+ """Checks if share is cinder mounted and returns it."""
+ try:
+ if conn:
+ host = conn.split(':')[0]
+ ip = na_utils.resolve_hostname(host)
+ share_candidates = []
+ for sh in self._mounted_shares:
+ sh_exp = sh.split(':')[1]
+ if sh_exp == dir:
+ share_candidates.append(sh)
+ if share_candidates:
+ LOG.debug('Found possible share matches %s',
+ share_candidates)
+ return self._share_match_for_ip(ip, share_candidates)
+ except Exception:
+ LOG.warning(_LW("Unexpected exception while "
+ "short listing used share."))
+ return None
+
+ def _construct_image_nfs_url(self, image_location):
+ """Construct direct url for nfs backend.
+
+        It creates a direct URL from image_location, which is a tuple of
+        direct_url and locations. Returns a URL with the nfs scheme if the
+        image is in an NFS store, otherwise returns the URL unchanged. The
+        result needs to be verified by the backend before use.
+ """
+
+ direct_url, locations = image_location
+ if not direct_url and not locations:
+ raise exception.NotFound(_('Image location not present.'))
+
+ # Locations will be always a list of one until
+ # bp multiple-image-locations is introduced
+ if not locations:
+ return direct_url
+ location = locations[0]
+ url = location['url']
+ if not location['metadata']:
+ return url
+ location_type = location['metadata'].get('type')
+ if not location_type or location_type.lower() != "nfs":
+ return url
+ share_location = location['metadata'].get('share_location')
+ mount_point = location['metadata'].get('mount_point')
+ if not share_location or not mount_point:
+ return url
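+        # Re-root the image path, taken relative to the glance mount point,
+        # onto the share location to form a direct NFS URL.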
+ url_parse = urlparse.urlparse(url)
+ abs_path = os.path.join(url_parse.netloc, url_parse.path)
+ rel_path = os.path.relpath(abs_path, mount_point)
+ direct_url = "%s/%s" % (share_location, rel_path)
+ return direct_url
+
+ def extend_volume(self, volume, new_size):
+ """Extend an existing volume to the new size."""
+ LOG.info(_LI('Extending volume %s.'), volume['name'])
+ path = self.local_path(volume)
+ self._resize_image_file(path, new_size)
+
+ def _is_share_vol_compatible(self, volume, share):
+ """Checks if share is compatible with volume to host it."""
+ raise NotImplementedError()
+
+ def _check_share_can_hold_size(self, share, size):
+ """Checks if volume can hold image with size."""
+ _tot_size, tot_available, _tot_allocated = self._get_capacity_info(
+ share)
+ if tot_available < size:
+ msg = _("Container size smaller than required file size.")
+ raise exception.VolumeDriverException(msg)
+
+ def _move_nfs_file(self, source_path, dest_path):
+ """Moves source to destination."""
+
+ @utils.synchronized(dest_path, external=True)
+ def _move_file(src, dst):
+ if os.path.exists(dst):
+ LOG.warning(_LW("Destination %s already exists."), dst)
+ return False
+ self._execute('mv', src, dst, run_as_root=self._execute_as_root)
+ return True
+
+ try:
+ return _move_file(source_path, dest_path)
+ except Exception as e:
+ LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s')
+ % {'src': source_path, 'e': e})
+ return False
+
+ def _get_export_ip_path(self, volume_id=None, share=None):
+ """Returns export ip and path.
+
+ One of volume id or share is used to return the values.
+ """
+
+ if volume_id:
+ host_ip = self._get_host_ip(volume_id)
+ export_path = self._get_export_path(volume_id)
+ elif share:
+ host_ip = share.split(':')[0]
+ export_path = share.split(':')[1]
+ else:
+            raise exception.InvalidInput(
+                reason=_('A volume ID or share was not specified.'))
+ return host_ip, export_path
+
+ def _get_extended_capacity_info(self, nfs_share):
+ """Returns an extended set of share capacity metrics."""
+
+ total_size, total_available, total_allocated = \
+ self._get_capacity_info(nfs_share)
+
+ used_ratio = (total_size - total_available) / total_size
+ subscribed_ratio = total_allocated / total_size
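+        # Apparent sizes scale raw capacity by nfs_used_ratio so the
+        # scheduler never sees space beyond the configured usage threshold.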
+ apparent_size = max(0, total_size * self.configuration.nfs_used_ratio)
+ apparent_available = max(0, apparent_size - total_allocated)
+
+ return {'total_size': total_size, 'total_available': total_available,
+ 'total_allocated': total_allocated, 'used_ratio': used_ratio,
+ 'subscribed_ratio': subscribed_ratio,
+ 'apparent_size': apparent_size,
+ 'apparent_available': apparent_available}
--- /dev/null
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2014 Bob Callaway. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for NetApp NFS storage.
+"""
+
+import os
+import uuid
+
+from oslo.utils import units
+import six
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder.image import image_utils
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap import nfs_base
+from cinder.volume.drivers.netapp.dataontap import ssc_cmode
+from cinder.volume.drivers.netapp import options as na_opts
+from cinder.volume.drivers.netapp import utils as na_utils
+from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
+from cinder.volume import utils as volume_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
+ """NetApp NFS driver for Data ONTAP (Cluster-mode)."""
+
+ REQUIRED_CMODE_FLAGS = ['netapp_vserver']
+
+ def __init__(self, *args, **kwargs):
+ super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(na_opts.netapp_cluster_opts)
+ self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts)
+
+ def do_setup(self, context):
+ """Do the customized set up on client for cluster mode."""
+ super(NetAppCmodeNfsDriver, self).do_setup(context)
+ na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
+
+ self.vserver = self.configuration.netapp_vserver
+
+ self.zapi_client = client_cmode.Client(
+ transport_type=self.configuration.netapp_transport_type,
+ username=self.configuration.netapp_login,
+ password=self.configuration.netapp_password,
+ hostname=self.configuration.netapp_server_hostname,
+ port=self.configuration.netapp_server_port,
+ vserver=self.vserver)
+
+ self.ssc_enabled = True
+ self.ssc_vols = None
+ self.stale_vols = set()
+
+ def check_for_setup_error(self):
+ """Check that the driver is working and can communicate."""
+ super(NetAppCmodeNfsDriver, self).check_for_setup_error()
+ ssc_cmode.check_ssc_api_permissions(self.zapi_client)
+
+ def create_volume(self, volume):
+ """Creates a volume.
+
+ :param volume: volume reference
+ """
+ LOG.debug('create_volume on %s' % volume['host'])
+ self._ensure_shares_mounted()
+
+ # get share as pool name
+ share = volume_utils.extract_host(volume['host'], level='pool')
+
+ if share is None:
+ msg = _("Pool is not available in the volume host field.")
+ raise exception.InvalidHost(reason=msg)
+
+ extra_specs = get_volume_extra_specs(volume)
+ qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
+ if extra_specs else None
+
+ # warn on obsolete extra specs
+ na_utils.log_extra_spec_warnings(extra_specs)
+
+ try:
+ volume['provider_location'] = share
+            LOG.info(_LI('Creating volume at location %s')
+                     % volume['provider_location'])
+ self._do_create_volume(volume)
+ if qos_policy_group:
+ self._set_qos_policy_group_on_volume(volume, share,
+ qos_policy_group)
+ return {'provider_location': volume['provider_location']}
+ except Exception as ex:
+            LOG.error(_LE("Exception creating vol %(name)s on "
+ "share %(share)s. Details: %(ex)s")
+ % {'name': volume['name'],
+ 'share': volume['provider_location'],
+ 'ex': ex})
+ volume['provider_location'] = None
+ finally:
+ if self.ssc_enabled:
+ self._update_stale_vols(self._get_vol_for_share(share))
+
+ msg = _("Volume %s could not be created on shares.")
+ raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
+
+ def _set_qos_policy_group_on_volume(self, volume, share, qos_policy_group):
+ target_path = '%s' % (volume['name'])
+ export_path = share.split(':')[1]
+ flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver,
+ export_path)
+ self.zapi_client.file_assign_qos(flex_vol_name,
+ qos_policy_group,
+ target_path)
+
+ def _clone_volume(self, volume_name, clone_name,
+ volume_id, share=None):
+ """Clones mounted volume on NetApp Cluster."""
+ (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
+ self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
+ vserver)
+ share = share if share else self._get_provider_location(volume_id)
+ self._post_prov_deprov_in_ssc(share)
+
+ def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
+ """Gets the vserver and export volume for share."""
+ (host_ip, export_path) = self._get_export_ip_path(volume_id, share)
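+        # Look up the LIF serving the export IP to learn its vserver, then
+        # resolve the junction path to the backing FlexVol.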
+ ifs = self.zapi_client.get_if_info_by_ip(host_ip)
+ vserver = ifs[0].get_child_content('vserver')
+ exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver,
+ export_path)
+ return vserver, exp_volume
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from vserver."""
+
+ self._ensure_shares_mounted()
+ sync = True if self.ssc_vols is None else False
+ ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.connection,
+ self.vserver, synchronous=sync)
+
+ LOG.debug('Updating volume stats')
+ data = {}
+ netapp_backend = 'NetApp_NFS_Cluster_direct'
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ data['volume_backend_name'] = backend_name or netapp_backend
+ data['vendor_name'] = 'NetApp'
+ data['driver_version'] = self.VERSION
+ data['storage_protocol'] = 'nfs'
+ data['pools'] = self._get_pool_stats()
+
+ self._spawn_clean_cache_job()
+ self.zapi_client.provide_ems(self, netapp_backend, self._app_version)
+ self._stats = data
+
+ def _get_pool_stats(self):
+ """Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
+
+ pools = []
+
+ for nfs_share in self._mounted_shares:
+
+ capacity = self._get_extended_capacity_info(nfs_share)
+
+ pool = dict()
+ pool['pool_name'] = nfs_share
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ # Report pool as reserved when over the configured used_ratio
+ if capacity['used_ratio'] > self.configuration.nfs_used_ratio:
+ pool['reserved_percentage'] = 100
+
+ # Report pool as reserved when over the subscribed ratio
+ if capacity['subscribed_ratio'] >=\
+ self.configuration.nfs_oversub_ratio:
+ pool['reserved_percentage'] = 100
+
+ # convert sizes to GB
+ total = float(capacity['apparent_size']) / units.Gi
+ pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
+
+ free = float(capacity['apparent_available']) / units.Gi
+ pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
+
+ # add SSC content if available
+ vol = self._get_vol_for_share(nfs_share)
+ if vol and self.ssc_vols:
+ pool['netapp_raid_type'] = vol.aggr['raid_type']
+ pool['netapp_disk_type'] = vol.aggr['disk_type']
+
+ mirrored = vol in self.ssc_vols['mirrored']
+ pool['netapp_mirrored'] = six.text_type(mirrored).lower()
+ pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
+
+ dedup = vol in self.ssc_vols['dedup']
+ pool['netapp_dedup'] = six.text_type(dedup).lower()
+ pool['netapp_nodedup'] = six.text_type(not dedup).lower()
+
+ compression = vol in self.ssc_vols['compression']
+ pool['netapp_compression'] = six.text_type(compression).lower()
+ pool['netapp_nocompression'] = six.text_type(
+ not compression).lower()
+
+ thin = vol in self.ssc_vols['thin']
+ pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
+ pool['netapp_thick_provisioned'] = six.text_type(
+ not thin).lower()
+
+ pools.append(pool)
+
+ return pools
+
+ @utils.synchronized('update_stale')
+ def _update_stale_vols(self, volume=None, reset=False):
+ """Populates stale vols with vol and returns set copy."""
+ if volume:
+ self.stale_vols.add(volume)
+ set_copy = self.stale_vols.copy()
+ if reset:
+ self.stale_vols.clear()
+ return set_copy
+
+ @utils.synchronized("refresh_ssc_vols")
+ def refresh_ssc_vols(self, vols):
+ """Refreshes ssc_vols with latest entries."""
+ if not self._mounted_shares:
+ LOG.warning(_LW("No shares found hence skipping ssc refresh."))
+ return
+ mnt_share_vols = set()
+ vs_ifs = self.zapi_client.get_vserver_ips(self.vserver)
+ for vol in vols['all']:
+ for sh in self._mounted_shares:
+ host = sh.split(':')[0]
+ junction = sh.split(':')[1]
+ ip = na_utils.resolve_hostname(host)
+ if (self._ip_in_ifs(ip, vs_ifs) and
+ junction == vol.id['junction_path']):
+ mnt_share_vols.add(vol)
+ vol.export['path'] = sh
+ break
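+        # Intersect each SSC category with the volumes that actually back a
+        # mounted share, so unrelated cluster volumes are dropped.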
+ for key in vols.keys():
+ vols[key] = vols[key] & mnt_share_vols
+ self.ssc_vols = vols
+
+ def _ip_in_ifs(self, ip, api_ifs):
+ """Checks if ip is listed for ifs in API format."""
+ if api_ifs is None:
+ return False
+ for ifc in api_ifs:
+ ifc_ip = ifc.get_child_content("address")
+ if ifc_ip == ip:
+ return True
+ return False
+
+ def _shortlist_del_eligible_files(self, share, old_files):
+ """Prepares list of eligible files to be deleted from cache."""
+ file_list = []
+ (vserver, exp_volume) = self._get_vserver_and_exp_vol(
+ volume_id=None, share=share)
+ for file in old_files:
+ path = '/vol/%s/%s' % (exp_volume, file)
+ u_bytes = self.zapi_client.get_file_usage(path, vserver)
+ file_list.append((file, u_bytes))
+ LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
+ return file_list
+
+ def _share_match_for_ip(self, ip, shares):
+ """Returns the share that is served by ip.
+
+        Multiple shares can have the same directory path but be served by
+        different IPs. This finds the share served by the given IP on the
+        same NFS server.
+ """
+ ip_vserver = self._get_vserver_for_ip(ip)
+ if ip_vserver and shares:
+ for share in shares:
+ ip_sh = share.split(':')[0]
+ sh_vserver = self._get_vserver_for_ip(ip_sh)
+ if sh_vserver == ip_vserver:
+ LOG.debug('Share match found for ip %s', ip)
+ return share
+ LOG.debug('No share match found for ip %s', ip)
+ return None
+
+ def _get_vserver_for_ip(self, ip):
+ """Get vserver for the mentioned ip."""
+ try:
+ ifs = self.zapi_client.get_if_info_by_ip(ip)
+ vserver = ifs[0].get_child_content('vserver')
+ return vserver
+ except Exception:
+ return None
+
+ def _get_vol_for_share(self, nfs_share):
+ """Gets the ssc vol with given share."""
+ if self.ssc_vols:
+ for vol in self.ssc_vols['all']:
+ if vol.export['path'] == nfs_share:
+ return vol
+ return None
+
+ def _is_share_vol_compatible(self, volume, share):
+ """Checks if share is compatible with volume to host it."""
+ compatible = self._is_share_eligible(share, volume['size'])
+ if compatible and self.ssc_enabled:
+ matched = self._is_share_vol_type_match(volume, share)
+ compatible = compatible and matched
+ return compatible
+
+ def _is_share_vol_type_match(self, volume, share):
+ """Checks if share matches volume type."""
+ netapp_vol = self._get_vol_for_share(share)
+ LOG.debug("Found volume %(vol)s for share %(share)s."
+ % {'vol': netapp_vol, 'share': share})
+ extra_specs = get_volume_extra_specs(volume)
+ vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
+ return netapp_vol in vols
+
+ def delete_volume(self, volume):
+ """Deletes a logical volume."""
+ share = volume['provider_location']
+ super(NetAppCmodeNfsDriver, self).delete_volume(volume)
+ self._post_prov_deprov_in_ssc(share)
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot."""
+ share = self._get_provider_location(snapshot.volume_id)
+ super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot)
+ self._post_prov_deprov_in_ssc(share)
+
+ def _post_prov_deprov_in_ssc(self, share):
+ if self.ssc_enabled and share:
+ netapp_vol = self._get_vol_for_share(share)
+ if netapp_vol:
+ self._update_stale_vols(volume=netapp_vol)
+
+ def copy_image_to_volume(self, context, volume, image_service, image_id):
+ """Fetch the image from image_service and write it to the volume."""
+ copy_success = False
+ try:
+ major, minor = self.zapi_client.get_ontapi_version()
+ col_path = self.configuration.netapp_copyoffload_tool_path
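+            # Copy offload needs ONTAPI 1.20 or later and a configured
+            # copy offload tool path.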
+ if major == 1 and minor >= 20 and col_path:
+ self._try_copyoffload(context, volume, image_service, image_id)
+ copy_success = True
+ LOG.info(_LI('Copied image %(img)s to volume %(vol)s using '
+ 'copy offload workflow.')
+ % {'img': image_id, 'vol': volume['id']})
+ else:
+ LOG.debug("Copy offload either not configured or"
+ " unsupported.")
+ except Exception as e:
+ LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e)
+ finally:
+ if not copy_success:
+ super(NetAppCmodeNfsDriver, self).copy_image_to_volume(
+ context, volume, image_service, image_id)
+ if self.ssc_enabled:
+ sh = self._get_provider_location(volume['id'])
+ self._update_stale_vols(self._get_vol_for_share(sh))
+
+ def _try_copyoffload(self, context, volume, image_service, image_id):
+ """Tries server side file copy offload."""
+ copied = False
+ cache_result = self._find_image_in_cache(image_id)
+ if cache_result:
+ copied = self._copy_from_cache(volume, image_id, cache_result)
+ if not cache_result or not copied:
+ self._copy_from_img_service(context, volume, image_service,
+ image_id)
+
+ def _get_ip_verify_on_cluster(self, host):
+ """Verifies if host on same cluster and returns ip."""
+ ip = na_utils.resolve_hostname(host)
+ vserver = self._get_vserver_for_ip(ip)
+ if not vserver:
+ raise exception.NotFound(_("Unable to locate an SVM that is "
+ "managing the IP address '%s'") % ip)
+ return ip
+
+ def _copy_from_cache(self, volume, image_id, cache_result):
+ """Try copying image file_name from cached file_name."""
+ LOG.debug("Trying copy from cache using copy offload.")
+ copied = False
+ for res in cache_result:
+ try:
+ (share, file_name) = res
+ LOG.debug("Found cache file_name on share %s.", share)
+ if share != self._get_provider_location(volume['id']):
+ col_path = self.configuration.netapp_copyoffload_tool_path
+ src_ip = self._get_ip_verify_on_cluster(
+ share.split(':')[0])
+ src_path = os.path.join(share.split(':')[1], file_name)
+ dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
+ volume['id']))
+ dst_path = os.path.join(
+ self._get_export_path(volume['id']), volume['name'])
+ self._execute(col_path, src_ip, dst_ip,
+ src_path, dst_path,
+ run_as_root=self._execute_as_root,
+ check_exit_code=0)
+ self._register_image_in_cache(volume, image_id)
+ LOG.debug("Copied image from cache to volume %s using"
+ " copy offload.", volume['id'])
+ else:
+ self._clone_file_dst_exists(share, file_name,
+ volume['name'],
+ dest_exists=True)
+ LOG.debug("Copied image from cache to volume %s using"
+ " cloning.", volume['id'])
+ self._post_clone_image(volume)
+ copied = True
+ break
+ except Exception as e:
+ LOG.exception(_LE('Error in workflow copy from cache. %s.'), e)
+ return copied
+
+ def _clone_file_dst_exists(self, share, src_name, dst_name,
+ dest_exists=False):
+ """Clone file even if dest exists."""
+ (vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share)
+ self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver,
+ dest_exists=dest_exists)
+
+ def _copy_from_img_service(self, context, volume, image_service,
+ image_id):
+ """Copies from the image service using copy offload."""
+ LOG.debug("Trying copy from image service using copy offload.")
+ image_loc = image_service.get_location(context, image_id)
+ image_loc = self._construct_image_nfs_url(image_loc)
+ conn, dr = self._check_get_nfs_path_segs(image_loc)
+ if conn:
+ src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
+ else:
+ raise exception.NotFound(_("Source host details not found."))
+ (__, ___, img_file) = image_loc.rpartition('/')
+ src_path = os.path.join(dr, img_file)
+ dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
+ volume['id']))
+ # tmp file is required to deal with img formats
+ tmp_img_file = six.text_type(uuid.uuid4())
+ col_path = self.configuration.netapp_copyoffload_tool_path
+ img_info = image_service.show(context, image_id)
+ dst_share = self._get_provider_location(volume['id'])
+ self._check_share_can_hold_size(dst_share, img_info['size'])
+ run_as_root = self._execute_as_root
+
+ dst_dir = self._get_mount_point_for_share(dst_share)
+ dst_img_local = os.path.join(dst_dir, tmp_img_file)
+ try:
+ # If src and dst share not equal
+ if (('%s:%s' % (src_ip, dr)) !=
+ ('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
+ dst_img_serv_path = os.path.join(
+ self._get_export_path(volume['id']), tmp_img_file)
+ self._execute(col_path, src_ip, dst_ip, src_path,
+ dst_img_serv_path, run_as_root=run_as_root,
+ check_exit_code=0)
+ else:
+ self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
+ self._discover_file_till_timeout(dst_img_local, timeout=120)
+ LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
+ % {'img': image_id, 'tmp': tmp_img_file})
+ dst_img_cache_local = os.path.join(dst_dir,
+ 'img-cache-%s' % image_id)
+ if img_info['disk_format'] == 'raw':
+ LOG.debug('Image is raw %s.', image_id)
+ self._clone_file_dst_exists(dst_share, tmp_img_file,
+ volume['name'], dest_exists=True)
+ self._move_nfs_file(dst_img_local, dst_img_cache_local)
+ LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
+ % {'img': image_id, 'vol': volume['id']})
+ else:
+ LOG.debug('Image will be converted to raw %s.', image_id)
+ img_conv = six.text_type(uuid.uuid4())
+ dst_img_conv_local = os.path.join(dst_dir, img_conv)
+
+ # Checking against image size which is approximate check
+ self._check_share_can_hold_size(dst_share, img_info['size'])
+ try:
+ image_utils.convert_image(dst_img_local,
+ dst_img_conv_local, 'raw',
+ run_as_root=run_as_root)
+ data = image_utils.qemu_img_info(dst_img_conv_local,
+ run_as_root=run_as_root)
+ if data.file_format != "raw":
+ raise exception.InvalidResults(
+ _("Converted to raw, but format is now %s.")
+ % data.file_format)
+ else:
+ self._clone_file_dst_exists(dst_share, img_conv,
+ volume['name'],
+ dest_exists=True)
+ self._move_nfs_file(dst_img_conv_local,
+ dst_img_cache_local)
+ LOG.debug('Copied locally converted raw image'
+ ' %(img)s to volume %(vol)s.'
+ % {'img': image_id, 'vol': volume['id']})
+ finally:
+ if os.path.exists(dst_img_conv_local):
+ self._delete_file(dst_img_conv_local)
+ self._post_clone_image(volume)
+ finally:
+ if os.path.exists(dst_img_local):
+ self._delete_file(dst_img_local)
-# Copyright (c) 2012 NetApp, Inc.
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
import six
from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import log as logging
from cinder import utils
-from cinder.volume import driver
-from cinder.volume.drivers.netapp import api
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
'volume-space-attributes',
'volume-state-attributes',
'volume-qos-attributes']}
- result = na_utils.invoke_api(na_server, api_name='volume-get-iter',
- api_family='cm', query=query,
- des_result=des_attr,
- additional_elems=None,
- is_iter=True)
+ result = netapp_api.invoke_api(na_server, api_name='volume-get-iter',
+ api_family='cm', query=query,
+ des_result=des_attr,
+ additional_elems=None,
+ is_iter=True)
vols = set()
for res in result:
records = res.get_child_content('num-records')
add_elems = {'aggregate': aggr_name}
attrs = {}
try:
- result = na_utils.invoke_api(na_server,
- api_name='aggr-options-list-info',
- api_family='cm', query=None,
- des_result=None,
- additional_elems=add_elems,
- is_iter=False)
+ result = netapp_api.invoke_api(na_server,
+ api_name='aggr-options-list-info',
+ api_family='cm', query=None,
+ des_result=None,
+ additional_elems=add_elems,
+ is_iter=False)
for res in result:
options = res.get_child_by_name('options')
if options:
query_attr['path'] = vol_path
query = {'sis-status-info': query_attr}
try:
- result = na_utils.invoke_api(na_server,
- api_name='sis-get-iter',
- api_family='cm',
- query=query,
- is_iter=True)
+ result = netapp_api.invoke_api(na_server,
+ api_name='sis-get-iter',
+ api_family='cm',
+ query=query,
+ is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
query_attr['source-volume'] = volume
query = {'snapmirror-info': query_attr}
try:
- result = na_utils.invoke_api(na_server,
- api_name='snapmirror-get-iter',
- api_family='cm', query=query,
- is_iter=True)
+ result = netapp_api.invoke_api(na_server,
+ api_name='snapmirror-get-iter',
+ api_family='cm', query=query,
+ is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
des_attr = {'storage-disk-info':
{'disk-raid-info': ['effective-disk-type']}}
try:
- result = na_utils.invoke_api(na_server,
- api_name='storage-disk-get-iter',
- api_family='cm', query=query,
- des_result=des_attr,
- additional_elems=None,
- is_iter=True)
+ result = netapp_api.invoke_api(na_server,
+ api_name='storage-disk-get-iter',
+ api_family='cm', query=query,
+ des_result=des_attr,
+ additional_elems=None,
+ is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
@utils.synchronized(lock_pr)
def refresh_stale_ssc():
stale_vols = backend._update_stale_vols(reset=True)
- LOG.info(_('Running stale ssc refresh job for %(server)s'
- ' and vserver %(vs)s')
+ LOG.info(_LI('Running stale ssc refresh job for %(server)s'
+ ' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
# refreshing single volumes can create inconsistency
# hence doing manipulations on copy
vol_set = ssc_vols_copy[k]
vol_set.discard(vol)
backend.refresh_ssc_vols(ssc_vols_copy)
- LOG.info(_('Successfully completed stale refresh job for'
- ' %(server)s and vserver %(vs)s')
+ LOG.info(_LI('Successfully completed stale refresh job for'
+ ' %(server)s and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
refresh_stale_ssc()
@utils.synchronized(lock_pr)
def get_latest_ssc():
- LOG.info(_('Running cluster latest ssc job for %(server)s'
- ' and vserver %(vs)s')
+ LOG.info(_LI('Running cluster latest ssc job for %(server)s'
+ ' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
ssc_vols = get_cluster_ssc(na_server, vserver)
backend.refresh_ssc_vols(ssc_vols)
backend.ssc_run_time = timeutils.utcnow()
- LOG.info(_('Successfully completed ssc job for %(server)s'
- ' and vserver %(vs)s')
+ LOG.info(_LI('Successfully completed ssc job for %(server)s'
+ ' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
get_latest_ssc()
def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
"""Refresh cluster ssc for backend."""
- if not isinstance(backend, driver.VolumeDriver):
- raise exception.InvalidInput(reason=_("Backend not a VolumeDriver."))
- if not isinstance(na_server, api.NaServer):
+ if not isinstance(na_server, netapp_api.NaServer):
raise exception.InvalidInput(reason=_("Backend server not NaServer."))
delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
if getattr(backend, 'ssc_job_running', None):
return result
-def check_ssc_api_permissions(na_server):
- """Checks backend ssc api permissions for the user."""
+def check_ssc_api_permissions(client_cmode):
+ """Checks backend SSC API permissions for the user."""
api_map = {'storage-disk-get-iter': ['netapp:disk_type'],
'snapmirror-get-iter': ['netapp_mirrored',
'netapp_unmirrored'],
'netapp_nocompression'],
'aggr-options-list-info': ['netapp:raid_type'],
'volume-get-iter': []}
- failed_apis = na_utils.check_apis_on_cluster(na_server, api_map.keys())
+ failed_apis = client_cmode.check_apis_on_cluster(api_map.keys())
if failed_apis:
if 'volume-get-iter' in failed_apis:
msg = _("Fatal error: User not permitted"
for fail in failed_apis:
unsupp_ssc_features.extend(api_map[fail])
LOG.warning(_LW("The user does not have access or sufficient "
- "privileges to use all netapp apis. The "
+ "privileges to use all netapp APIs. The "
"following extra_specs will fail or be ignored: "
"%s"), unsupp_ssc_features)
-# Copyright (c) 2014 NetApp, Inc.
-# All Rights Reserved.
+# Copyright (c) 2014 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
import six.moves.urllib.parse as urlparse
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
# Catching error conditions other than the perceived ones.
# Helps propagating only known exceptions back to the caller.
except Exception as e:
- LOG.exception(_("Unexpected error while invoking web service."
- " Error - %s."), e)
+ LOG.exception(_LE("Unexpected error while invoking web service."
+ " Error - %s."), e)
raise exception.NetAppDriverException(
_("Invoking web service failed."))
self._eval_response(response)
-# Copyright (c) 2014 NetApp, Inc.
-# All Rights Reserved.
+# Copyright (c) 2014 NetApp, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.eseries import client
+from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_eseries_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
-from cinder.volume.drivers.netapp import utils
+from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
CONF.register_opts(netapp_transport_opts)
-class Driver(driver.ISCSIDriver):
+class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
"""Executes commands relating to Volumes."""
VERSION = "1.0.0"
- required_flags = ['netapp_server_hostname', 'netapp_controller_ips',
+ REQUIRED_FLAGS = ['netapp_server_hostname', 'netapp_controller_ips',
'netapp_login', 'netapp_password',
'netapp_storage_pools']
SLEEP_SECS = 5
}
def __init__(self, *args, **kwargs):
- super(Driver, self).__init__(*args, **kwargs)
- utils.validate_instantiation(**kwargs)
+ super(NetAppEseriesISCSIDriver, self).__init__(*args, **kwargs)
+ na_utils.validate_instantiation(**kwargs)
self.configuration.append_config_values(netapp_basicauth_opts)
self.configuration.append_config_values(netapp_connection_opts)
self.configuration.append_config_values(netapp_transport_opts)
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
- self._check_flags()
+ na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
+
port = self.configuration.netapp_server_port
scheme = self.configuration.netapp_transport_type.lower()
if port is None:
port = 8080
elif scheme == 'https':
port = 8443
+
self._client = client.RestClient(
scheme=scheme,
host=self.configuration.netapp_server_hostname,
password=self.configuration.netapp_password)
self._check_mode_get_or_register_storage_system()
- def _check_flags(self):
- """Ensure that the flags we care about are set."""
- required_flags = self.required_flags
- for flag in required_flags:
- if not getattr(self.configuration, flag, None):
- msg = _('%s is not set.') % flag
- raise exception.InvalidInput(reason=msg)
- if not self.configuration.use_multipath_for_image_xfer:
- msg = _('Production use of "%(backend)s" backend requires the '
- 'Cinder controller to have multipathing properly set up '
- 'and the configuration option "%(mpflag)s" to be set to '
- '"True".') % {'backend': self._backend_name,
- 'mpflag': 'use_multipath_for_image_xfer'}
- LOG.warning(msg)
-
def check_for_setup_error(self):
+ self._check_host_type()
+ self._check_multipath()
+ self._check_storage_system()
+ self._populate_system_objects()
+
+ def _check_host_type(self):
self.host_type =\
self.HOST_TYPES.get(self.configuration.netapp_eseries_host_type,
None)
if not self.host_type:
raise exception.NetAppDriverException(
_('Configured host type is not supported.'))
- self._check_storage_system()
- self._populate_system_objects()
+
+ def _check_multipath(self):
+ if not self.configuration.use_multipath_for_image_xfer:
+ msg = _LW('Production use of "%(backend)s" backend requires the '
+ 'Cinder controller to have multipathing properly set up '
+ 'and the configuration option "%(mpflag)s" to be set to '
+ '"True".') % {'backend': self._backend_name,
+ 'mpflag': 'use_multipath_for_image_xfer'}
+ LOG.warning(msg)
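The per-driver flag validation deleted above now lives in the shared NetApp utils module and is invoked through na_utils.check_flags(). A plausible sketch of that shared helper, mirroring the loop that was removed, is shown below; the exact contents of the shared module are assumed, and _ and exception come from the usual cinder imports in that module.

def check_flags(required_flags, configuration):
    """Ensure that the options the driver cares about are set."""
    for flag in required_flags:
        if not getattr(configuration, flag, None):
            # Assumed message text, modeled on the deleted _check_flags().
            msg = _('%s is not set.') % flag
            raise exception.InvalidInput(reason=msg)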
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
def _resolve_host(host):
try:
- ip = utils.resolve_hostname(host)
+ ip = na_utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
ips = self.configuration.netapp_controller_ips
ips = [i.strip() for i in ips.split(",")]
ips = [x for x in ips if _resolve_host(x)]
- host = utils.resolve_hostname(
+ host = na_utils.resolve_hostname(
self.configuration.netapp_server_hostname)
if not ips:
msg = _('Controller ips not valid after resolution.')
raise exception.NotFound(_("Host type %s not supported.") % host_type)
def _get_free_lun(self, host, maps=None):
- """Gets free lun for given host."""
+ """Gets free LUN for given host."""
ref = host['hostRef']
luns = maps or self._get_vol_mapping_for_host_frm_array(ref)
used_luns = set(map(lambda lun: int(lun['lun']), luns))
for lun in xrange(self.MAX_LUNS_PER_HOST):
if lun not in used_luns:
return lun
- msg = _("No free luns. Host might exceeded max luns.")
+ msg = _("No free LUNs. Host might exceeded max LUNs.")
raise exception.NetAppDriverException(msg)
def _get_vol_mapping_for_host_frm_array(self, host_ref):
def _garbage_collect_tmp_vols(self):
"""Removes tmp vols with no snapshots."""
try:
- if not utils.set_safe_attr(self, 'clean_job_running', True):
+ if not na_utils.set_safe_attr(self, 'clean_job_running', True):
LOG.warning(_LW('Returning as clean tmp '
'vol job already running.'))
return
LOG.debug("Error deleting vol with label %s.",
label)
finally:
- utils.set_safe_attr(self, 'clean_job_running', False)
+ na_utils.set_safe_attr(self, 'clean_job_running', False)
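The clean-up job above uses na_utils.set_safe_attr() as a lightweight mutual-exclusion flag: the first caller that flips the attribute to True runs the job, concurrent callers get a falsy return and bail out, and the finally block always clears the flag. A condensed sketch of that guard pattern follows; _do_cleanup is a hypothetical stand-in for the actual temp-volume deletion work.

def _garbage_collect_guarded(self):
    """Run the temp-volume clean-up at most once at a time."""
    if not na_utils.set_safe_attr(self, 'clean_job_running', True):
        LOG.warning(_LW('Returning as clean tmp vol job already running.'))
        return
    try:
        self._do_cleanup()  # hypothetical: delete tmp vols with no snapshots
    finally:
        na_utils.set_safe_attr(self, 'clean_job_running', False)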
--- /dev/null
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Utilities for NetApp E-series drivers.
+"""
+
+import base64
+import binascii
+import uuid
+
+import six
+
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def encode_hex_to_base32(hex_string):
+ """Encodes hex to base32 bit as per RFC4648."""
+ bin_form = binascii.unhexlify(hex_string)
+ return base64.b32encode(bin_form)
+
+
+def decode_base32_to_hex(base32_string):
+ """Decodes base32 string to hex string."""
+ bin_form = base64.b32decode(base32_string)
+ return binascii.hexlify(bin_form)
+
+
+def convert_uuid_to_es_fmt(uuid_str):
+ """Converts uuid to e-series compatible name format."""
+ uuid_base32 = encode_hex_to_base32(uuid.UUID(six.text_type(uuid_str)).hex)
+ return uuid_base32.strip('=')
+
+
+def convert_es_fmt_to_uuid(es_label):
+ """Converts e-series name format to uuid."""
+ es_label_b32 = es_label.ljust(32, '=')
+ return uuid.UUID(binascii.hexlify(base64.b32decode(es_label_b32)))
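Assuming this new file is the cinder.volume.drivers.netapp.eseries.utils module imported earlier in the patch, a quick illustrative round trip shows how a volume UUID maps to the shorter, padding-free base32 label and back (Python 2 semantics, matching the rest of this tree).

import uuid

from cinder.volume.drivers.netapp.eseries import utils as eseries_utils

vol_uuid = uuid.uuid4()
label = eseries_utils.convert_uuid_to_es_fmt(vol_uuid)
# 16 UUID bytes -> 32 base32 chars including padding; stripping '=' leaves 26.
assert len(label) == 26 and '=' not in label
assert eseries_utils.convert_es_fmt_to_uuid(label) == vol_uuid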
+++ /dev/null
-# Copyright (c) 2012 NetApp, Inc.
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Volume driver for NetApp iSCSI storage systems.
-
-This driver requires NetApp Clustered Data ONTAP or 7-mode
-storage systems with installed iSCSI licenses.
-"""
-
-import copy
-import sys
-import uuid
-
-from oslo.utils import excutils
-from oslo.utils import timeutils
-from oslo.utils import units
-import six
-
-from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
-from cinder.openstack.common import log as logging
-from cinder import utils
-from cinder.volume import driver
-from cinder.volume.drivers.netapp.api import NaApiError
-from cinder.volume.drivers.netapp.api import NaElement
-from cinder.volume.drivers.netapp.api import NaServer
-from cinder.volume.drivers.netapp.client import cmode
-from cinder.volume.drivers.netapp.client import seven_mode
-from cinder.volume.drivers.netapp.options import netapp_7mode_opts
-from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
-from cinder.volume.drivers.netapp.options import netapp_cluster_opts
-from cinder.volume.drivers.netapp.options import netapp_connection_opts
-from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
-from cinder.volume.drivers.netapp.options import netapp_transport_opts
-from cinder.volume.drivers.netapp import ssc_utils
-from cinder.volume.drivers.netapp import utils as na_utils
-from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
-from cinder.volume.drivers.netapp.utils import round_down
-from cinder.volume.drivers.netapp.utils import set_safe_attr
-from cinder.volume.drivers.netapp.utils import validate_instantiation
-from cinder.volume import utils as volume_utils
-
-
-LOG = logging.getLogger(__name__)
-
-
-class NetAppLun(object):
- """Represents a LUN on NetApp storage."""
-
- def __init__(self, handle, name, size, metadata_dict):
- self.handle = handle
- self.name = name
- self.size = size
- self.metadata = metadata_dict or {}
-
- def get_metadata_property(self, prop):
- """Get the metadata property of a LUN."""
- if prop in self.metadata:
- return self.metadata[prop]
- name = self.name
- msg = _("No metadata property %(prop)s defined for the"
- " LUN %(name)s")
- msg_fmt = {'prop': prop, 'name': name}
- LOG.debug(msg % msg_fmt)
-
- def __str__(self, *args, **kwargs):
- return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
- % (self.handle, self.name, self.size, self.metadata)
-
-
-class NetAppDirectISCSIDriver(driver.ISCSIDriver):
- """NetApp Direct iSCSI volume driver."""
-
- # do not increment this as it may be used in volume type definitions
- VERSION = "1.0.0"
-
- IGROUP_PREFIX = 'openstack-'
- required_flags = ['netapp_login', 'netapp_password',
- 'netapp_server_hostname']
-
- def __init__(self, *args, **kwargs):
- self._app_version = kwargs.pop("app_version", "unknown")
- super(NetAppDirectISCSIDriver, self).__init__(*args, **kwargs)
- validate_instantiation(**kwargs)
- self.configuration.append_config_values(netapp_connection_opts)
- self.configuration.append_config_values(netapp_basicauth_opts)
- self.configuration.append_config_values(netapp_transport_opts)
- self.configuration.append_config_values(netapp_provisioning_opts)
- self.lun_table = {}
- self.zapi_client = None
-
- def _create_client(self, **kwargs):
- """Instantiate a client for NetApp server.
-
- This method creates NetApp server client for api communication.
- """
-
- host_filer = kwargs['hostname']
- LOG.debug('Using NetApp filer: %s' % host_filer)
- self.client = NaServer(host=host_filer,
- server_type=NaServer.SERVER_TYPE_FILER,
- transport_type=kwargs['transport_type'],
- style=NaServer.STYLE_LOGIN_PASSWORD,
- username=kwargs['login'],
- password=kwargs['password'])
- if kwargs['port'] is not None:
- self.client.set_port(kwargs['port'])
-
- def _do_custom_setup(self):
- """Does custom setup depending on the type of filer."""
- raise NotImplementedError()
-
- def _check_flags(self):
- """Ensure that the flags we care about are set."""
- required_flags = self.required_flags
- for flag in required_flags:
- if not getattr(self.configuration, flag, None):
- msg = _('%s is not set') % flag
- raise exception.InvalidInput(reason=msg)
-
- def do_setup(self, context):
- """Setup the NetApp Volume driver.
-
- Called one time by the manager after the driver is loaded.
- Validate the flags we care about and setup NetApp
- client.
- """
-
- self._check_flags()
- self._create_client(
- transport_type=self.configuration.netapp_transport_type,
- login=self.configuration.netapp_login,
- password=self.configuration.netapp_password,
- hostname=self.configuration.netapp_server_hostname,
- port=self.configuration.netapp_server_port)
- self._do_custom_setup()
-
- def check_for_setup_error(self):
- """Check that the driver is working and can communicate.
-
- Discovers the LUNs on the NetApp server.
- """
-
- self.lun_table = {}
- lun_list = self.zapi_client.get_lun_list()
- self._extract_and_populate_luns(lun_list)
- LOG.debug("Success getting LUN list from server")
-
- def get_pool(self, volume):
- """Return pool name where volume resides.
-
- :param volume: The volume hosted by the driver.
- :return: Name of the pool where given volume is hosted.
- """
- name = volume['name']
- metadata = self._get_lun_attr(name, 'metadata') or dict()
- return metadata.get('Volume', None)
-
- def create_volume(self, volume):
- """Driver entry point for creating a new volume (aka ONTAP LUN)."""
-
- LOG.debug('create_volume on %s' % volume['host'])
-
- # get ONTAP volume name as pool name
- ontap_volume_name = volume_utils.extract_host(volume['host'],
- level='pool')
-
- if ontap_volume_name is None:
- msg = _("Pool is not available in the volume host field.")
- raise exception.InvalidHost(reason=msg)
-
- lun_name = volume['name']
-
- # start with default size, get requested size
- default_size = units.Mi * 100 # 100 MB
- size = default_size if not int(volume['size'])\
- else int(volume['size']) * units.Gi
-
- metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
-
- extra_specs = get_volume_extra_specs(volume)
- qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
- if extra_specs else None
-
- # warn on obsolete extra specs
- na_utils.log_extra_spec_warnings(extra_specs)
-
- self.create_lun(ontap_volume_name, lun_name, size,
- metadata, qos_policy_group)
- LOG.debug('Created LUN with name %s' % lun_name)
-
- metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
- metadata['Volume'] = ontap_volume_name
- metadata['Qtree'] = None
-
- handle = self._create_lun_handle(metadata)
- self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
-
- def delete_volume(self, volume):
- """Driver entry point for destroying existing volumes."""
- name = volume['name']
- metadata = self._get_lun_attr(name, 'metadata')
- if not metadata:
- msg = _LW("No entry in LUN table for volume/snapshot %(name)s.")
- msg_fmt = {'name': name}
- LOG.warning(msg % msg_fmt)
- return
- self.zapi_client.destroy_lun(metadata['Path'])
- self.lun_table.pop(name)
-
- def ensure_export(self, context, volume):
- """Driver entry point to get the export info for an existing volume."""
- handle = self._get_lun_attr(volume['name'], 'handle')
- return {'provider_location': handle}
-
- def create_export(self, context, volume):
- """Driver entry point to get the export info for a new volume."""
- handle = self._get_lun_attr(volume['name'], 'handle')
- return {'provider_location': handle}
-
- def remove_export(self, context, volume):
- """Driver entry point to remove an export for a volume.
-
- Since exporting is idempotent in this driver, we have nothing
- to do for unexporting.
- """
-
- pass
-
- def initialize_connection(self, volume, connector):
- """Driver entry point to attach a volume to an instance.
-
- Do the LUN masking on the storage system so the initiator can access
- the LUN on the target. Also return the iSCSI properties so the
- initiator can find the LUN. This implementation does not call
- _get_iscsi_properties() to get the properties because cannot store the
- LUN number in the database. We only find out what the LUN number will
- be during this method call so we construct the properties dictionary
- ourselves.
- """
-
- initiator_name = connector['initiator']
- name = volume['name']
- lun_id = self._map_lun(name, initiator_name, 'iscsi', None)
- msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s")
- msg_fmt = {'name': name, 'initiator_name': initiator_name}
- LOG.debug(msg % msg_fmt)
- iqn = self.zapi_client.get_iscsi_service_details()
- target_details_list = self.zapi_client.get_target_details()
- msg = _("Successfully fetched target details for LUN %(name)s and "
- "initiator %(initiator_name)s")
- msg_fmt = {'name': name, 'initiator_name': initiator_name}
- LOG.debug(msg % msg_fmt)
-
- if not target_details_list:
- msg = _('No iscsi target details were found for LUN %s')
- raise exception.VolumeBackendAPIException(data=msg % name)
- target_details = None
- for tgt_detail in target_details_list:
- if tgt_detail.get('interface-enabled', 'true') == 'true':
- target_details = tgt_detail
- break
- if not target_details:
- target_details = target_details_list[0]
-
- if not target_details['address'] and target_details['port']:
- msg = _('Failed to get target portal for the LUN %s')
- raise exception.VolumeBackendAPIException(data=msg % name)
- if not iqn:
- msg = _('Failed to get target IQN for the LUN %s')
- raise exception.VolumeBackendAPIException(data=msg % name)
-
- properties = {}
- properties['target_discovered'] = False
- (address, port) = (target_details['address'], target_details['port'])
- properties['target_portal'] = '%s:%s' % (address, port)
- properties['target_iqn'] = iqn
- properties['target_lun'] = lun_id
- properties['volume_id'] = volume['id']
-
- auth = volume['provider_auth']
- if auth:
- (auth_method, auth_username, auth_secret) = auth.split()
- properties['auth_method'] = auth_method
- properties['auth_username'] = auth_username
- properties['auth_password'] = auth_secret
-
- return {
- 'driver_volume_type': 'iscsi',
- 'data': properties,
- }
-
- def create_snapshot(self, snapshot):
- """Driver entry point for creating a snapshot.
-
- This driver implements snapshots by using efficient single-file
- (LUN) cloning.
- """
-
- vol_name = snapshot['volume_name']
- snapshot_name = snapshot['name']
- lun = self._get_lun_from_table(vol_name)
- self._clone_lun(lun.name, snapshot_name, 'false')
-
- def delete_snapshot(self, snapshot):
- """Driver entry point for deleting a snapshot."""
- self.delete_volume(snapshot)
- LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Driver entry point for creating a new volume from a snapshot.
-
- Many would call this "cloning" and in fact we use cloning to implement
- this feature.
- """
-
- vol_size = volume['size']
- snap_size = snapshot['volume_size']
- snapshot_name = snapshot['name']
- new_name = volume['name']
- self._clone_lun(snapshot_name, new_name, 'true')
- if vol_size != snap_size:
- try:
- self.extend_volume(volume, volume['size'])
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Resizing %s failed. "
- "Cleaning volume."), new_name)
- self.delete_volume(volume)
-
- def terminate_connection(self, volume, connector, **kwargs):
- """Driver entry point to unattach a volume from an instance.
-
- Unmask the LUN on the storage system so the given initiator can no
- longer access it.
- """
-
- initiator_name = connector['initiator']
- name = volume['name']
- metadata = self._get_lun_attr(name, 'metadata')
- path = metadata['Path']
- self._unmap_lun(path, initiator_name)
- msg = _("Unmapped LUN %(name)s from the initiator "
- "%(initiator_name)s")
- msg_fmt = {'name': name, 'initiator_name': initiator_name}
- LOG.debug(msg % msg_fmt)
-
- def create_lun(self, volume_name, lun_name, size,
- metadata, qos_policy_group=None):
- """Creates a LUN, handling ONTAP differences as needed."""
- raise NotImplementedError()
-
- def _create_lun_handle(self, metadata):
- """Returns lun handle based on filer type."""
- raise NotImplementedError()
-
- def _extract_and_populate_luns(self, api_luns):
- """Extracts the luns from api.
-
- Populates in the lun table.
- """
-
- for lun in api_luns:
- meta_dict = self._create_lun_meta(lun)
- path = lun.get_child_content('path')
- (_rest, _splitter, name) = path.rpartition('/')
- handle = self._create_lun_handle(meta_dict)
- size = lun.get_child_content('size')
- discovered_lun = NetAppLun(handle, name,
- size, meta_dict)
- self._add_lun_to_table(discovered_lun)
-
- def _is_naelement(self, elem):
- """Checks if element is NetApp element."""
- if not isinstance(elem, NaElement):
- raise ValueError('Expects NaElement')
-
- def _map_lun(self, name, initiator, initiator_type='iscsi', lun_id=None):
- """Maps lun to the initiator and returns lun id assigned."""
- metadata = self._get_lun_attr(name, 'metadata')
- os = metadata['OsType']
- path = metadata['Path']
- if self._check_allowed_os(os):
- os = os
- else:
- os = 'default'
- igroup_name = self._get_or_create_igroup(initiator,
- initiator_type, os)
- try:
- return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
- except NaApiError:
- exc_info = sys.exc_info()
- (_igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
- if lun_id is not None:
- return lun_id
- else:
- raise exc_info[0], exc_info[1], exc_info[2]
-
- def _unmap_lun(self, path, initiator):
- """Unmaps a lun from given initiator."""
- (igroup_name, _lun_id) = self._find_mapped_lun_igroup(path, initiator)
- self.zapi_client.unmap_lun(path, igroup_name)
-
- def _find_mapped_lun_igroup(self, path, initiator, os=None):
- """Find the igroup for mapped lun with initiator."""
- raise NotImplementedError()
-
- def _get_or_create_igroup(self, initiator, initiator_type='iscsi',
- os='default'):
- """Checks for an igroup for an initiator.
-
- Creates igroup if not found.
- """
-
- igroups = self.zapi_client.get_igroup_by_initiator(initiator=initiator)
- igroup_name = None
- for igroup in igroups:
- if igroup['initiator-group-os-type'] == os:
- if igroup['initiator-group-type'] == initiator_type or \
- igroup['initiator-group-type'] == 'mixed':
- if igroup['initiator-group-name'].startswith(
- self.IGROUP_PREFIX):
- igroup_name = igroup['initiator-group-name']
- break
- if not igroup_name:
- igroup_name = self.IGROUP_PREFIX + six.text_type(uuid.uuid4())
- self.zapi_client.create_igroup(igroup_name, initiator_type, os)
- self.zapi_client.add_igroup_initiator(igroup_name, initiator)
- return igroup_name
-
- def _check_allowed_os(self, os):
- """Checks if the os type supplied is NetApp supported."""
- if os in ['linux', 'aix', 'hpux', 'windows', 'solaris',
- 'netware', 'vmware', 'openvms', 'xen', 'hyper_v']:
- return True
- else:
- return False
-
- def _add_lun_to_table(self, lun):
- """Adds LUN to cache table."""
- if not isinstance(lun, NetAppLun):
- msg = _("Object is not a NetApp LUN.")
- raise exception.VolumeBackendAPIException(data=msg)
- self.lun_table[lun.name] = lun
-
- def _get_lun_from_table(self, name):
- """Gets LUN from cache table.
-
- Refreshes cache if lun not found in cache.
- """
- lun = self.lun_table.get(name)
- if lun is None:
- lun_list = self.zapi_client.get_lun_list()
- self._extract_and_populate_luns(lun_list)
- lun = self.lun_table.get(name)
- if lun is None:
- raise exception.VolumeNotFound(volume_id=name)
- return lun
-
- def _clone_lun(self, name, new_name, space_reserved='true',
- src_block=0, dest_block=0, block_count=0):
- """Clone LUN with the given name to the new name."""
- raise NotImplementedError()
-
- def _get_lun_attr(self, name, attr):
- """Get the lun attribute if found else None."""
- try:
- attr = getattr(self._get_lun_from_table(name), attr)
- return attr
- except exception.VolumeNotFound as e:
- LOG.error(_LE("Message: %s"), e.msg)
- except Exception as e:
- LOG.error(_LE("Error getting lun attribute. Exception: %s"),
- e.__str__())
- return None
-
- def _create_lun_meta(self, lun):
- raise NotImplementedError()
-
- def create_cloned_volume(self, volume, src_vref):
- """Creates a clone of the specified volume."""
- vol_size = volume['size']
- src_vol = self._get_lun_from_table(src_vref['name'])
- src_vol_size = src_vref['size']
- new_name = volume['name']
- self._clone_lun(src_vol.name, new_name, 'true')
- if vol_size != src_vol_size:
- try:
- self.extend_volume(volume, volume['size'])
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Resizing %s failed. "
- "Cleaning volume."), new_name)
- self.delete_volume(volume)
-
- def get_volume_stats(self, refresh=False):
- """Get volume stats.
-
- If 'refresh' is True, run update the stats first.
- """
-
- if refresh:
- self._update_volume_stats()
-
- return self._stats
-
- def _update_volume_stats(self):
- """Retrieve stats info from volume group."""
- raise NotImplementedError()
-
- def extend_volume(self, volume, new_size):
- """Extend an existing volume to the new size."""
- name = volume['name']
- lun = self._get_lun_from_table(name)
- path = lun.metadata['Path']
- curr_size_bytes = six.text_type(lun.size)
- new_size_bytes = six.text_type(int(new_size) * units.Gi)
- # Reused by clone scenarios.
- # Hence comparing the stored size.
- if curr_size_bytes != new_size_bytes:
- lun_geometry = self.zapi_client.get_lun_geometry(path)
- if (lun_geometry and lun_geometry.get("max_resize")
- and int(lun_geometry.get("max_resize")) >=
- int(new_size_bytes)):
- self.zapi_client.do_direct_resize(path, new_size_bytes)
- else:
- self._do_sub_clone_resize(path, new_size_bytes)
- self.lun_table[name].size = new_size_bytes
- else:
- LOG.info(_LI("No need to extend volume %s"
- " as it is already the requested new size."), name)
-
- def _get_vol_option(self, volume_name, option_name):
- """Get the value for the volume option."""
- value = None
- options = self.zapi_client.get_volume_options(volume_name)
- for opt in options:
- if opt.get_child_content('name') == option_name:
- value = opt.get_child_content('value')
- break
- return value
-
- def _do_sub_clone_resize(self, path, new_size_bytes):
- """Does sub lun clone after verification.
-
- Clones the block ranges and swaps
- the luns also deletes older lun
- after a successful clone.
- """
- seg = path.split("/")
- LOG.info(_LI("Resizing lun %s using sub clone to new size."), seg[-1])
- name = seg[-1]
- vol_name = seg[2]
- lun = self._get_lun_from_table(name)
- metadata = lun.metadata
- compression = self._get_vol_option(vol_name, 'compression')
- if compression == "on":
- msg = _('%s cannot be sub clone resized'
- ' as it is hosted on compressed volume')
- raise exception.VolumeBackendAPIException(data=msg % name)
- else:
- block_count = self._get_lun_block_count(path)
- if block_count == 0:
- msg = _('%s cannot be sub clone resized'
- ' as it contains no blocks.')
- raise exception.VolumeBackendAPIException(data=msg % name)
- new_lun = 'new-%s' % (name)
- self.zapi_client.create_lun(vol_name, new_lun, new_size_bytes,
- metadata)
- try:
- self._clone_lun(name, new_lun, block_count=block_count)
- self._post_sub_clone_resize(path)
- except Exception:
- with excutils.save_and_reraise_exception():
- new_path = '/vol/%s/%s' % (vol_name, new_lun)
- self.zapi_client.destroy_lun(new_path)
-
- def _post_sub_clone_resize(self, path):
- """Try post sub clone resize in a transactional manner."""
- st_tm_mv, st_nw_mv, st_del_old = None, None, None
- seg = path.split("/")
- LOG.info(_LI("Post clone resize lun %s"), seg[-1])
- new_lun = 'new-%s' % (seg[-1])
- tmp_lun = 'tmp-%s' % (seg[-1])
- tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
- new_path = "/vol/%s/%s" % (seg[2], new_lun)
- try:
- st_tm_mv = self.zapi_client.move_lun(path, tmp_path)
- st_nw_mv = self.zapi_client.move_lun(new_path, path)
- st_del_old = self.zapi_client.destroy_lun(tmp_path)
- except Exception as e:
- if st_tm_mv is None:
- msg = _("Failure staging lun %s to tmp.")
- raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
- else:
- if st_nw_mv is None:
- self.zapi_client.move_lun(tmp_path, path)
- msg = _("Failure moving new cloned lun to %s.")
- raise exception.VolumeBackendAPIException(
- data=msg % (seg[-1]))
- elif st_del_old is None:
- LOG.error(_LE("Failure deleting staged tmp lun %s."),
- tmp_lun)
- else:
- LOG.error(_LE("Unknown exception in"
- " post clone resize lun %s."), seg[-1])
- LOG.error(_LE("Exception details: %s") % (e.__str__()))
-
- def _get_lun_block_count(self, path):
- """Gets block counts for the lun."""
- LOG.debug("Getting lun block count.")
- block_count = 0
- lun_infos = self.zapi_client.get_lun_by_args(path=path)
- if not lun_infos:
- seg = path.split('/')
- msg = _('Failure getting lun info for %s.')
- raise exception.VolumeBackendAPIException(data=msg % seg[-1])
- lun_info = lun_infos[-1]
- bs = int(lun_info.get_child_content('block-size'))
- ls = int(lun_info.get_child_content('size'))
- block_count = ls / bs
- return block_count
-
-
-class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
- """NetApp C-mode iSCSI volume driver."""
-
- DEFAULT_VS = 'openstack'
-
- def __init__(self, *args, **kwargs):
- super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs)
- self.configuration.append_config_values(netapp_cluster_opts)
-
- def _do_custom_setup(self):
- """Does custom setup for ontap cluster."""
- self.vserver = self.configuration.netapp_vserver
- self.vserver = self.vserver if self.vserver else self.DEFAULT_VS
- self.zapi_client = cmode.Client(self.client, self.vserver)
- # We set vserver in client permanently.
- # To use tunneling enable_tunneling while invoking api
- self.client.set_vserver(self.vserver)
- # Default values to run first api
- self.client.set_api_version(1, 15)
- (major, minor) = self.zapi_client.get_ontapi_version()
- self.client.set_api_version(major, minor)
- self.ssc_vols = None
- self.stale_vols = set()
-
- def check_for_setup_error(self):
- """Check that the driver is working and can communicate."""
- ssc_utils.check_ssc_api_permissions(self.client)
- super(NetAppDirectCmodeISCSIDriver, self).check_for_setup_error()
-
- def create_lun(self, volume_name, lun_name, size,
- metadata, qos_policy_group=None):
- """Creates a LUN, handling ONTAP differences as needed."""
-
- self.zapi_client.create_lun(
- volume_name, lun_name, size, metadata, qos_policy_group)
-
- self._update_stale_vols(
- volume=ssc_utils.NetAppVolume(volume_name, self.vserver))
-
- def _create_lun_handle(self, metadata):
- """Returns lun handle based on filer type."""
- return '%s:%s' % (self.vserver, metadata['Path'])
-
- def _find_mapped_lun_igroup(self, path, initiator, os=None):
- """Find the igroup for mapped lun with initiator."""
- initiator_igroups = self.zapi_client.get_igroup_by_initiator(
- initiator=initiator)
- lun_maps = self.zapi_client.get_lun_map(path)
- if initiator_igroups and lun_maps:
- for igroup in initiator_igroups:
- igroup_name = igroup['initiator-group-name']
- if igroup_name.startswith(self.IGROUP_PREFIX):
- for lun_map in lun_maps:
- if lun_map['initiator-group'] == igroup_name:
- return (igroup_name, lun_map['lun-id'])
- return (None, None)
-
- def _clone_lun(self, name, new_name, space_reserved='true',
- src_block=0, dest_block=0, block_count=0):
- """Clone LUN with the given handle to the new name."""
- metadata = self._get_lun_attr(name, 'metadata')
- volume = metadata['Volume']
- self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
- src_block=0, dest_block=0, block_count=0)
- LOG.debug("Cloned LUN with new name %s" % new_name)
- lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
- path='/vol/%s/%s'
- % (volume, new_name))
- if len(lun) == 0:
- msg = _("No cloned lun named %s found on the filer")
- raise exception.VolumeBackendAPIException(data=msg % (new_name))
- clone_meta = self._create_lun_meta(lun[0])
- self._add_lun_to_table(NetAppLun('%s:%s' % (clone_meta['Vserver'],
- clone_meta['Path']),
- new_name,
- lun[0].get_child_content('size'),
- clone_meta))
- self._update_stale_vols(
- volume=ssc_utils.NetAppVolume(volume, self.vserver))
-
- def _create_lun_meta(self, lun):
- """Creates lun metadata dictionary."""
- self._is_naelement(lun)
- meta_dict = {}
- meta_dict['Vserver'] = lun.get_child_content('vserver')
- meta_dict['Volume'] = lun.get_child_content('volume')
- meta_dict['Qtree'] = lun.get_child_content('qtree')
- meta_dict['Path'] = lun.get_child_content('path')
- meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
- meta_dict['SpaceReserved'] = \
- lun.get_child_content('is-space-reservation-enabled')
- return meta_dict
-
- def _configure_tunneling(self, do_tunneling=False):
- """Configures tunneling for ontap cluster."""
- if do_tunneling:
- self.client.set_vserver(self.vserver)
- else:
- self.client.set_vserver(None)
-
- def _update_volume_stats(self):
- """Retrieve stats info from vserver."""
-
- sync = True if self.ssc_vols is None else False
- ssc_utils.refresh_cluster_ssc(self, self.client,
- self.vserver, synchronous=sync)
-
- LOG.debug('Updating volume stats')
- data = {}
- netapp_backend = 'NetApp_iSCSI_Cluster_direct'
- backend_name = self.configuration.safe_get('volume_backend_name')
- data['volume_backend_name'] = backend_name or netapp_backend
- data['vendor_name'] = 'NetApp'
- data['driver_version'] = self.VERSION
- data['storage_protocol'] = 'iSCSI'
- data['pools'] = self._get_pool_stats()
-
- na_utils.provide_ems(self, self.client, netapp_backend,
- self._app_version)
- self._stats = data
-
- def _get_pool_stats(self):
- """Retrieve pool (i.e. ONTAP volume) stats info from SSC volumes."""
-
- pools = []
- if not self.ssc_vols:
- return pools
-
- for vol in self.ssc_vols['all']:
- pool = dict()
- pool['pool_name'] = vol.id['name']
- pool['QoS_support'] = False
- pool['reserved_percentage'] = 0
-
- # convert sizes to GB and de-rate by NetApp multiplier
- total = float(vol.space['size_total_bytes'])
- total /= self.configuration.netapp_size_multiplier
- total /= units.Gi
- pool['total_capacity_gb'] = round_down(total, '0.01')
-
- free = float(vol.space['size_avl_bytes'])
- free /= self.configuration.netapp_size_multiplier
- free /= units.Gi
- pool['free_capacity_gb'] = round_down(free, '0.01')
-
- pool['netapp_raid_type'] = vol.aggr['raid_type']
- pool['netapp_disk_type'] = vol.aggr['disk_type']
-
- mirrored = vol in self.ssc_vols['mirrored']
- pool['netapp_mirrored'] = six.text_type(mirrored).lower()
- pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
-
- dedup = vol in self.ssc_vols['dedup']
- pool['netapp_dedup'] = six.text_type(dedup).lower()
- pool['netapp_nodedup'] = six.text_type(not dedup).lower()
-
- compression = vol in self.ssc_vols['compression']
- pool['netapp_compression'] = six.text_type(compression).lower()
- pool['netapp_nocompression'] = six.text_type(
- not compression).lower()
-
- thin = vol in self.ssc_vols['thin']
- pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
- pool['netapp_thick_provisioned'] = six.text_type(not thin).lower()
-
- pools.append(pool)
-
- return pools
-
- @utils.synchronized('update_stale')
- def _update_stale_vols(self, volume=None, reset=False):
- """Populates stale vols with vol and returns set copy if reset."""
- if volume:
- self.stale_vols.add(volume)
- if reset:
- set_copy = copy.deepcopy(self.stale_vols)
- self.stale_vols.clear()
- return set_copy
-
- @utils.synchronized("refresh_ssc_vols")
- def refresh_ssc_vols(self, vols):
- """Refreshes ssc_vols with latest entries."""
- self.ssc_vols = vols
-
- def delete_volume(self, volume):
- """Driver entry point for destroying existing volumes."""
- try:
- lun = self._get_lun_from_table(volume['name'])
- except exception.VolumeNotFound:
- lun = None
- netapp_vol = None
- if lun:
- netapp_vol = lun.get_metadata_property('Volume')
- super(NetAppDirectCmodeISCSIDriver, self).delete_volume(volume)
- if netapp_vol:
- self._update_stale_vols(
- volume=ssc_utils.NetAppVolume(netapp_vol, self.vserver))
-
-
-class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
- """NetApp 7-mode iSCSI volume driver."""
-
- def __init__(self, *args, **kwargs):
- super(NetAppDirect7modeISCSIDriver, self).__init__(*args, **kwargs)
- self.configuration.append_config_values(netapp_7mode_opts)
-
- def _do_custom_setup(self):
- """Does custom setup depending on the type of filer."""
- self.vfiler = self.configuration.netapp_vfiler
- self.volume_list = self.configuration.netapp_volume_list
- if self.volume_list:
- self.volume_list = self.volume_list.split(',')
- self.volume_list = [el.strip() for el in self.volume_list]
- self.zapi_client = seven_mode.Client(self.client, self.volume_list)
- (major, minor) = self.zapi_client.get_ontapi_version()
- self.client.set_api_version(major, minor)
- if self.vfiler:
- self.client.set_vfiler(self.vfiler)
- self.vol_refresh_time = None
- self.vol_refresh_interval = 1800
- self.vol_refresh_running = False
- self.vol_refresh_voluntary = False
- self.root_volume_name = self._get_root_volume_name()
-
- def check_for_setup_error(self):
- """Check that the driver is working and can communicate."""
- api_version = self.client.get_api_version()
- if api_version:
- major, minor = api_version
- if major == 1 and minor < 9:
- msg = _("Unsupported ONTAP version."
- " ONTAP version 7.3.1 and above is supported.")
- raise exception.VolumeBackendAPIException(data=msg)
- else:
- msg = _("Api version could not be determined.")
- raise exception.VolumeBackendAPIException(data=msg)
- super(NetAppDirect7modeISCSIDriver, self).check_for_setup_error()
-
- def create_lun(self, volume_name, lun_name, size,
- metadata, qos_policy_group=None):
- """Creates a LUN, handling ONTAP differences as needed."""
-
- self.zapi_client.create_lun(
- volume_name, lun_name, size, metadata, qos_policy_group)
-
- self.vol_refresh_voluntary = True
-
- def _get_root_volume_name(self):
- # switch to volume-get-root-name API when possible
- vols = self.zapi_client.get_filer_volumes()
- for vol in vols:
- volume_name = vol.get_child_content('name')
- if self._get_vol_option(volume_name, 'root') == 'true':
- return volume_name
- LOG.warning(_LW('Could not determine root volume name '
- 'on %s.') % self._get_owner())
- return None
-
- def _get_owner(self):
- if self.vfiler:
- owner = '%s:%s' % (self.configuration.netapp_server_hostname,
- self.vfiler)
- else:
- owner = self.configuration.netapp_server_hostname
- return owner
-
- def _create_lun_handle(self, metadata):
- """Returns lun handle based on filer type."""
- owner = self._get_owner()
- return '%s:%s' % (owner, metadata['Path'])
-
- def _find_mapped_lun_igroup(self, path, initiator, os=None):
- """Find the igroup for mapped lun with initiator."""
- result = self.zapi_client.get_lun_map(path)
- igroups = result.get_child_by_name('initiator-groups')
- if igroups:
- igroup = None
- lun_id = None
- found = False
- igroup_infs = igroups.get_children()
- for ig in igroup_infs:
- initiators = ig.get_child_by_name('initiators')
- init_infs = initiators.get_children()
- for info in init_infs:
- if info.get_child_content('initiator-name') == initiator:
- found = True
- igroup = ig.get_child_content('initiator-group-name')
- lun_id = ig.get_child_content('lun-id')
- break
- if found:
- break
- return (igroup, lun_id)
-
- def _clone_lun(self, name, new_name, space_reserved='true',
- src_block=0, dest_block=0, block_count=0):
- """Clone LUN with the given handle to the new name."""
- metadata = self._get_lun_attr(name, 'metadata')
- path = metadata['Path']
- (parent, _splitter, name) = path.rpartition('/')
- clone_path = '%s/%s' % (parent, new_name)
-
- self.zapi_client.clone_lun(path, clone_path, name, new_name,
- space_reserved, src_block=0,
- dest_block=0, block_count=0)
-
- self.vol_refresh_voluntary = True
- luns = self.zapi_client.get_lun_by_args(path=clone_path)
- if luns:
- cloned_lun = luns[0]
- self.zapi_client.set_space_reserve(clone_path, space_reserved)
- clone_meta = self._create_lun_meta(cloned_lun)
- handle = self._create_lun_handle(clone_meta)
- self._add_lun_to_table(
- NetAppLun(handle, new_name,
- cloned_lun.get_child_content('size'),
- clone_meta))
- else:
- raise NaApiError('ENOLUNENTRY', 'No Lun entry found on the filer')
-
- def _create_lun_meta(self, lun):
- """Creates lun metadata dictionary."""
- self._is_naelement(lun)
- meta_dict = {}
- meta_dict['Path'] = lun.get_child_content('path')
- meta_dict['Volume'] = lun.get_child_content('path').split('/')[2]
- meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
- meta_dict['SpaceReserved'] = lun.get_child_content(
- 'is-space-reservation-enabled')
- return meta_dict
-
- def _update_volume_stats(self):
- """Retrieve stats info from filer."""
-
- # ensure we get current data
- self.vol_refresh_voluntary = True
- self._refresh_volume_info()
-
- LOG.debug('Updating volume stats')
- data = {}
- netapp_backend = 'NetApp_iSCSI_7mode_direct'
- backend_name = self.configuration.safe_get('volume_backend_name')
- data['volume_backend_name'] = backend_name or netapp_backend
- data['vendor_name'] = 'NetApp'
- data['driver_version'] = self.VERSION
- data['storage_protocol'] = 'iSCSI'
- data['pools'] = self._get_pool_stats()
-
- na_utils.provide_ems(self, self.client, netapp_backend,
- self._app_version, server_type='7mode')
- self._stats = data
-
- def _get_pool_stats(self):
- """Retrieve pool (i.e. ONTAP volume) stats info from volumes."""
-
- pools = []
- if not self.vols:
- return pools
-
- for vol in self.vols:
-
- # omit volumes not specified in the config
- volume_name = vol.get_child_content('name')
- if self.volume_list and volume_name not in self.volume_list:
- continue
-
- # omit root volume
- if volume_name == self.root_volume_name:
- continue
-
- # ensure good volume state
- state = vol.get_child_content('state')
- inconsistent = vol.get_child_content('is-inconsistent')
- invalid = vol.get_child_content('is-invalid')
- if (state != 'online' or
- inconsistent != 'false' or
- invalid != 'false'):
- continue
-
- pool = dict()
- pool['pool_name'] = volume_name
- pool['QoS_support'] = False
- pool['reserved_percentage'] = 0
-
- # convert sizes to GB and de-rate by NetApp multiplier
- total = float(vol.get_child_content('size-total') or 0)
- total /= self.configuration.netapp_size_multiplier
- total /= units.Gi
- pool['total_capacity_gb'] = round_down(total, '0.01')
-
- free = float(vol.get_child_content('size-available') or 0)
- free /= self.configuration.netapp_size_multiplier
- free /= units.Gi
- pool['free_capacity_gb'] = round_down(free, '0.01')
-
- pools.append(pool)
-
- return pools
-
- def _get_lun_block_count(self, path):
- """Gets block counts for the lun."""
- bs = super(
- NetAppDirect7modeISCSIDriver, self)._get_lun_block_count(path)
- api_version = self.client.get_api_version()
- if api_version:
- major = api_version[0]
- minor = api_version[1]
- if major == 1 and minor < 15:
- bs = bs - 1
- return bs
-
- def _refresh_volume_info(self):
- """Saves the volume information for the filer."""
-
- if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
- timeutils.is_newer_than(self.vol_refresh_time,
- self.vol_refresh_interval)):
- try:
- job_set = set_safe_attr(self, 'vol_refresh_running', True)
- if not job_set:
- LOG.warning(_LW("Volume refresh job already "
- "running. Returning..."))
- return
- self.vol_refresh_voluntary = False
- self.vols = self.zapi_client.get_filer_volumes()
- self.vol_refresh_time = timeutils.utcnow()
- except Exception as e:
- LOG.warning(_LW("Error refreshing volume info. Message: %s"),
- six.text_type(e))
- finally:
- set_safe_attr(self, 'vol_refresh_running', False)
-
- def delete_volume(self, volume):
- """Driver entry point for destroying existing volumes."""
- super(NetAppDirect7modeISCSIDriver, self).delete_volume(volume)
- self.vol_refresh_voluntary = True
+++ /dev/null
-# Copyright (c) 2012 NetApp, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Volume driver for NetApp NFS storage.
-"""
-
-import os
-import re
-from threading import Timer
-import time
-import uuid
-
-from oslo.concurrency import processutils
-from oslo.utils import excutils
-from oslo.utils import units
-import six
-import six.moves.urllib.parse as urlparse
-
-from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
-from cinder.image import image_utils
-from cinder.openstack.common import log as logging
-from cinder import utils
-from cinder.volume.drivers.netapp.api import NaElement
-from cinder.volume.drivers.netapp.api import NaServer
-from cinder.volume.drivers.netapp.client import cmode
-from cinder.volume.drivers.netapp.client import seven_mode
-from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
-from cinder.volume.drivers.netapp.options import netapp_cluster_opts
-from cinder.volume.drivers.netapp.options import netapp_connection_opts
-from cinder.volume.drivers.netapp.options import netapp_img_cache_opts
-from cinder.volume.drivers.netapp.options import netapp_nfs_extra_opts
-from cinder.volume.drivers.netapp.options import netapp_transport_opts
-from cinder.volume.drivers.netapp import ssc_utils
-from cinder.volume.drivers.netapp import utils as na_utils
-from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
-from cinder.volume.drivers.netapp.utils import validate_instantiation
-from cinder.volume.drivers import nfs
-from cinder.volume import utils as volume_utils
-
-
-LOG = logging.getLogger(__name__)
-
-
-class NetAppNFSDriver(nfs.NfsDriver):
- """Base class for NetApp NFS driver.
- Executes commands relating to Volumes.
- """
-
- # do not increment this as it may be used in volume type definitions
- VERSION = "1.0.0"
-
- def __init__(self, *args, **kwargs):
- # NOTE(vish): db is set by Manager
- validate_instantiation(**kwargs)
- self._execute = None
- self._context = None
- self._app_version = kwargs.pop("app_version", "unknown")
- super(NetAppNFSDriver, self).__init__(*args, **kwargs)
- self.configuration.append_config_values(netapp_connection_opts)
- self.configuration.append_config_values(netapp_basicauth_opts)
- self.configuration.append_config_values(netapp_transport_opts)
- self.configuration.append_config_values(netapp_img_cache_opts)
-
- def set_execute(self, execute):
- self._execute = execute
-
- def do_setup(self, context):
- super(NetAppNFSDriver, self).do_setup(context)
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met."""
- raise NotImplementedError()
-
- def get_pool(self, volume):
- """Return pool name where volume resides.
-
- :param volume: The volume hosted by the driver.
- :return: Name of the pool where given volume is hosted.
- """
- return volume['provider_location']
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
- vol_size = volume.size
- snap_size = snapshot.volume_size
-
- self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
- share = self._get_volume_location(snapshot.volume_id)
- volume['provider_location'] = share
- path = self.local_path(volume)
- run_as_root = self._execute_as_root
-
- if self._discover_file_till_timeout(path):
- self._set_rw_permissions(path)
- if vol_size != snap_size:
- try:
- self.extend_volume(volume, vol_size)
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Resizing %s failed. Cleaning volume."),
- volume.name)
- self._execute('rm', path, run_as_root=run_as_root)
- else:
- raise exception.CinderException(
- _("NFS file %s not discovered.") % volume['name'])
-
- return {'provider_location': volume['provider_location']}
-
- def create_snapshot(self, snapshot):
- """Creates a snapshot."""
- self._clone_volume(snapshot['volume_name'],
- snapshot['name'],
- snapshot['volume_id'])
-
- def delete_snapshot(self, snapshot):
- """Deletes a snapshot."""
- nfs_mount = self._get_provider_location(snapshot.volume_id)
-
- if self._volume_not_present(nfs_mount, snapshot.name):
- return True
-
- self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
- run_as_root=self._execute_as_root)
-
- def _get_client(self):
- """Creates client for server."""
- raise NotImplementedError()
-
- def _get_volume_location(self, volume_id):
- """Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>."""
- nfs_server_ip = self._get_host_ip(volume_id)
- export_path = self._get_export_path(volume_id)
- return (nfs_server_ip + ':' + export_path)
-
- def _clone_volume(self, volume_name, clone_name, volume_id, share=None):
- """Clones mounted volume using NetApp api."""
- raise NotImplementedError()
-
- def _get_provider_location(self, volume_id):
- """Returns provider location for given volume."""
- volume = self.db.volume_get(self._context, volume_id)
- return volume.provider_location
-
- def _get_host_ip(self, volume_id):
- """Returns IP address for the given volume."""
- return self._get_provider_location(volume_id).split(':')[0]
-
- def _get_export_path(self, volume_id):
- """Returns NFS export path for the given volume."""
- return self._get_provider_location(volume_id).split(':')[1]
-
- def _volume_not_present(self, nfs_mount, volume_name):
- """Check if volume exists."""
- try:
- self._try_execute('ls', self._get_volume_path(nfs_mount,
- volume_name))
- except processutils.ProcessExecutionError:
- # If the volume isn't present
- return True
- return False
-
- def _try_execute(self, *command, **kwargs):
- # NOTE(vish): Volume commands can partially fail due to timing, but
- # running them a second time on failure will usually
- # recover nicely.
- tries = 0
- while True:
- try:
- self._execute(*command, **kwargs)
- return True
- except processutils.ProcessExecutionError:
- tries = tries + 1
- if tries >= self.configuration.num_shell_tries:
- raise
- LOG.exception(_LE("Recovering from a failed execute. "
- "Try number %s"), tries)
- time.sleep(tries ** 2)
-
- def _get_volume_path(self, nfs_share, volume_name):
- """Get volume path (local fs path) for given volume name on given nfs
- share.
-
- @param nfs_share string, example 172.18.194.100:/var/nfs
- @param volume_name string,
- example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
- """
-
- return os.path.join(self._get_mount_point_for_share(nfs_share),
- volume_name)
-
- def create_cloned_volume(self, volume, src_vref):
- """Creates a clone of the specified volume."""
- vol_size = volume.size
- src_vol_size = src_vref.size
- self._clone_volume(src_vref.name, volume.name, src_vref.id)
- share = self._get_volume_location(src_vref.id)
- volume['provider_location'] = share
- path = self.local_path(volume)
-
- if self._discover_file_till_timeout(path):
- self._set_rw_permissions(path)
- if vol_size != src_vol_size:
- try:
- self.extend_volume(volume, vol_size)
- except Exception as e:
- LOG.error(_LE("Resizing %s failed. "
- "Cleaning volume. "), volume.name)
- self._execute('rm', path,
- run_as_root=self._execute_as_root)
- raise e
- else:
- raise exception.CinderException(
- _("NFS file %s not discovered.") % volume['name'])
-
- return {'provider_location': volume['provider_location']}
-
- def _update_volume_stats(self):
- """Retrieve stats info from volume group."""
- raise NotImplementedError()
-
- def copy_image_to_volume(self, context, volume, image_service, image_id):
- """Fetch the image from image_service and write it to the volume."""
- super(NetAppNFSDriver, self).copy_image_to_volume(
- context, volume, image_service, image_id)
- LOG.info(_LI('Copied image to volume %s using regular download.'),
- volume['name'])
- self._register_image_in_cache(volume, image_id)
-
- def _register_image_in_cache(self, volume, image_id):
- """Stores image in the cache."""
- file_name = 'img-cache-%s' % image_id
- LOG.info(_LI("Registering image in cache %s"), file_name)
- try:
- self._do_clone_rel_img_cache(
- volume['name'], file_name,
- volume['provider_location'], file_name)
- except Exception as e:
- LOG.warning(_LW('Exception while registering image %(image_id)s'
- ' in cache. Exception: %(exc)s')
- % {'image_id': image_id, 'exc': e.__str__()})
-
- def _find_image_in_cache(self, image_id):
- """Finds image in cache and returns list of shares with file name."""
- result = []
- if getattr(self, '_mounted_shares', None):
- for share in self._mounted_shares:
- dir = self._get_mount_point_for_share(share)
- file_name = 'img-cache-%s' % image_id
- file_path = '%s/%s' % (dir, file_name)
- if os.path.exists(file_path):
- LOG.debug('Found cache file for image %(image_id)s'
- ' on share %(share)s'
- % {'image_id': image_id, 'share': share})
- result.append((share, file_name))
- return result
-
- def _do_clone_rel_img_cache(self, src, dst, share, cache_file):
- """Do clone operation w.r.t image cache file."""
- @utils.synchronized(cache_file, external=True)
- def _do_clone():
- dir = self._get_mount_point_for_share(share)
- file_path = '%s/%s' % (dir, dst)
- if not os.path.exists(file_path):
- LOG.info(_LI('Cloning from cache to destination %s'), dst)
- self._clone_volume(src, dst, volume_id=None, share=share)
- _do_clone()
-
- @utils.synchronized('clean_cache')
- def _spawn_clean_cache_job(self):
- """Spawns a clean task if not running."""
- if getattr(self, 'cleaning', None):
- LOG.debug('Image cache cleaning in progress. Returning... ')
- return
- else:
- # Set cleaning to True
- self.cleaning = True
- t = Timer(0, self._clean_image_cache)
- t.start()
-
- def _clean_image_cache(self):
- """Clean the image cache files in cache of space crunch."""
- try:
- LOG.debug('Image cache cleaning in progress.')
- thres_size_perc_start =\
- self.configuration.thres_avl_size_perc_start
- thres_size_perc_stop = \
- self.configuration.thres_avl_size_perc_stop
- for share in getattr(self, '_mounted_shares', []):
- try:
- total_size, total_avl, _total_alc = \
- self._get_capacity_info(share)
- avl_percent = int((total_avl / total_size) * 100)
- if avl_percent <= thres_size_perc_start:
- LOG.info(_LI('Cleaning cache for share %s.'), share)
- eligible_files = self._find_old_cache_files(share)
- threshold_size = int(
- (thres_size_perc_stop * total_size) / 100)
- bytes_to_free = int(threshold_size - total_avl)
- LOG.debug('Files to be queued for deletion %s',
- eligible_files)
- self._delete_files_till_bytes_free(
- eligible_files, share, bytes_to_free)
- else:
- continue
- except Exception as e:
- LOG.warning(_LW('Exception during cache cleaning'
- ' %(share)s. Message - %(ex)s')
- % {'share': share, 'ex': e.__str__()})
- continue
- finally:
- LOG.debug('Image cache cleaning done.')
- self.cleaning = False
-
- def _shortlist_del_eligible_files(self, share, old_files):
- """Prepares list of eligible files to be deleted from cache."""
- raise NotImplementedError()
-
- def _find_old_cache_files(self, share):
- """Finds the old files in cache."""
- mount_fs = self._get_mount_point_for_share(share)
- threshold_minutes = self.configuration.expiry_thres_minutes
- cmd = ['find', mount_fs, '-maxdepth', '1', '-name',
- 'img-cache*', '-amin', '+%s' % (threshold_minutes)]
- res, _err = self._execute(*cmd,
- run_as_root=self._execute_as_root)
- if res:
- old_file_paths = res.strip('\n').split('\n')
- mount_fs_len = len(mount_fs)
- old_files = [x[mount_fs_len + 1:] for x in old_file_paths]
- eligible_files = self._shortlist_del_eligible_files(
- share, old_files)
- return eligible_files
- return []
-
- def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0):
- """Delete files from disk till bytes are freed or list exhausted."""
- LOG.debug('Bytes to free %s', bytes_to_free)
- if file_list and bytes_to_free > 0:
- sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True)
- mount_fs = self._get_mount_point_for_share(share)
- for f in sorted_files:
- if f:
- file_path = '%s/%s' % (mount_fs, f[0])
- LOG.debug('Delete file path %s', file_path)
-
- @utils.synchronized(f[0], external=True)
- def _do_delete():
- if self._delete_file(file_path):
- return True
- return False
-
- if _do_delete():
- bytes_to_free = bytes_to_free - int(f[1])
- if bytes_to_free <= 0:
- return
-
- def _delete_file(self, path):
- """Delete file from disk and return result as boolean."""
- try:
- LOG.debug('Deleting file at path %s', path)
- cmd = ['rm', '-f', path]
- self._execute(*cmd, run_as_root=self._execute_as_root)
- return True
- except Exception as ex:
- LOG.warning(_LW('Exception during deleting %s'), ex.__str__())
- return False
-
- def clone_image(self, volume, image_location, image_id, image_meta):
- """Create a volume efficiently from an existing image.
-
- image_location is a string whose format depends on the
- image service backend in use. The driver should use it
- to determine whether cloning is possible.
-
- image_id is a string which represents id of the image.
- It can be used by the driver to introspect internal
- stores or registry to do an efficient image clone.
-
- Returns a dict of volume properties eg. provider_location,
- boolean indicating whether cloning occurred.
- """
-
- cloned = False
- post_clone = False
- share = None
- try:
- cache_result = self._find_image_in_cache(image_id)
- if cache_result:
- cloned = self._clone_from_cache(volume, image_id, cache_result)
- else:
- cloned = self._direct_nfs_clone(volume, image_location,
- image_id)
- if cloned:
- post_clone = self._post_clone_image(volume)
- except Exception as e:
- msg = e.msg if getattr(e, 'msg', None) else e.__str__()
- LOG.info(_LI('Image cloning unsuccessful for image'
- ' %(image_id)s. Message: %(msg)s')
- % {'image_id': image_id, 'msg': msg})
- vol_path = self.local_path(volume)
- volume['provider_location'] = None
- if os.path.exists(vol_path):
- self._delete_file(vol_path)
- finally:
- cloned = cloned and post_clone
- share = volume['provider_location'] if cloned else None
- bootable = True if cloned else False
- return {'provider_location': share, 'bootable': bootable}, cloned
-
- def _clone_from_cache(self, volume, image_id, cache_result):
- """Clones a copy from image cache."""
- cloned = False
- LOG.info(_LI('Cloning image %s from cache'), image_id)
- for res in cache_result:
- # Repeat tries in other shares if failed in some
- (share, file_name) = res
- LOG.debug('Cache share: %s', share)
- if (share and
- self._is_share_vol_compatible(volume, share)):
- try:
- self._do_clone_rel_img_cache(
- file_name, volume['name'], share, file_name)
- cloned = True
- volume['provider_location'] = share
- break
- except Exception:
- LOG.warning(_LW('Unexpected exception during'
- ' image cloning in share %s'), share)
- return cloned
-
- def _direct_nfs_clone(self, volume, image_location, image_id):
- """Clone directly in nfs share."""
- LOG.info(_LI('Checking image clone %s from glance share.'), image_id)
- cloned = False
- image_location = self._construct_image_nfs_url(image_location)
- share = self._is_cloneable_share(image_location)
- run_as_root = self._execute_as_root
-
- if share and self._is_share_vol_compatible(volume, share):
- LOG.debug('Share is cloneable %s', share)
- volume['provider_location'] = share
- (__, ___, img_file) = image_location.rpartition('/')
- dir_path = self._get_mount_point_for_share(share)
- img_path = '%s/%s' % (dir_path, img_file)
- img_info = image_utils.qemu_img_info(img_path,
- run_as_root=run_as_root)
- if img_info.file_format == 'raw':
- LOG.debug('Image is raw %s', image_id)
- self._clone_volume(
- img_file, volume['name'],
- volume_id=None, share=share)
- cloned = True
- else:
- LOG.info(_LI('Image will locally be converted to raw %s'),
- image_id)
- dst = '%s/%s' % (dir_path, volume['name'])
- image_utils.convert_image(img_path, dst, 'raw',
- run_as_root=run_as_root)
- data = image_utils.qemu_img_info(dst, run_as_root=run_as_root)
- if data.file_format != "raw":
- raise exception.InvalidResults(
- _("Converted to raw, but"
- " format is now %s") % data.file_format)
- else:
- cloned = True
- self._register_image_in_cache(
- volume, image_id)
- return cloned
-
- def _post_clone_image(self, volume):
- """Do operations post image cloning."""
- LOG.info(_LI('Performing post clone for %s'), volume['name'])
- vol_path = self.local_path(volume)
- if self._discover_file_till_timeout(vol_path):
- self._set_rw_permissions(vol_path)
- self._resize_image_file(vol_path, volume['size'])
- return True
- raise exception.InvalidResults(
- _("NFS file could not be discovered."))
-
- def _resize_image_file(self, path, new_size):
- """Resize the image file on share to new size."""
- LOG.debug('Checking file for resize')
- if self._is_file_size_equal(path, new_size):
- return
- else:
- LOG.info(_LI('Resizing file to %sG'), new_size)
- image_utils.resize_image(path, new_size,
- run_as_root=self._execute_as_root)
- if self._is_file_size_equal(path, new_size):
- return
- else:
- raise exception.InvalidResults(
- _('Resizing image file failed.'))
-
- def _is_file_size_equal(self, path, size):
- """Checks if file size at path is equal to size."""
- data = image_utils.qemu_img_info(path,
- run_as_root=self._execute_as_root)
- virt_size = data.virtual_size / units.Gi
- if virt_size == size:
- return True
- else:
- return False
-
- def _discover_file_till_timeout(self, path, timeout=45):
- """Checks if file size at path is equal to size."""
- # Sometimes nfs takes time to discover file
- # Retrying in case any unexpected situation occurs
- retry_seconds = timeout
- sleep_interval = 2
- while True:
- if os.path.exists(path):
- return True
- else:
- if retry_seconds <= 0:
- LOG.warning(_LW('Discover file retries exhausted.'))
- return False
- else:
- time.sleep(sleep_interval)
- retry_seconds = retry_seconds - sleep_interval
-
- def _is_cloneable_share(self, image_location):
- """Finds if the image at location is cloneable."""
- conn, dr = self._check_get_nfs_path_segs(image_location)
- return self._check_share_in_use(conn, dr)
-
- def _check_get_nfs_path_segs(self, image_location):
- """Checks if the nfs path format is matched.
-
-        The WebNFS url format with a relative path is supported.
-        All characters are accepted in path names, since the result
-        is checked against the mounted shares, which contain only
-        allowed path segments. Returns connection and dir details.
- """
- conn, dr = None, None
- if image_location:
- nfs_loc_pattern = \
- ('^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)'
- '*(/[^\/\\\\]+)$)')
- matched = re.match(nfs_loc_pattern, image_location, flags=0)
- if not matched:
- LOG.debug('Image location not in the'
- ' expected format %s', image_location)
- else:
- conn = matched.group(2)
- dr = matched.group(3) or '/'
- return (conn, dr)
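# Illustrative sketch (not part of the driver): how the pattern above splits
# an image location into its connection and directory segments.
import re

NFS_LOC_PATTERN = ('^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)'
                   '*(/[^\/\\\\]+)$)')

def split_nfs_location(image_location):
    matched = re.match(NFS_LOC_PATTERN, image_location)
    if not matched:
        return None, None
    return matched.group(2), matched.group(3) or '/'

# prints ('192.168.1.10:2049', '/exports/glance')
print(split_nfs_location('nfs://192.168.1.10:2049/exports/glance/image-id'))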
-
- def _share_match_for_ip(self, ip, shares):
- """Returns the share that is served by ip.
-
-        Multiple shares can have the same directory path but be
-        served from different IPs. This finds the share that is
-        served by the given IP on the same NFS server.
- """
- raise NotImplementedError()
-
- def _check_share_in_use(self, conn, dir):
- """Checks if share is cinder mounted and returns it."""
- try:
- if conn:
- host = conn.split(':')[0]
- ip = na_utils.resolve_hostname(host)
- share_candidates = []
- for sh in self._mounted_shares:
- sh_exp = sh.split(':')[1]
- if sh_exp == dir:
- share_candidates.append(sh)
- if share_candidates:
- LOG.debug('Found possible share matches %s',
- share_candidates)
- return self._share_match_for_ip(ip, share_candidates)
- except Exception:
- LOG.warning(_LW("Unexpected exception while short "
- "listing used share."))
- return None
-
- def _construct_image_nfs_url(self, image_location):
- """Construct direct url for nfs backend.
-
- It creates direct url from image_location
- which is a tuple with direct_url and locations.
-        Returns a url with the nfs scheme if the store is nfs-backed,
-        otherwise returns the url unchanged. It needs to be verified
-        by the backend before use.
- """
-
- direct_url, locations = image_location
- if not direct_url and not locations:
- raise exception.NotFound(_('Image location not present.'))
-
-        # Locations will always be a list with a single element until
-        # bp multiple-image-locations is introduced
- if not locations:
- return direct_url
- location = locations[0]
- url = location['url']
- if not location['metadata']:
- return url
- location_type = location['metadata'].get('type')
- if not location_type or location_type.lower() != "nfs":
- return url
- share_location = location['metadata'].get('share_location')
- mount_point = location['metadata'].get('mount_point')
- if not share_location or not mount_point:
- return url
- url_parse = urlparse.urlparse(url)
- abs_path = os.path.join(url_parse.netloc, url_parse.path)
- rel_path = os.path.relpath(abs_path, mount_point)
- direct_url = "%s/%s" % (share_location, rel_path)
- return direct_url
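# Worked example (hypothetical values): how a Glance location entry with NFS
# metadata is turned into a direct nfs:// url by the logic above.
import os
import urlparse  # Python 2; use six.moves.urllib.parse elsewhere

location = {
    'url': 'file:///var/lib/glance/images/image-id',
    'metadata': {'type': 'nfs',
                 'share_location': 'nfs://10.0.0.5/exports/glance',
                 'mount_point': '/var/lib/glance/images'},
}
url_parse = urlparse.urlparse(location['url'])
abs_path = os.path.join(url_parse.netloc, url_parse.path)
rel_path = os.path.relpath(abs_path, location['metadata']['mount_point'])
# prints nfs://10.0.0.5/exports/glance/image-id
print('%s/%s' % (location['metadata']['share_location'], rel_path))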
-
- def extend_volume(self, volume, new_size):
- """Extend an existing volume to the new size."""
- LOG.info(_LI('Extending volume %s.'), volume['name'])
- path = self.local_path(volume)
- self._resize_image_file(path, new_size)
-
- def _is_share_vol_compatible(self, volume, share):
- """Checks if share is compatible with volume to host it."""
- raise NotImplementedError()
-
- def _check_share_can_hold_size(self, share, size):
- """Checks if volume can hold image with size."""
- _tot_size, tot_available, _tot_allocated = self._get_capacity_info(
- share)
- if tot_available < size:
- msg = _("Container size smaller than required file size.")
- raise exception.VolumeDriverException(msg)
-
- def _move_nfs_file(self, source_path, dest_path):
- """Moves source to destination."""
-
- @utils.synchronized(dest_path, external=True)
- def _move_file(src, dst):
- if os.path.exists(dst):
- LOG.warning(_LW("Destination %s already exists."), dst)
- return False
- self._execute('mv', src, dst,
- run_as_root=self._execute_as_root)
- return True
-
- try:
- return _move_file(source_path, dest_path)
- except Exception as e:
- LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s')
- % {'src': source_path, 'e': e})
- return False
-
-
-class NetAppDirectNfsDriver(NetAppNFSDriver):
- """Executes commands related to volumes on NetApp filer."""
-
- def __init__(self, *args, **kwargs):
- super(NetAppDirectNfsDriver, self).__init__(*args, **kwargs)
-
- def do_setup(self, context):
- super(NetAppDirectNfsDriver, self).do_setup(context)
- self._context = context
- self._client = self._get_client()
- self._do_custom_setup(self._client)
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met."""
- self._check_flags()
-
- def _check_flags(self):
- """Raises error if any required configuration flag is missing."""
- required_flags = ['netapp_login',
- 'netapp_password',
- 'netapp_server_hostname']
- for flag in required_flags:
- if not getattr(self.configuration, flag, None):
- raise exception.CinderException(_('%s is not set') % flag)
-
- def _get_client(self):
- """Creates NetApp api client."""
- client = NaServer(
- host=self.configuration.netapp_server_hostname,
- server_type=NaServer.SERVER_TYPE_FILER,
- transport_type=self.configuration.netapp_transport_type,
- style=NaServer.STYLE_LOGIN_PASSWORD,
- username=self.configuration.netapp_login,
- password=self.configuration.netapp_password)
- if self.configuration.netapp_server_port is not None:
- client.set_port(self.configuration.netapp_server_port)
- return client
-
- def _do_custom_setup(self, client):
- """Do the customized set up on client if any for different types."""
- raise NotImplementedError()
-
- def _is_naelement(self, elem):
- """Checks if element is NetApp element."""
- if not isinstance(elem, NaElement):
- raise ValueError('Expects NaElement')
-
- def _get_export_ip_path(self, volume_id=None, share=None):
- """Returns export ip and path.
-
- One of volume id or share is used to return the values.
- """
-
- if volume_id:
- host_ip = self._get_host_ip(volume_id)
- export_path = self._get_export_path(volume_id)
- elif share:
- host_ip = share.split(':')[0]
- export_path = share.split(':')[1]
- else:
-            raise exception.InvalidInput(
-                reason=_('Neither volume id nor share is specified.'))
- return (host_ip, export_path)
-
- def _create_file_usage_req(self, path):
- """Creates the request element for file_usage_get."""
- file_use = NaElement.create_node_with_children(
- 'file-usage-get', **{'path': path})
- return file_use
-
- def _get_extended_capacity_info(self, nfs_share):
- """Returns an extended set of share capacity metrics."""
-
- total_size, total_available, total_allocated = \
- self._get_capacity_info(nfs_share)
-
- used_ratio = (total_size - total_available) / total_size
- subscribed_ratio = total_allocated / total_size
- apparent_size = max(0, total_size * self.configuration.nfs_used_ratio)
- apparent_available = max(0, apparent_size - total_allocated)
-
- return {'total_size': total_size, 'total_available': total_available,
- 'total_allocated': total_allocated, 'used_ratio': used_ratio,
- 'subscribed_ratio': subscribed_ratio,
- 'apparent_size': apparent_size,
- 'apparent_available': apparent_available}
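# Numeric sketch (made-up figures) of the ratios computed above, assuming the
# capacity values are reported in bytes.
GiB = 1024 ** 3
total_size = 100.0 * GiB
total_available = 30.0 * GiB
total_allocated = 120.0 * GiB
nfs_used_ratio = 0.95  # value of the nfs_used_ratio config option

used_ratio = (total_size - total_available) / total_size      # 0.7
subscribed_ratio = total_allocated / total_size               # 1.2
apparent_size = max(0, total_size * nfs_used_ratio)           # 95 GiB
apparent_available = max(0, apparent_size - total_allocated)  # 0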
-
-
-class NetAppDirectCmodeNfsDriver(NetAppDirectNfsDriver):
- """Executes commands related to volumes on c mode."""
-
- def __init__(self, *args, **kwargs):
- super(NetAppDirectCmodeNfsDriver, self).__init__(*args, **kwargs)
- self.configuration.append_config_values(netapp_cluster_opts)
- self.configuration.append_config_values(netapp_nfs_extra_opts)
-
- def _do_custom_setup(self, client):
- """Do the customized set up on client for cluster mode."""
-        # Default API version used for the first call
- client.set_api_version(1, 15)
- self.vserver = self.configuration.netapp_vserver
- self.zapi_client = cmode.Client(client, self.vserver)
- (major, minor) = self.zapi_client.get_ontapi_version()
- client.set_api_version(major, minor)
- self.ssc_vols = None
- self.stale_vols = set()
- if self.vserver:
- self.ssc_enabled = True
- LOG.info(_LI("Shares on vserver %s will only"
- " be used for provisioning.") % self.vserver)
- else:
- self.ssc_enabled = False
- LOG.warning(_LW("No vserver set in config. "
- "SSC will be disabled."))
-
- def check_for_setup_error(self):
- """Check that the driver is working and can communicate."""
- super(NetAppDirectCmodeNfsDriver, self).check_for_setup_error()
- if self.ssc_enabled:
- ssc_utils.check_ssc_api_permissions(self._client)
-
- def create_volume(self, volume):
- """Creates a volume.
-
- :param volume: volume reference
- """
- LOG.debug('create_volume on %s' % volume['host'])
- self._ensure_shares_mounted()
-
- # get share as pool name
- share = volume_utils.extract_host(volume['host'], level='pool')
-
- if share is None:
- msg = _("Pool is not available in the volume host field.")
- raise exception.InvalidHost(reason=msg)
-
- extra_specs = get_volume_extra_specs(volume)
- qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
- if extra_specs else None
-
- # warn on obsolete extra specs
- na_utils.log_extra_spec_warnings(extra_specs)
-
- try:
- volume['provider_location'] = share
-            LOG.info(_LI('Creating volume at location %s')
-                     % volume['provider_location'])
- self._do_create_volume(volume)
- if qos_policy_group:
- self._set_qos_policy_group_on_volume(volume, share,
- qos_policy_group)
- return {'provider_location': volume['provider_location']}
- except Exception as ex:
- LOG.error(_LE("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s")
- % {'name': volume['name'],
- 'share': volume['provider_location'],
- 'ex': ex})
- volume['provider_location'] = None
- finally:
- if self.ssc_enabled:
- self._update_stale_vols(self._get_vol_for_share(share))
-
- msg = _("Volume %s could not be created on shares.")
- raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
-
- def _set_qos_policy_group_on_volume(self, volume, share, qos_policy_group):
- target_path = '%s' % (volume['name'])
- export_path = share.split(':')[1]
- flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver,
- export_path)
- self.zapi_client.file_assign_qos(flex_vol_name,
- qos_policy_group,
- target_path)
-
- def _clone_volume(self, volume_name, clone_name,
- volume_id, share=None):
- """Clones mounted volume on NetApp Cluster."""
- (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
- self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
- vserver)
- share = share if share else self._get_provider_location(volume_id)
- self._post_prov_deprov_in_ssc(share)
-
- def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
- """Gets the vserver and export volume for share."""
- (host_ip, export_path) = self._get_export_ip_path(volume_id, share)
- ifs = self.zapi_client.get_if_info_by_ip(host_ip)
- vserver = ifs[0].get_child_content('vserver')
- exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver,
- export_path)
- return (vserver, exp_volume)
-
- def _get_vserver_ips(self, vserver):
- """Get ips for the vserver."""
- result = na_utils.invoke_api(
- self._client, api_name='net-interface-get-iter',
- is_iter=True, tunnel=vserver)
- if_list = []
- for res in result:
- records = res.get_child_content('num-records')
-            if records and int(records) > 0:
- attr_list = res['attributes-list']
- ifs = attr_list.get_children()
- if_list.extend(ifs)
- return if_list
-
- def _update_volume_stats(self):
- """Retrieve stats info from vserver."""
-
- self._ensure_shares_mounted()
-        sync = self.ssc_vols is None
- ssc_utils.refresh_cluster_ssc(self, self._client,
- self.vserver, synchronous=sync)
-
- LOG.debug('Updating volume stats')
- data = {}
- netapp_backend = 'NetApp_NFS_Cluster_direct'
- backend_name = self.configuration.safe_get('volume_backend_name')
- data['volume_backend_name'] = backend_name or netapp_backend
- data['vendor_name'] = 'NetApp'
- data['driver_version'] = self.VERSION
- data['storage_protocol'] = 'nfs'
- data['pools'] = self._get_pool_stats()
-
- self._spawn_clean_cache_job()
- na_utils.provide_ems(self, self._client, netapp_backend,
- self._app_version)
- self._stats = data
-
- def _get_pool_stats(self):
- """Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
-
- pools = []
-
- for nfs_share in self._mounted_shares:
-
- capacity = self._get_extended_capacity_info(nfs_share)
-
- pool = dict()
- pool['pool_name'] = nfs_share
- pool['QoS_support'] = False
- pool['reserved_percentage'] = 0
-
- # Report pool as reserved when over the configured used_ratio
- if capacity['used_ratio'] > self.configuration.nfs_used_ratio:
- pool['reserved_percentage'] = 100
-
- # Report pool as reserved when over the subscribed ratio
- if capacity['subscribed_ratio'] >=\
- self.configuration.nfs_oversub_ratio:
- pool['reserved_percentage'] = 100
-
- # convert sizes to GB
- total = float(capacity['apparent_size']) / units.Gi
- pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
-
- free = float(capacity['apparent_available']) / units.Gi
- pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
-
- # add SSC content if available
- vol = self._get_vol_for_share(nfs_share)
- if vol and self.ssc_vols:
- pool['netapp_raid_type'] = vol.aggr['raid_type']
- pool['netapp_disk_type'] = vol.aggr['disk_type']
-
- mirrored = vol in self.ssc_vols['mirrored']
- pool['netapp_mirrored'] = six.text_type(mirrored).lower()
- pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
-
- dedup = vol in self.ssc_vols['dedup']
- pool['netapp_dedup'] = six.text_type(dedup).lower()
- pool['netapp_nodedup'] = six.text_type(not dedup).lower()
-
- compression = vol in self.ssc_vols['compression']
- pool['netapp_compression'] = six.text_type(compression).lower()
- pool['netapp_nocompression'] = six.text_type(
- not compression).lower()
-
- thin = vol in self.ssc_vols['thin']
- pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
- pool['netapp_thick_provisioned'] = six.text_type(
- not thin).lower()
-
- pools.append(pool)
-
- return pools
-
- @utils.synchronized('update_stale')
- def _update_stale_vols(self, volume=None, reset=False):
- """Populates stale vols with vol and returns set copy."""
- if volume:
- self.stale_vols.add(volume)
- set_copy = self.stale_vols.copy()
- if reset:
- self.stale_vols.clear()
- return set_copy
-
- @utils.synchronized("refresh_ssc_vols")
- def refresh_ssc_vols(self, vols):
- """Refreshes ssc_vols with latest entries."""
- if not self._mounted_shares:
- LOG.warning(_LW("No shares found hence skipping ssc refresh."))
- return
- mnt_share_vols = set()
- vs_ifs = self._get_vserver_ips(self.vserver)
- for vol in vols['all']:
- for sh in self._mounted_shares:
- host = sh.split(':')[0]
- junction = sh.split(':')[1]
- ip = na_utils.resolve_hostname(host)
- if (self._ip_in_ifs(ip, vs_ifs) and
- junction == vol.id['junction_path']):
- mnt_share_vols.add(vol)
- vol.export['path'] = sh
- break
- for key in vols.keys():
- vols[key] = vols[key] & mnt_share_vols
- self.ssc_vols = vols
-
- def _ip_in_ifs(self, ip, api_ifs):
- """Checks if ip is listed for ifs in api format."""
- if api_ifs is None:
- return False
- for ifc in api_ifs:
- ifc_ip = ifc.get_child_content("address")
- if ifc_ip == ip:
- return True
- return False
-
- def _shortlist_del_eligible_files(self, share, old_files):
- """Prepares list of eligible files to be deleted from cache."""
- file_list = []
- (vserver, exp_volume) = self._get_vserver_and_exp_vol(
- volume_id=None, share=share)
- for file in old_files:
- path = '/vol/%s/%s' % (exp_volume, file)
- u_bytes = self.zapi_client.get_file_usage(path, vserver)
- file_list.append((file, u_bytes))
-        LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
- return file_list
-
- def _share_match_for_ip(self, ip, shares):
- """Returns the share that is served by ip.
-
-        Multiple shares can have the same directory path but be
-        served from different IPs. This finds the share that is
-        served by the given IP on the same NFS server.
- """
- ip_vserver = self._get_vserver_for_ip(ip)
- if ip_vserver and shares:
- for share in shares:
- ip_sh = share.split(':')[0]
- sh_vserver = self._get_vserver_for_ip(ip_sh)
- if sh_vserver == ip_vserver:
- LOG.debug('Share match found for ip %s', ip)
- return share
- LOG.debug('No share match found for ip %s', ip)
- return None
-
- def _get_vserver_for_ip(self, ip):
- """Get vserver for the mentioned ip."""
- try:
- ifs = self.zapi_client.get_if_info_by_ip(ip)
- vserver = ifs[0].get_child_content('vserver')
- return vserver
- except Exception:
- return None
-
- def _get_vol_for_share(self, nfs_share):
- """Gets the ssc vol with given share."""
- if self.ssc_vols:
- for vol in self.ssc_vols['all']:
- if vol.export['path'] == nfs_share:
- return vol
- return None
-
- def _is_share_vol_compatible(self, volume, share):
- """Checks if share is compatible with volume to host it."""
- compatible = self._is_share_eligible(share, volume['size'])
- if compatible and self.ssc_enabled:
- matched = self._is_share_vol_type_match(volume, share)
- compatible = compatible and matched
- return compatible
-
- def _is_share_vol_type_match(self, volume, share):
- """Checks if share matches volume type."""
- netapp_vol = self._get_vol_for_share(share)
- LOG.debug("Found volume %(vol)s for share %(share)s."
- % {'vol': netapp_vol, 'share': share})
- extra_specs = get_volume_extra_specs(volume)
- vols = ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs)
- return netapp_vol in vols
-
- def delete_volume(self, volume):
- """Deletes a logical volume."""
- share = volume['provider_location']
- super(NetAppDirectCmodeNfsDriver, self).delete_volume(volume)
- self._post_prov_deprov_in_ssc(share)
-
- def delete_snapshot(self, snapshot):
- """Deletes a snapshot."""
- share = self._get_provider_location(snapshot.volume_id)
- super(NetAppDirectCmodeNfsDriver, self).delete_snapshot(snapshot)
- self._post_prov_deprov_in_ssc(share)
-
- def _post_prov_deprov_in_ssc(self, share):
- if self.ssc_enabled and share:
- netapp_vol = self._get_vol_for_share(share)
- if netapp_vol:
- self._update_stale_vols(volume=netapp_vol)
-
- def copy_image_to_volume(self, context, volume, image_service, image_id):
- """Fetch the image from image_service and write it to the volume."""
- copy_success = False
- try:
- major, minor = self._client.get_api_version()
- col_path = self.configuration.netapp_copyoffload_tool_path
- if (major == 1 and minor >= 20 and col_path):
- self._try_copyoffload(context, volume, image_service, image_id)
- copy_success = True
- LOG.info(_LI('Copied image %(img)s to '
- 'volume %(vol)s using copy'
- ' offload workflow.')
- % {'img': image_id, 'vol': volume['id']})
- else:
- LOG.debug("Copy offload either not configured or"
- " unsupported.")
- except Exception as e:
- LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e)
- finally:
- if not copy_success:
- super(NetAppDirectCmodeNfsDriver, self).copy_image_to_volume(
- context, volume, image_service, image_id)
- if self.ssc_enabled:
- sh = self._get_provider_location(volume['id'])
- self._update_stale_vols(self._get_vol_for_share(sh))
-
- def _try_copyoffload(self, context, volume, image_service, image_id):
- """Tries server side file copy offload."""
- copied = False
- cache_result = self._find_image_in_cache(image_id)
- if cache_result:
- copied = self._copy_from_cache(volume, image_id, cache_result)
- if not cache_result or not copied:
- self._copy_from_img_service(context, volume, image_service,
- image_id)
-
- def _get_ip_verify_on_cluster(self, host):
- """Verifies if host on same cluster and returns ip."""
- ip = na_utils.resolve_hostname(host)
- vserver = self._get_vserver_for_ip(ip)
- if not vserver:
- raise exception.NotFound(_("No vserver owning the ip %s.") % ip)
- return ip
-
- def _copy_from_cache(self, volume, image_id, cache_result):
- """Try copying image file_name from cached file_name."""
- LOG.debug("Trying copy from cache using copy offload.")
- copied = False
- for res in cache_result:
- try:
- (share, file_name) = res
- LOG.debug("Found cache file_name on share %s.", share)
- if share != self._get_provider_location(volume['id']):
- col_path = self.configuration.netapp_copyoffload_tool_path
- src_ip = self._get_ip_verify_on_cluster(
- share.split(':')[0])
- src_path = os.path.join(share.split(':')[1], file_name)
- dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
- volume['id']))
- dst_path = os.path.join(
- self._get_export_path(volume['id']), volume['name'])
- self._execute(col_path, src_ip, dst_ip,
- src_path, dst_path,
- run_as_root=self._execute_as_root,
- check_exit_code=0)
- self._register_image_in_cache(volume, image_id)
- LOG.debug("Copied image from cache to volume %s using"
- " copy offload.", volume['id'])
- else:
- self._clone_file_dst_exists(share, file_name,
- volume['name'],
- dest_exists=True)
- LOG.debug("Copied image from cache to volume %s using"
- " cloning.", volume['id'])
- self._post_clone_image(volume)
- copied = True
- break
- except Exception as e:
- LOG.exception(_LE('Error in workflow copy '
- 'from cache. %s.'), e)
- return copied
-
- def _clone_file_dst_exists(self, share, src_name, dst_name,
- dest_exists=False):
- """Clone file even if dest exists."""
- (vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share)
- self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver,
- dest_exists=dest_exists)
-
- def _copy_from_img_service(self, context, volume, image_service,
- image_id):
- """Copies from the image service using copy offload."""
- LOG.debug("Trying copy from image service using copy offload.")
- image_loc = image_service.get_location(context, image_id)
- image_loc = self._construct_image_nfs_url(image_loc)
- conn, dr = self._check_get_nfs_path_segs(image_loc)
- if conn:
- src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
- else:
- raise exception.NotFound(_("Source host details not found."))
- (__, ___, img_file) = image_loc.rpartition('/')
- src_path = os.path.join(dr, img_file)
- dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
- volume['id']))
-        # A temporary file is needed to handle image format conversion
- tmp_img_file = six.text_type(uuid.uuid4())
- col_path = self.configuration.netapp_copyoffload_tool_path
- img_info = image_service.show(context, image_id)
- dst_share = self._get_provider_location(volume['id'])
- self._check_share_can_hold_size(dst_share, img_info['size'])
- run_as_root = self._execute_as_root
-
- dst_dir = self._get_mount_point_for_share(dst_share)
- dst_img_local = os.path.join(dst_dir, tmp_img_file)
- try:
-            # If the source and destination shares differ
- if (('%s:%s' % (src_ip, dr)) !=
- ('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
- dst_img_serv_path = os.path.join(
- self._get_export_path(volume['id']), tmp_img_file)
- self._execute(col_path, src_ip, dst_ip, src_path,
- dst_img_serv_path, run_as_root=run_as_root,
- check_exit_code=0)
- else:
- self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
- self._discover_file_till_timeout(dst_img_local, timeout=120)
- LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
- % {'img': image_id, 'tmp': tmp_img_file})
- dst_img_cache_local = os.path.join(dst_dir,
- 'img-cache-%s' % (image_id))
- if img_info['disk_format'] == 'raw':
- LOG.debug('Image is raw %s.', image_id)
- self._clone_file_dst_exists(dst_share, tmp_img_file,
- volume['name'], dest_exists=True)
- self._move_nfs_file(dst_img_local, dst_img_cache_local)
- LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
- % {'img': image_id, 'vol': volume['id']})
- else:
- LOG.debug('Image will be converted to raw %s.', image_id)
- img_conv = six.text_type(uuid.uuid4())
- dst_img_conv_local = os.path.join(dst_dir, img_conv)
-
-                # Check against the image size (an approximate check only)
- self._check_share_can_hold_size(dst_share, img_info['size'])
- try:
- image_utils.convert_image(dst_img_local,
- dst_img_conv_local, 'raw',
- run_as_root=run_as_root)
- data = image_utils.qemu_img_info(dst_img_conv_local,
- run_as_root=run_as_root)
- if data.file_format != "raw":
- raise exception.InvalidResults(
- _("Converted to raw, but format is now %s.")
- % data.file_format)
- else:
- self._clone_file_dst_exists(dst_share, img_conv,
- volume['name'],
- dest_exists=True)
- self._move_nfs_file(dst_img_conv_local,
- dst_img_cache_local)
- LOG.debug('Copied locally converted raw image'
- ' %(img)s to volume %(vol)s.'
- % {'img': image_id, 'vol': volume['id']})
- finally:
- if os.path.exists(dst_img_conv_local):
- self._delete_file(dst_img_conv_local)
- self._post_clone_image(volume)
- finally:
- if os.path.exists(dst_img_local):
- self._delete_file(dst_img_local)
-
-
-class NetAppDirect7modeNfsDriver(NetAppDirectNfsDriver):
- """Executes commands related to volumes on 7 mode."""
-
- def __init__(self, *args, **kwargs):
- super(NetAppDirect7modeNfsDriver, self).__init__(*args, **kwargs)
-
- def _do_custom_setup(self, client):
- """Do the customized set up on client if any for 7 mode."""
- self.zapi_client = seven_mode.Client(client)
- (major, minor) = self.zapi_client.get_ontapi_version()
- client.set_api_version(major, minor)
-
- def check_for_setup_error(self):
- """Checks if setup occurred properly."""
- api_version = self._client.get_api_version()
- if api_version:
- major, minor = api_version
- if major == 1 and minor < 9:
- msg = _("Unsupported ONTAP version."
- " ONTAP version 7.3.1 and above is supported.")
- raise exception.VolumeBackendAPIException(data=msg)
- else:
- msg = _("Api version could not be determined.")
- raise exception.VolumeBackendAPIException(data=msg)
- super(NetAppDirect7modeNfsDriver, self).check_for_setup_error()
-
- def create_volume(self, volume):
- """Creates a volume.
-
- :param volume: volume reference
- """
- LOG.debug('create_volume on %s' % volume['host'])
- self._ensure_shares_mounted()
-
- # get share as pool name
- share = volume_utils.extract_host(volume['host'], level='pool')
-
- if share is None:
- msg = _("Pool is not available in the volume host field.")
- raise exception.InvalidHost(reason=msg)
-
- volume['provider_location'] = share
- LOG.info(_LI('Creating volume at location %s')
- % volume['provider_location'])
-
- try:
- self._do_create_volume(volume)
- except Exception as ex:
- LOG.error(_LE("Exception creating vol %(name)s on "
- "share %(share)s. Details: %(ex)s")
- % {'name': volume['name'],
- 'share': volume['provider_location'],
- 'ex': six.text_type(ex)})
- msg = _("Volume %s could not be created on shares.")
- raise exception.VolumeBackendAPIException(
- data=msg % (volume['name']))
-
- return {'provider_location': volume['provider_location']}
-
- def _clone_volume(self, volume_name, clone_name,
- volume_id, share=None):
- """Clones mounted volume with NetApp filer."""
- (_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
- storage_path = self.zapi_client.get_actual_path_for_export(export_path)
- target_path = '%s/%s' % (storage_path, clone_name)
- self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
- target_path)
-
- def _update_volume_stats(self):
- """Retrieve stats info from vserver."""
-
- self._ensure_shares_mounted()
-
- LOG.debug('Updating volume stats')
- data = {}
- netapp_backend = 'NetApp_NFS_7mode_direct'
- backend_name = self.configuration.safe_get('volume_backend_name')
- data['volume_backend_name'] = backend_name or netapp_backend
- data['vendor_name'] = 'NetApp'
- data['driver_version'] = self.VERSION
- data['storage_protocol'] = 'nfs'
- data['pools'] = self._get_pool_stats()
-
- self._spawn_clean_cache_job()
- na_utils.provide_ems(self, self._client, netapp_backend,
- self._app_version, server_type="7mode")
- self._stats = data
-
- def _get_pool_stats(self):
- """Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
-
- pools = []
-
- for nfs_share in self._mounted_shares:
-
- capacity = self._get_extended_capacity_info(nfs_share)
-
- pool = dict()
- pool['pool_name'] = nfs_share
- pool['QoS_support'] = False
- pool['reserved_percentage'] = 0
-
- # Report pool as reserved when over the configured used_ratio
- if capacity['used_ratio'] > self.configuration.nfs_used_ratio:
- pool['reserved_percentage'] = 100
-
- # Report pool as reserved when over the subscribed ratio
- if capacity['subscribed_ratio'] >=\
- self.configuration.nfs_oversub_ratio:
- pool['reserved_percentage'] = 100
-
- # convert sizes to GB
- total = float(capacity['apparent_size']) / units.Gi
- pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
-
- free = float(capacity['apparent_available']) / units.Gi
- pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
-
- pools.append(pool)
-
- return pools
-
- def _shortlist_del_eligible_files(self, share, old_files):
- """Prepares list of eligible files to be deleted from cache."""
- file_list = []
- exp_volume = self.zapi_client.get_actual_path_for_export(share)
- for file in old_files:
- path = '/vol/%s/%s' % (exp_volume, file)
- u_bytes = self.zapi_client.get_file_usage(path)
- file_list.append((file, u_bytes))
-        LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
- return file_list
-
- def _is_filer_ip(self, ip):
- """Checks whether ip is on the same filer."""
- try:
- ifconfig = self.zapi_client.get_ifconfig()
- if_info = ifconfig.get_child_by_name('interface-config-info')
- if if_info:
- ifs = if_info.get_children()
- for intf in ifs:
- v4_addr = intf.get_child_by_name('v4-primary-address')
- if v4_addr:
- ip_info = v4_addr.get_child_by_name('ip-address-info')
- if ip_info:
- address = ip_info.get_child_content('address')
- if ip == address:
- return True
- else:
- continue
- except Exception:
- return False
- return False
-
- def _share_match_for_ip(self, ip, shares):
- """Returns the share that is served by ip.
-
-        Multiple shares can have the same directory path but be
-        served from different IPs. This finds the share that is
-        served by the given IP on the same NFS server.
- """
- if self._is_filer_ip(ip) and shares:
- for share in shares:
- ip_sh = share.split(':')[0]
- if self._is_filer_ip(ip_sh):
- LOG.debug('Share match found for ip %s', ip)
- return share
- LOG.debug('No share match found for ip %s', ip)
- return None
-
- def _is_share_vol_compatible(self, volume, share):
- """Checks if share is compatible with volume to host it."""
- return self._is_share_eligible(share, volume['size'])
-# Copyright (c) 2012 NetApp, Inc.
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Bob Callaway. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
-# Copyright (c) 2012 NetApp, Inc.
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
+# Copyright (c) 2012 NetApp, Inc. All rights reserved.
+# Copyright (c) 2014 Navneet Singh. All rights reserved.
+# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
NetApp drivers to achieve the desired functionality.
"""
-import base64
-import binascii
-import copy
+
import decimal
import platform
import socket
-import uuid
from oslo.concurrency import processutils as putils
-from oslo.utils import timeutils
import six
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import log as logging
from cinder import utils
from cinder import version
-from cinder.volume.drivers.netapp.api import NaApiError
-from cinder.volume.drivers.netapp.api import NaElement
-from cinder.volume.drivers.netapp.api import NaErrors
-from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume import volume_types
'netapp_thick_provisioned': 'netapp_thin_provisioned'}
-def provide_ems(requester, server, netapp_backend, app_version,
- server_type="cluster"):
- """Provide ems with volume stats for the requester.
-
- :param server_type: cluster or 7mode.
- """
-
- def _create_ems(netapp_backend, app_version, server_type):
- """Create ems api request."""
- ems_log = NaElement('ems-autosupport-log')
- host = socket.getfqdn() or 'Cinder_node'
- if server_type == "cluster":
- dest = "cluster node"
- else:
- dest = "7 mode controller"
- ems_log.add_new_child('computer-name', host)
- ems_log.add_new_child('event-id', '0')
- ems_log.add_new_child('event-source',
- 'Cinder driver %s' % netapp_backend)
- ems_log.add_new_child('app-version', app_version)
- ems_log.add_new_child('category', 'provisioning')
- ems_log.add_new_child('event-description',
- 'OpenStack Cinder connected to %s' % dest)
- ems_log.add_new_child('log-level', '6')
- ems_log.add_new_child('auto-support', 'false')
- return ems_log
-
- def _create_vs_get():
- """Create vs_get api request."""
- vs_get = NaElement('vserver-get-iter')
- vs_get.add_new_child('max-records', '1')
- query = NaElement('query')
- query.add_node_with_children('vserver-info',
- **{'vserver-type': 'node'})
- vs_get.add_child_elem(query)
- desired = NaElement('desired-attributes')
- desired.add_node_with_children(
- 'vserver-info', **{'vserver-name': '', 'vserver-type': ''})
- vs_get.add_child_elem(desired)
- return vs_get
-
- def _get_cluster_node(na_server):
- """Get the cluster node for ems."""
- na_server.set_vserver(None)
- vs_get = _create_vs_get()
- res = na_server.invoke_successfully(vs_get)
- if (res.get_child_content('num-records') and
- int(res.get_child_content('num-records')) > 0):
- attr_list = res.get_child_by_name('attributes-list')
- vs_info = attr_list.get_child_by_name('vserver-info')
- vs_name = vs_info.get_child_content('vserver-name')
- return vs_name
- return None
-
- do_ems = True
- if hasattr(requester, 'last_ems'):
- sec_limit = 3559
- if not (timeutils.is_older_than(requester.last_ems, sec_limit)):
- do_ems = False
- if do_ems:
- na_server = copy.copy(server)
- na_server.set_timeout(25)
- ems = _create_ems(netapp_backend, app_version, server_type)
- try:
- if server_type == "cluster":
- api_version = na_server.get_api_version()
- if api_version:
- major, minor = api_version
- else:
- raise NaApiError(code='Not found',
- message='No api version found')
- if major == 1 and minor > 15:
- node = getattr(requester, 'vserver', None)
- else:
- node = _get_cluster_node(na_server)
- if node is None:
- raise NaApiError(code='Not found',
- message='No vserver found')
- na_server.set_vserver(node)
- else:
- na_server.set_vfiler(None)
- na_server.invoke_successfully(ems, True)
- LOG.debug("ems executed successfully.")
- except NaApiError as e:
- LOG.warning(_LW("Failed to invoke ems. Message : %s") % e)
- finally:
- requester.last_ems = timeutils.utcnow()
-
-
def validate_instantiation(**kwargs):
"""Checks if a driver is instantiated other than by the unified driver.
"Please use NetAppDriver to achieve the functionality."))
-def invoke_api(na_server, api_name, api_family='cm', query=None,
- des_result=None, additional_elems=None,
- is_iter=False, records=0, tag=None,
- timeout=0, tunnel=None):
- """Invokes any given api call to a NetApp server.
-
- :param na_server: na_server instance
- :param api_name: api name string
- :param api_family: cm or 7m
- :param query: api query as dict
- :param des_result: desired result as dict
- :param additional_elems: dict other than query and des_result
- :param is_iter: is iterator api
- :param records: limit for records, 0 for infinite
- :param timeout: timeout seconds
- :param tunnel: tunnel entity, vserver or vfiler name
- """
- record_step = 50
-    if not isinstance(na_server, NaServer):
- msg = _("Requires an NaServer instance.")
- raise exception.InvalidInput(reason=msg)
- server = copy.copy(na_server)
- if api_family == 'cm':
- server.set_vserver(tunnel)
- else:
- server.set_vfiler(tunnel)
- if timeout > 0:
- server.set_timeout(timeout)
- iter_records = 0
- cond = True
- while cond:
- na_element = create_api_request(
- api_name, query, des_result, additional_elems,
- is_iter, record_step, tag)
- result = server.invoke_successfully(na_element, True)
- if is_iter:
- if records > 0:
- iter_records = iter_records + record_step
- if iter_records >= records:
- cond = False
- tag_el = result.get_child_by_name('next-tag')
- tag = tag_el.get_content() if tag_el else None
- if not tag:
- cond = False
- else:
- cond = False
- yield result
-
-
-def create_api_request(api_name, query=None, des_result=None,
- additional_elems=None, is_iter=False,
- record_step=50, tag=None):
- """Creates a NetApp api request.
-
- :param api_name: api name string
- :param query: api query as dict
- :param des_result: desired result as dict
- :param additional_elems: dict other than query and des_result
- :param is_iter: is iterator api
- :param record_step: records at a time for iter api
- :param tag: next tag for iter api
- """
- api_el = NaElement(api_name)
- if query:
- query_el = NaElement('query')
- query_el.translate_struct(query)
- api_el.add_child_elem(query_el)
- if des_result:
- res_el = NaElement('desired-attributes')
- res_el.translate_struct(des_result)
- api_el.add_child_elem(res_el)
- if additional_elems:
- api_el.translate_struct(additional_elems)
- if is_iter:
- api_el.add_new_child('max-records', six.text_type(record_step))
- if tag:
- api_el.add_new_child('tag', tag, True)
- return api_el
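# Usage sketch for the paginated helpers above. It assumes `na_server` is an
# already-configured NaServer instance; 'volume-get-iter' is used here only as
# an illustrative clustered Data ONTAP iterator API.
for page in invoke_api(na_server, api_name='volume-get-iter',
                       api_family='cm', is_iter=True, tunnel='vs1'):
    attr_list = page.get_child_by_name('attributes-list')
    if attr_list:
        for volume_attrs in attr_list.get_children():
            print(volume_attrs.get_name())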
+def check_flags(required_flags, configuration):
+ """Ensure that the flags we care about are set."""
+ for flag in required_flags:
+ if not getattr(configuration, flag, None):
+ msg = _('Configuration value %s is not set.') % flag
+ raise exception.InvalidInput(reason=msg)
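# Usage sketch for the helper above, with a hypothetical stand-in for the
# driver configuration object.
class _FakeConfiguration(object):
    netapp_login = 'admin'
    netapp_password = None  # deliberately unset
    netapp_server_hostname = 'filer.example.com'

try:
    check_flags(['netapp_login', 'netapp_password',
                 'netapp_server_hostname'], _FakeConfiguration())
except exception.InvalidInput:
    pass  # raised because netapp_password is not set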
def to_bool(val):
return specs
-def check_apis_on_cluster(na_server, api_list=None):
- """Checks api availability and permissions on cluster.
-
- Checks api availability and permissions for executing user.
- Returns a list of failed apis.
- """
- api_list = api_list or []
- failed_apis = []
- if api_list:
- api_version = na_server.get_api_version()
- if api_version:
- major, minor = api_version
- if major == 1 and minor < 20:
- for api_name in api_list:
- na_el = NaElement(api_name)
- try:
- na_server.invoke_successfully(na_el)
- except Exception as e:
- if isinstance(e, NaApiError):
- if (e.code == NaErrors['API_NOT_FOUND'].code or
- e.code ==
- NaErrors['INSUFFICIENT_PRIVS'].code):
- failed_apis.append(api_name)
- elif major == 1 and minor >= 20:
- failed_apis = copy.copy(api_list)
- result = invoke_api(
- na_server,
- api_name='system-user-capability-get-iter',
- api_family='cm',
- additional_elems=None,
- is_iter=True)
- for res in result:
- attr_list = res.get_child_by_name('attributes-list')
- if attr_list:
- capabilities = attr_list.get_children()
- for capability in capabilities:
- op_list = capability.get_child_by_name(
- 'operation-list')
- if op_list:
- ops = op_list.get_children()
- for op in ops:
- apis = op.get_child_content('api-name')
- if apis:
- api_list = apis.split(',')
- for api_name in api_list:
- if (api_name and
- api_name.strip()
- in failed_apis):
- failed_apis.remove(api_name)
- else:
- continue
- else:
- msg = _("Unsupported Clustered Data ONTAP version.")
- raise exception.VolumeBackendAPIException(data=msg)
- else:
- msg = _("Api version could not be determined.")
- raise exception.VolumeBackendAPIException(data=msg)
- return failed_apis
-
-
def resolve_hostname(hostname):
"""Resolves host name to IP address."""
res = socket.getaddrinfo(hostname, None)[0]
return sockaddr[0]
-def encode_hex_to_base32(hex_string):
- """Encodes hex to base32 bit as per RFC4648."""
- bin_form = binascii.unhexlify(hex_string)
- return base64.b32encode(bin_form)
-
-
-def decode_base32_to_hex(base32_string):
- """Decodes base32 string to hex string."""
- bin_form = base64.b32decode(base32_string)
- return binascii.hexlify(bin_form)
-
-
-def convert_uuid_to_es_fmt(uuid_str):
- """Converts uuid to e-series compatible name format."""
- uuid_base32 = encode_hex_to_base32(uuid.UUID(six.text_type(uuid_str)).hex)
- return uuid_base32.strip('=')
-
-
-def convert_es_fmt_to_uuid(es_label):
- """Converts e-series name format to uuid."""
- es_label_b32 = es_label.ljust(32, '=')
- return uuid.UUID(binascii.hexlify(base64.b32decode(es_label_b32)))
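# Round-trip sketch for the two helpers above (illustrative UUID only).
import uuid

vol_uuid = uuid.uuid4()
es_label = convert_uuid_to_es_fmt(vol_uuid)  # 26-char base32, no padding
assert convert_es_fmt_to_uuid(es_label) == vol_uuid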
-
-
def round_down(value, precision):
return float(decimal.Decimal(six.text_type(value)).quantize(
decimal.Decimal(precision), rounding=decimal.ROUND_DOWN))
"'%{version}\t%{release}\t%{vendor}'",
self.PACKAGE_NAME)
if not out:
- LOG.info(_('No rpm info found for %(pkg)s package.') % {
+ LOG.info(_LI('No rpm info found for %(pkg)s package.') % {
'pkg': self.PACKAGE_NAME})
return False
parts = out.split()
self._vendor = ' '.join(parts[2::])
return True
except Exception as e:
- LOG.info(_('Could not run rpm command: %(msg)s.') % {
- 'msg': e})
+ LOG.info(_LI('Could not run rpm command: %(msg)s.') % {'msg': e})
return False
# ubuntu, mirantis on ubuntu
out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
self.PACKAGE_NAME)
if not out:
- LOG.info(_('No dpkg-query info found for %(pkg)s package.') % {
- 'pkg': self.PACKAGE_NAME})
+ LOG.info(_LI('No dpkg-query info found for %(pkg)s package.')
+ % {'pkg': self.PACKAGE_NAME})
return False
# debian format: [epoch:]upstream_version[-debian_revision]
deb_version = out
self._vendor = _vendor
return True
except Exception as e:
- LOG.info(_('Could not run dpkg-query command: %(msg)s.') % {
+ LOG.info(_LI('Could not run dpkg-query command: %(msg)s.') % {
'msg': e})
return False