From d92f73e04da2723767e242d29a6ca7e89a7475a4 Mon Sep 17 00:00:00 2001 From: Navneet Singh Date: Fri, 17 May 2013 04:19:50 -0700 Subject: [PATCH] NetApp unified driver implementation. NetApp has growing number of multiple drivers depending on storage families and technologies. NetApp unified driver simplifies configuration and provides single entry point for all storage technologies/drivers. It provides new mechanism to support multiple NetApp technologies and block drivers related to them. Deprecated 7mode dfm and c mode webservice based drivers. blueprint netapp-unified-driver Change-Id: Ife3cb12ef2256e0124c9a02a968c20a580b5df93 --- cinder/tests/test_drivers_compatibility.py | 28 - cinder/tests/test_netapp.py | 1654 +++----------------- cinder/tests/test_netapp_nfs.py | 319 +--- cinder/volume/drivers/netapp/common.py | 147 ++ cinder/volume/drivers/netapp/iscsi.py | 1568 +------------------ cinder/volume/drivers/netapp/nfs.py | 318 +--- cinder/volume/drivers/netapp/options.py | 77 + cinder/volume/drivers/netapp/utils.py | 120 ++ cinder/volume/manager.py | 6 - 9 files changed, 713 insertions(+), 3524 deletions(-) create mode 100644 cinder/volume/drivers/netapp/common.py create mode 100644 cinder/volume/drivers/netapp/options.py create mode 100644 cinder/volume/drivers/netapp/utils.py diff --git a/cinder/tests/test_drivers_compatibility.py b/cinder/tests/test_drivers_compatibility.py index 7dc6157de..e6cb6c550 100644 --- a/cinder/tests/test_drivers_compatibility.py +++ b/cinder/tests/test_drivers_compatibility.py @@ -29,10 +29,6 @@ NEXENTA_MODULE = "cinder.volume.drivers.nexenta.volume.NexentaDriver" SAN_MODULE = "cinder.volume.drivers.san.san.SanISCSIDriver" SOLARIS_MODULE = "cinder.volume.drivers.san.solaris.SolarisISCSIDriver" LEFTHAND_MODULE = "cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver" -NETAPP_MODULE = "cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver" -NETAPP_CMODE_MODULE =\ - "cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver" -NETAPP_NFS_MODULE = "cinder.volume.drivers.netapp.nfs.NetAppNFSDriver" NFS_MODULE = "cinder.volume.drivers.nfs.NfsDriver" SOLIDFIRE_MODULE = "cinder.volume.drivers.solidfire.SolidFire" STORWIZE_SVC_MODULE = "cinder.volume.drivers.storwize_svc.StorwizeSVCDriver" @@ -114,30 +110,6 @@ class VolumeDriverCompatibility(test.TestCase): self._load_driver(LEFTHAND_MODULE) self.assertEquals(self._driver_module_name(), LEFTHAND_MODULE) - def test_netapp_old(self): - self._load_driver('cinder.volume.netapp.NetAppISCSIDriver') - self.assertEquals(self._driver_module_name(), NETAPP_MODULE) - - def test_netapp_new(self): - self._load_driver(NETAPP_MODULE) - self.assertEquals(self._driver_module_name(), NETAPP_MODULE) - - def test_netapp_cmode_old(self): - self._load_driver('cinder.volume.netapp.NetAppCmodeISCSIDriver') - self.assertEquals(self._driver_module_name(), NETAPP_CMODE_MODULE) - - def test_netapp_cmode_new(self): - self._load_driver(NETAPP_CMODE_MODULE) - self.assertEquals(self._driver_module_name(), NETAPP_CMODE_MODULE) - - def test_netapp_nfs_old(self): - self._load_driver('cinder.volume.netapp_nfs.NetAppNFSDriver') - self.assertEquals(self._driver_module_name(), NETAPP_NFS_MODULE) - - def test_netapp_nfs_new(self): - self._load_driver(NETAPP_NFS_MODULE) - self.assertEquals(self._driver_module_name(), NETAPP_NFS_MODULE) - def test_nfs_old(self): self._load_driver('cinder.volume.nfs.NfsDriver') self.assertEquals(self._driver_module_name(), NFS_MODULE) diff --git a/cinder/tests/test_netapp.py b/cinder/tests/test_netapp.py index 
c5e573881..d79d58a9d 100644 --- a/cinder/tests/test_netapp.py +++ b/cinder/tests/test_netapp.py @@ -21,595 +21,35 @@ Tests for NetApp volume driver import BaseHTTPServer import httplib -import logging as generic_logging -import shutil import StringIO -import tempfile from lxml import etree +from cinder.exception import InvalidInput from cinder.exception import VolumeBackendAPIException from cinder.openstack.common import log as logging from cinder import test from cinder.volume import configuration as conf -from cinder.volume.drivers.netapp import iscsi -from cinder.volume.drivers.netapp.iscsi import netapp_opts +from cinder.volume.drivers.netapp import common +from cinder.volume.drivers.netapp.options import netapp_7mode_opts +from cinder.volume.drivers.netapp.options import netapp_basicauth_opts +from cinder.volume.drivers.netapp.options import netapp_cluster_opts +from cinder.volume.drivers.netapp.options import netapp_connection_opts +from cinder.volume.drivers.netapp.options import netapp_provisioning_opts +from cinder.volume.drivers.netapp.options import netapp_transport_opts LOG = logging.getLogger("cinder.volume.driver") -#NOTE(rushiagr): A bug in Suds package -# (https://fedorahosted.org/suds/ticket/359) causes nasty errors -# with tests while using debug-level logging. Unfortunately, -# the maintainers of the package stopped tending to any patch -# requests almost two years back. So setting the logging level to -# INFO here seems the only plausible workaround. -generic_logging.getLogger('suds.mx.core').setLevel(generic_logging.INFO) - -WSDL_HEADER = """ -""" - -WSDL_TYPES = """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -""" - -WSDL_TRAILER = """ - - -""" - -RESPONSE_PREFIX = """ -""" - -RESPONSE_SUFFIX = """""" - -APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext', - 'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit', - 'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout', - 'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext', - 'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart', - 'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd', - 'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd', - 'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd', - 'StorageServiceDatasetProvision'] - -iter_count = 0 -iter_table = {} - def create_configuration(): configuration = conf.Configuration(None) - configuration.append_config_values(netapp_opts) + configuration.append_config_values(netapp_connection_opts) + 
configuration.append_config_values(netapp_transport_opts) + configuration.append_config_values(netapp_basicauth_opts) + configuration.append_config_values(netapp_cluster_opts) + configuration.append_config_values(netapp_7mode_opts) + configuration.append_config_values(netapp_provisioning_opts) return configuration @@ -620,315 +60,6 @@ class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): pass -class FakeDfmServerHandler(FakeHTTPRequestHandler): - """HTTP handler that fakes enough stuff to allow the driver to run.""" - - def do_GET(s): - """Respond to a GET request.""" - if '/dfm.wsdl' != s.path: - s.send_response(404) - s.end_headers - return - s.send_response(200) - s.send_header("Content-Type", "application/wsdl+xml") - s.end_headers() - out = s.wfile - out.write(WSDL_HEADER) - out.write(WSDL_TYPES) - for api in APIS: - out.write('' % api) - out.write('' % api) - out.write('') - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - for api in APIS: - out.write('' % api) - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - out.write('') - out.write('') - for api in APIS: - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - out.write('') - out.write('') - out.write(WSDL_TRAILER) - - def do_POST(s): - """Respond to a POST request.""" - if '/apis/soap/v1' != s.path: - s.send_response(404) - s.end_headers - return - request_xml = s.rfile.read(int(s.headers['Content-Length'])) - ntap_ns = 'http://www.netapp.com/management/v1' - nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/', - 'na': ntap_ns} - root = etree.fromstring(request_xml) - - body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0] - request = body.getchildren()[0] - tag = request.tag - if not tag.startswith('{' + ntap_ns + '}'): - s.send_response(500) - s.end_headers - return - api = tag[(2 + len(ntap_ns)):] - global iter_count - global iter_table - if 'DatasetListInfoIterStart' == api: - iter_name = 'dataset_%s' % iter_count - iter_count = iter_count + 1 - iter_table[iter_name] = 0 - body = """ - 1 - %s - """ % iter_name - elif 'DatasetListInfoIterNext' == api: - tags = body.xpath('na:DatasetListInfoIterNext/na:Tag', - namespaces=nsmap) - iter_name = tags[0].text - if iter_table[iter_name]: - body = """ - - 0 - """ - else: - iter_table[iter_name] = 1 - body = """ - - - 0 - - - OpenStackProject - testproj - - - OpenStackVolType - - - - OpenStack_testproj - - - 1 - """ - elif 'DatasetListInfoIterEnd' == api: - body = """""" - elif 'DatasetEditBegin' == api: - body = """ - 0 - """ - elif 'DatasetEditCommit' == api: - body = """ - false - - - 0 - - - """ - elif 'DatasetProvisionMember' == api: - body = """""" - elif 'DatasetRemoveMember' == api: - body = """""" - elif 'DfmAbout' == api: - body = """""" - elif 'DpJobProgressEventListIterStart' == api: - iter_name = 'dpjobprogress_%s' % iter_count - iter_count = iter_count + 1 - iter_table[iter_name] = 0 - body = """ - 2 - %s - """ % iter_name - elif 'DpJobProgressEventListIterNext' == api: - tags = body.xpath('na:DpJobProgressEventListIterNext/na:Tag', - namespaces=nsmap) - iter_name = tags[0].text - if iter_table[iter_name]: - body = """""" - else: - iter_table[iter_name] = 1 - name = ('filer:/OpenStack_testproj/volume-00000001/' - 'volume-00000001') - body = """ - - - normal - lun-create - - 0 - %s - - - - normal - job-end - - - 2 - """ % name - elif 'DpJobProgressEventListIterEnd' == api: - body = """""" - elif 'DatasetMemberListInfoIterStart' == api: - iter_name = 
'datasetmember_%s' % iter_count - iter_count = iter_count + 1 - iter_table[iter_name] = 0 - body = """ - 1 - %s - """ % iter_name - elif 'DatasetMemberListInfoIterNext' == api: - tags = body.xpath('na:DatasetMemberListInfoIterNext/na:Tag', - namespaces=nsmap) - iter_name = tags[0].text - if iter_table[iter_name]: - body = """ - - 0 - """ - else: - iter_table[iter_name] = 1 - name = ('filer:/OpenStack_testproj/volume-00000001/' - 'volume-00000001') - body = """ - - - 0 - %s - - - 1 - """ % name - elif 'DatasetMemberListInfoIterEnd' == api: - body = """""" - elif 'HostListInfoIterStart' == api: - body = """ - 1 - host - """ - elif 'HostListInfoIterNext' == api: - body = """ - - - 1.2.3.4 - 0 - filer - - - 1 - """ - elif 'HostListInfoIterEnd' == api: - body = """""" - elif 'LunListInfoIterStart' == api: - body = """ - 1 - lun - """ - elif 'LunListInfoIterNext' == api: - path = 'OpenStack_testproj/volume-00000001/volume-00000001' - body = """ - - - 0 - %s - volume-00000001 - - - 1 - """ % path - elif 'LunListInfoIterEnd' == api: - body = """""" - elif 'ApiProxy' == api: - names = body.xpath('na:ApiProxy/na:Request/na:Name', - namespaces=nsmap) - proxy = names[0].text - if 'clone-list-status' == proxy: - op_elem = body.xpath('na:ApiProxy/na:Request/na:Args/' - 'clone-id/clone-id-info/clone-op-id', - namespaces=nsmap) - proxy_body = """ - - completed - - """ - if '0' == op_elem[0].text: - proxy_body = '' - elif 'clone-start' == proxy: - proxy_body = """ - - 1 - xxx - - """ - elif 'igroup-list-info' == proxy: - igroup = 'openstack-iqn.1993-08.org.debian:01:23456789' - initiator = 'iqn.1993-08.org.debian:01:23456789' - proxy_body = """ - - %s - iscsi - linux - - - %s - - - - """ % (igroup, initiator) - elif 'igroup-create' == proxy: - proxy_body = '' - elif 'igroup-add' == proxy: - proxy_body = '' - elif 'lun-map-list-info' == proxy: - proxy_body = '' - elif 'lun-map' == proxy: - proxy_body = '0' - elif 'lun-unmap' == proxy: - proxy_body = '' - elif 'iscsi-portal-list-info' == proxy: - proxy_body = """ - - 1.2.3.4 - 3260 - 1000 - - """ - elif 'iscsi-node-get-name' == proxy: - target = 'iqn.1992-08.com.netapp:sn.111111111' - proxy_body = '%s' % target - else: - # Unknown proxy API - s.send_response(500) - s.end_headers - return - api = api + ':' + proxy - proxy_header = '' - proxy_trailer = """passed - """ - body = proxy_header + proxy_body + proxy_trailer - else: - # Unknown API - s.send_response(500) - s.end_headers - return - s.send_response(200) - s.send_header("Content-Type", "text/xml; charset=utf-8") - s.end_headers() - s.wfile.write(RESPONSE_PREFIX) - s.wfile.write(body) - s.wfile.write(RESPONSE_SUFFIX) - - class FakeHttplibSocket(object): """A fake socket implementation for httplib.HTTPResponse""" def __init__(self, value): @@ -949,550 +80,6 @@ class FakeHttplibSocket(object): return self._wbuffer -class FakeHTTPConnection(object): - """A fake httplib.HTTPConnection for netapp tests - - Requests made via this connection actually get translated and routed into - the fake Dfm handler above, we then turn the response into - the httplib.HTTPResponse that the caller expects. 
- """ - def __init__(self, host, timeout=None): - self.host = host - - def request(self, method, path, data=None, headers=None): - if not headers: - headers = {} - req_str = '%s %s HTTP/1.1\r\n' % (method, path) - for key, value in headers.iteritems(): - req_str += "%s: %s\r\n" % (key, value) - if data: - req_str += '\r\n%s' % data - - # NOTE(vish): normally the http transport normailizes from unicode - sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) - # NOTE(vish): stop the server from trying to look up address from - # the fake socket - FakeDfmServerHandler.address_string = lambda x: '127.0.0.1' - self.app = FakeDfmServerHandler(sock, '127.0.0.1:8088', None) - - self.sock = FakeHttplibSocket(sock.result) - self.http_response = httplib.HTTPResponse(self.sock) - - def set_debuglevel(self, level): - pass - - def getresponse(self): - self.http_response.begin() - return self.http_response - - def getresponsebody(self): - return self.sock.result - - -class NetAppDriverTestCase(test.TestCase): - """Test case for NetAppISCSIDriver""" - STORAGE_SERVICE = 'Openstack Service' - STORAGE_SERVICE_PREFIX = 'Openstack Service-' - PROJECT_ID = 'testproj' - VOLUME_NAME = 'volume-00000001' - VOLUME_TYPE = '' - VOLUME_SIZE = 2147483648L # 2 GB - INITIATOR = 'iqn.1993-08.org.debian:01:23456789' - - def setUp(self): - super(NetAppDriverTestCase, self).setUp() - self.tempdir = tempfile.mkdtemp() - self.flags(lock_path=self.tempdir) - driver = iscsi.NetAppISCSIDriver(configuration=create_configuration()) - self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection) - driver._create_client(wsdl_url='http://localhost:8088/dfm.wsdl', - login='root', password='password', - hostname='localhost', port=8088, cache=False) - driver._set_storage_service(self.STORAGE_SERVICE) - driver._set_storage_service_prefix(self.STORAGE_SERVICE_PREFIX) - driver._set_vfiler('') - self.driver = driver - - def tearDown(self): - shutil.rmtree(self.tempdir) - super(NetAppDriverTestCase, self).tearDown() - - def test_connect(self): - self.driver.check_for_setup_error() - - def test_create_destroy(self): - self.driver._discover_luns() - self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID, - self.VOLUME_TYPE, self.VOLUME_SIZE) - self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID) - - def test_destroy_uncreated_volume(self): - self.driver._remove_destroy('fake-nonexistent-volume', self.PROJECT_ID) - - def test_map_unmap(self): - self.driver._discover_luns() - self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID, - self.VOLUME_TYPE, self.VOLUME_SIZE) - volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID, - 'id': 0, 'provider_auth': None} - updates = self.driver._get_export(volume) - self.assertTrue(updates['provider_location']) - volume['provider_location'] = updates['provider_location'] - connector = {'initiator': self.INITIATOR} - connection_info = self.driver.initialize_connection(volume, connector) - self.assertEqual(connection_info['driver_volume_type'], 'iscsi') - properties = connection_info['data'] - self.driver.terminate_connection(volume, connector) - self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID) - - def test_clone(self): - self.driver._discover_luns() - self.driver._clone_lun(0, '/vol/vol/qtree/src', '/vol/vol/qtree/dst', - False) - - def test_clone_fail(self): - self.driver._discover_luns() - self.driver._is_clone_done(0, '0', 'xxx') - - def test_cloned_volume_size_fail(self): - volume_clone_fail = {'name': 'fail', 'size': '2'} - volume_src = 
{'name': 'source_vol', 'size': '1'} - try: - self.driver.create_cloned_volume(volume_clone_fail, - volume_src) - raise AssertionError() - except VolumeBackendAPIException: - pass - - -WSDL_HEADER_CMODE = """ - -""" - -WSDL_TYPES_CMODE = """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - """ - -WSDL_TRAILER_CMODE = """ - - - - -""" - -RESPONSE_PREFIX_CMODE = """ - -""" - -RESPONSE_SUFFIX_CMODE = """""" - -CMODE_APIS = ['ProvisionLun', 'DestroyLun', 'CloneLun', 'MapLun', 'UnmapLun', - 'ListLuns', 'GetLunTargetDetails'] - - -class FakeCMODEServerHandler(FakeHTTPRequestHandler): - """HTTP handler that fakes enough stuff to allow the driver to run""" - - def do_GET(s): - """Respond to a GET request.""" - if '/ntap_cloud.wsdl' != s.path: - s.send_response(404) - s.end_headers - return - s.send_response(200) - s.send_header("Content-Type", "application/wsdl+xml") - s.end_headers() - out = s.wfile - out.write(WSDL_HEADER_CMODE) - out.write(WSDL_TYPES_CMODE) - for api in CMODE_APIS: - out.write('' % api) - out.write('' % api) - out.write('') - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - for api in CMODE_APIS: - out.write('' % api) - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - out.write('') - out.write('') - for api in CMODE_APIS: - out.write('' % api) - out.write('') - out.write('') - out.write('') - out.write('') - out.write('') - out.write(WSDL_TRAILER_CMODE) - - def do_POST(s): - """Respond to a POST request.""" - if '/ws/ntapcloud' != s.path: - s.send_response(404) - s.end_headers - return - request_xml = s.rfile.read(int(s.headers['Content-Length'])) - ntap_ns = 'http://cloud.netapp.com/' - nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/', - 'na': ntap_ns} - root = etree.fromstring(request_xml) - - body = root.xpath('/soapenv:Envelope/soapenv:Body', - namespaces=nsmap)[0] - request = body.getchildren()[0] - tag = request.tag - if not tag.startswith('{' + ntap_ns + '}'): - s.send_response(500) - s.end_headers - return - api = tag[(2 + len(ntap_ns)):] - if 'ProvisionLun' == api: - body = """ - lun120 - 1d9c006c-a406-42f6-a23f-5ed7a6dc33e3 - OsType - linux - """ - elif 'DestroyLun' == api: - body = """""" - elif 'CloneLun' == api: - body = """ - snapshot12 - 98ea1791d228453899d422b4611642c3 - OsType - linux - """ - elif 'MapLun' == api: - body = """""" - elif 'Unmap' == api: - body = """""" - elif 'ListLuns' == api: - body = """ - - lun1 - 20 - asdjdnsd - - """ - elif 'GetLunTargetDetails' == api: - body = """ - -
1.2.3.4
- 3260 - 1000 - iqn.199208.com.netapp:sn.123456789 - 0 -
-
""" - else: - # Unknown API - s.send_response(500) - s.end_headers - return - s.send_response(200) - s.send_header("Content-Type", "text/xml; charset=utf-8") - s.end_headers() - s.wfile.write(RESPONSE_PREFIX_CMODE) - s.wfile.write(body) - s.wfile.write(RESPONSE_SUFFIX_CMODE) - - -class FakeCmodeHTTPConnection(object): - """A fake httplib.HTTPConnection for netapp tests - - Requests made via this connection actually get translated and routed into - the fake Dfm handler above, we then turn the response into - the httplib.HTTPResponse that the caller expects. - """ - def __init__(self, host, timeout=None): - self.host = host - - def request(self, method, path, data=None, headers=None): - if not headers: - headers = {} - req_str = '%s %s HTTP/1.1\r\n' % (method, path) - for key, value in headers.iteritems(): - req_str += "%s: %s\r\n" % (key, value) - if data: - req_str += '\r\n%s' % data - - # NOTE(vish): normally the http transport normailizes from unicode - sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) - # NOTE(vish): stop the server from trying to look up address from - # the fake socket - FakeCMODEServerHandler.address_string = lambda x: '127.0.0.1' - self.app = FakeCMODEServerHandler(sock, '127.0.0.1:8080', None) - - self.sock = FakeHttplibSocket(sock.result) - self.http_response = httplib.HTTPResponse(self.sock) - - def set_debuglevel(self, level): - pass - - def getresponse(self): - self.http_response.begin() - return self.http_response - - def getresponsebody(self): - return self.sock.result - - -class NetAppCmodeISCSIDriverTestCase(test.TestCase): - """Test case for NetAppISCSIDriver""" - volume = {'name': 'lun1', 'size': 2, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'lun1', - 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None} - snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1', - 'volume_size': 2, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None} - snapshot_fail = {'name': 'snapshot2', 'size': 2, 'volume_name': 'lun1', - 'volume_size': 1, 'project_id': 'project'} - volume_sec = {'name': 'vol_snapshot', 'size': 2, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'lun1', - 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None} - volume_clone_fail = {'name': 'cl_fail', 'size': 1, 'volume_name': 'fail', - 'os_type': 'linux', 'provider_location': 'cl_fail', - 'id': 'lun1', 'provider_auth': None, - 'project_id': 'project', 'display_name': None, - 'display_description': 'lun1', - 'volume_type_id': None} - connector = {'initiator': 'iqn.1993-08.org.debian:01:10'} - - def setUp(self): - super(NetAppCmodeISCSIDriverTestCase, self).setUp() - self._custom_setup() - - def _custom_setup(self): - driver = iscsi.NetAppCmodeISCSIDriver( - configuration=create_configuration()) - self.stubs.Set(httplib, 'HTTPConnection', FakeCmodeHTTPConnection) - driver._create_client(wsdl_url='http://localhost:8080/ntap_cloud.wsdl', - login='root', password='password', - hostname='localhost', port=8080, cache=False) - self.driver = driver - - def test_connect(self): - self.driver.check_for_setup_error() - - def test_create_destroy(self): - self.driver.create_volume(self.volume) - self.driver.delete_volume(self.volume) - - def test_create_vol_snapshot_destroy(self): - self.driver.create_volume(self.volume) - 
self.driver.create_snapshot(self.snapshot) - self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot) - self.driver.delete_snapshot(self.snapshot) - self.driver.delete_volume(self.volume) - - def test_map_unmap(self): - self.driver.create_volume(self.volume) - updates = self.driver.create_export(None, self.volume) - self.assertTrue(updates['provider_location']) - self.volume['provider_location'] = updates['provider_location'] - - connection_info = self.driver.initialize_connection(self.volume, - self.connector) - self.assertEqual(connection_info['driver_volume_type'], 'iscsi') - properties = connection_info['data'] - if not properties: - raise AssertionError('Target portal is none') - self.driver.terminate_connection(self.volume, self.connector) - self.driver.delete_volume(self.volume) - - def test_fail_vol_from_snapshot_creation(self): - self.driver.create_volume(self.volume) - try: - self.driver.create_volume_from_snapshot(self.volume, - self.snapshot_fail) - raise AssertionError() - except VolumeBackendAPIException: - pass - finally: - self.driver.delete_volume(self.volume) - - def test_cloned_volume_destroy(self): - self.driver.create_volume(self.volume) - self.driver.create_cloned_volume(self.snapshot, self.volume) - self.driver.delete_volume(self.snapshot) - self.driver.delete_volume(self.volume) - - def test_fail_cloned_volume_creation(self): - self.driver.create_volume(self.volume) - try: - self.driver.create_cloned_volume(self.volume_clone_fail, - self.volume) - raise AssertionError() - except VolumeBackendAPIException: - pass - finally: - self.driver.delete_volume(self.volume) - - RESPONSE_PREFIX_DIRECT_CMODE = """ """ @@ -1792,6 +379,16 @@ class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler): 1 19 """ + elif 'vserver-get-iter' == api: + body = """ + + vserver + node + + + 1""" + elif 'ems-autosupport-log' == api: + body = """""" else: # Unknown API s.send_response(500) @@ -1861,9 +458,32 @@ class FakeDirectCmodeHTTPConnection(object): return self.sock.result -class NetAppDirectCmodeISCSIDriverTestCase(NetAppCmodeISCSIDriverTestCase): +class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase): """Test case for NetAppISCSIDriver""" + volume = {'name': 'lun1', 'size': 2, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'lun1', + 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1', + 'volume_size': 2, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + snapshot_fail = {'name': 'snapshot2', 'size': 2, 'volume_name': 'lun1', + 'volume_size': 1, 'project_id': 'project'} + volume_sec = {'name': 'vol_snapshot', 'size': 2, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'lun1', + 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + volume_clone_fail = {'name': 'cl_fail', 'size': 1, 'volume_name': 'fail', + 'os_type': 'linux', 'provider_location': 'cl_fail', + 'id': 'lun1', 'provider_auth': None, + 'project_id': 'project', 'display_name': None, + 'display_description': 'lun1', + 'volume_type_id': None} + connector = {'initiator': 'iqn.1993-08.org.debian:01:10'} vol_fail = {'name': 'lun_fail', 'size': 10000, 'volume_name': 'lun1', 'os_type': 'linux', 'provider_location': 'lun1', 'id': 'lun1', 'provider_auth': None, 'project_id': 
'project', @@ -1872,20 +492,85 @@ class NetAppDirectCmodeISCSIDriverTestCase(NetAppCmodeISCSIDriverTestCase): def setUp(self): super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp() + self._custom_setup() def _custom_setup(self): - driver = iscsi.NetAppDirectCmodeISCSIDriver( - configuration=create_configuration()) + configuration = self._set_config(create_configuration()) + driver = common.NetAppDriver(configuration=configuration) self.stubs.Set(httplib, 'HTTPConnection', FakeDirectCmodeHTTPConnection) - driver._create_client(transport_type='http', - login='admin', password='pass', - hostname='127.0.0.1', - port='80') - driver.vserver = 'openstack' - driver.client.set_api_version(1, 15) + driver.do_setup(context='') + client = driver.client + client.set_api_version(1, 15) self.driver = driver + def _set_config(self, configuration): + configuration.netapp_storage_protocol = 'iscsi' + configuration.netapp_login = 'admin' + configuration.netapp_password = 'pass' + configuration.netapp_server_hostname = '127.0.0.1' + configuration.netapp_transport_type = 'http' + configuration.netapp_server_port = '80' + configuration.netapp_vserver = 'openstack' + return configuration + + def test_connect(self): + self.driver.check_for_setup_error() + + def test_create_destroy(self): + self.driver.create_volume(self.volume) + self.driver.delete_volume(self.volume) + + def test_create_vol_snapshot_destroy(self): + self.driver.create_volume(self.volume) + self.driver.create_snapshot(self.snapshot) + self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot) + self.driver.delete_snapshot(self.snapshot) + self.driver.delete_volume(self.volume) + + def test_map_unmap(self): + self.driver.create_volume(self.volume) + updates = self.driver.create_export(None, self.volume) + self.assertTrue(updates['provider_location']) + self.volume['provider_location'] = updates['provider_location'] + + connection_info = self.driver.initialize_connection(self.volume, + self.connector) + self.assertEqual(connection_info['driver_volume_type'], 'iscsi') + properties = connection_info['data'] + if not properties: + raise AssertionError('Target portal is none') + self.driver.terminate_connection(self.volume, self.connector) + self.driver.delete_volume(self.volume) + + def test_fail_vol_from_snapshot_creation(self): + self.driver.create_volume(self.volume) + try: + self.driver.create_volume_from_snapshot(self.volume, + self.snapshot_fail) + raise AssertionError() + except VolumeBackendAPIException: + pass + finally: + self.driver.delete_volume(self.volume) + + def test_cloned_volume_destroy(self): + self.driver.create_volume(self.volume) + self.driver.create_cloned_volume(self.snapshot, self.volume) + self.driver.delete_volume(self.snapshot) + self.driver.delete_volume(self.volume) + + def test_fail_cloned_volume_creation(self): + self.driver.create_volume(self.volume) + try: + self.driver.create_cloned_volume(self.volume_clone_fail, + self.volume) + raise AssertionError() + except VolumeBackendAPIException: + pass + finally: + self.driver.delete_volume(self.volume) + def test_map_by_creating_igroup(self): self.driver.create_volume(self.volume) updates = self.driver.create_export(None, self.volume) @@ -1903,6 +588,49 @@ class NetAppDirectCmodeISCSIDriverTestCase(NetAppCmodeISCSIDriverTestCase): self.assertRaises(VolumeBackendAPIException, self.driver.create_volume, self.vol_fail) + def test_vol_stats(self): + self.driver.get_volume_stats(refresh=True) + + +class NetAppDriverNegativeTestCase(test.TestCase): + """Test case 
for NetAppDriver""" + + def setUp(self): + super(NetAppDriverNegativeTestCase, self).setUp() + + def test_incorrect_family(self): + configuration = create_configuration() + configuration.netapp_storage_family = 'xyz_abc' + try: + driver = common.NetAppDriver(configuration=configuration) + raise AssertionError('Wrong storage family is getting accepted.') + except InvalidInput: + pass + + def test_incorrect_protocol(self): + configuration = create_configuration() + configuration.netapp_storage_family = 'ontap' + configuration.netapp_storage_protocol = 'ontap' + try: + driver = common.NetAppDriver(configuration=configuration) + raise AssertionError('Wrong storage protocol is getting accepted.') + except InvalidInput: + pass + + def test_non_netapp_driver(self): + configuration = create_configuration() + common.netapp_unified_plugin_registry['test_family'] =\ + {'iscsi': 'cinder.volume.drivers.arbitrary.IscsiDriver'} + configuration.netapp_storage_family = 'test_family' + configuration.netapp_storage_protocol = 'iscsi' + try: + driver = common.NetAppDriver(configuration=configuration) + raise AssertionError('Non NetApp driver is getting instantiated.') + except InvalidInput: + pass + finally: + common.netapp_unified_plugin_registry.pop('test_family') + class FakeDirect7MODEServerHandler(FakeHTTPRequestHandler): """HTTP handler that fakes enough stuff to allow the driver to run""" @@ -2243,6 +971,8 @@ class FakeDirect7MODEServerHandler(FakeHTTPRequestHandler): """ elif 'lun-set-space-reservation-info' == api: body = """""" + elif 'ems-autosupport-log' == api: + body = """""" else: # Unknown API s.send_response(500) @@ -2306,18 +1036,25 @@ class NetAppDirect7modeISCSIDriverTestCase_NV( super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp() def _custom_setup(self): - driver = iscsi.NetAppDirect7modeISCSIDriver( - configuration=create_configuration()) - self.stubs.Set(httplib, - 'HTTPConnection', FakeDirect7modeHTTPConnection) - driver._create_client(transport_type='http', - login='admin', password='pass', - hostname='127.0.0.1', - port='80') - driver.vfiler = None - driver.volume_list = None + configuration = self._set_config(create_configuration()) + driver = common.NetAppDriver(configuration=configuration) + self.stubs.Set(httplib, 'HTTPConnection', + FakeDirect7modeHTTPConnection) + driver.do_setup(context='') + client = driver.client + client.set_api_version(1, 7) self.driver = driver + def _set_config(self, configuration): + configuration.netapp_storage_family = 'ontap_7mode' + configuration.netapp_storage_protocol = 'iscsi' + configuration.netapp_login = 'admin' + configuration.netapp_password = 'pass' + configuration.netapp_server_hostname = '127.0.0.1' + configuration.netapp_transport_type = 'http' + configuration.netapp_server_port = '80' + return configuration + def test_create_on_select_vol(self): self.driver.volume_list = ['vol0', 'vol1'] self.driver.create_volume(self.volume) @@ -2347,15 +1084,22 @@ class NetAppDirect7modeISCSIDriverTestCase_WV( super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp() def _custom_setup(self): - driver = iscsi.NetAppDirect7modeISCSIDriver( - configuration=create_configuration()) + configuration = self._set_config(create_configuration()) + driver = common.NetAppDriver(configuration=configuration) self.stubs.Set(httplib, 'HTTPConnection', FakeDirect7modeHTTPConnection) - driver._create_client(transport_type='http', - login='admin', password='pass', - hostname='127.0.0.1', - port='80') - driver.vfiler = 'vfiler' - 
driver.client.set_api_version(1, 7) - driver.volume_list = None + driver.do_setup(context='') + client = driver.client + client.set_api_version(1, 7) self.driver = driver + + def _set_config(self, configuration): + configuration.netapp_storage_family = 'ontap_7mode' + configuration.netapp_storage_protocol = 'iscsi' + configuration.netapp_login = 'admin' + configuration.netapp_password = 'pass' + configuration.netapp_server_hostname = '127.0.0.1' + configuration.netapp_transport_type = 'http' + configuration.netapp_server_port = '80' + configuration.netapp_vfiler = 'openstack' + return configuration diff --git a/cinder/tests/test_netapp_nfs.py b/cinder/tests/test_netapp_nfs.py index 71b0934e2..5a4b16f96 100644 --- a/cinder/tests/test_netapp_nfs.py +++ b/cinder/tests/test_netapp_nfs.py @@ -14,7 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs).""" +"""Unit tests for the NetApp-specific NFS driver module.""" from cinder import context from cinder import exception @@ -23,15 +23,12 @@ from cinder import test from cinder.volume import configuration as conf from cinder.volume.drivers.netapp import api from cinder.volume.drivers.netapp import nfs as netapp_nfs -from cinder.volume.drivers import nfs from lxml import etree from mox import IgnoreArg from mox import IsA from mox import MockObject import mox -import suds -import types def create_configuration(): @@ -74,63 +71,11 @@ class FakeResponce(object): self.Reason = 'Sample error' -class NetappNfsDriverTestCase(test.TestCase): - """Test case for NetApp specific NFS clone driver.""" - +class NetappDirectCmodeNfsDriverTestCase(test.TestCase): + """Test direct NetApp C Mode driver.""" def setUp(self): - super(NetappNfsDriverTestCase, self).setUp() - - self._driver = netapp_nfs.NetAppNFSDriver( - configuration=create_configuration()) - - def test_check_for_setup_error(self): - mox = self.mox - drv = self._driver - required_flags = ['netapp_wsdl_url', - 'netapp_login', - 'netapp_password', - 'netapp_server_hostname', - 'netapp_server_port'] - - # set required flags - for flag in required_flags: - setattr(drv.configuration, flag, None) - - # check exception raises when flags are not set - self.assertRaises(exception.CinderException, - drv.check_for_setup_error) - - # set required flags - for flag in required_flags: - setattr(drv.configuration, flag, 'val') - - mox.StubOutWithMock(nfs.NfsDriver, 'check_for_setup_error') - nfs.NfsDriver.check_for_setup_error() - mox.ReplayAll() - - drv.check_for_setup_error() - - mox.VerifyAll() - - # restore initial FLAGS - for flag in required_flags: - delattr(drv.configuration, flag) - - def test_do_setup(self): - mox = self.mox - drv = self._driver - - mox.StubOutWithMock(drv, 'check_for_setup_error') - mox.StubOutWithMock(drv, '_get_client') - - drv.check_for_setup_error() - drv._get_client() - - mox.ReplayAll() - - drv.do_setup(IsA(context.RequestContext)) - - mox.VerifyAll() + super(NetappDirectCmodeNfsDriverTestCase, self).setUp() + self._custom_setup() def test_create_snapshot(self): """Test snapshot can be created and deleted.""" @@ -174,212 +119,6 @@ class NetappNfsDriverTestCase(test.TestCase): mox.VerifyAll() - def _prepare_delete_snapshot_mock(self, snapshot_exists): - drv = self._driver - mox = self.mox - - mox.StubOutWithMock(drv, '_get_provider_location') - mox.StubOutWithMock(drv, '_volume_not_present') - - 
if snapshot_exists: - mox.StubOutWithMock(drv, '_execute') - mox.StubOutWithMock(drv, '_get_volume_path') - - drv._get_provider_location(IgnoreArg()) - drv._volume_not_present(IgnoreArg(), - IgnoreArg()).AndReturn(not snapshot_exists) - - if snapshot_exists: - drv._get_volume_path(IgnoreArg(), IgnoreArg()) - drv._execute('rm', None, run_as_root=True) - - mox.ReplayAll() - - return mox - - def test_delete_existing_snapshot(self): - drv = self._driver - mox = self._prepare_delete_snapshot_mock(True) - - drv.delete_snapshot(FakeSnapshot()) - - mox.VerifyAll() - - def test_delete_missing_snapshot(self): - drv = self._driver - mox = self._prepare_delete_snapshot_mock(False) - - drv.delete_snapshot(FakeSnapshot()) - - mox.VerifyAll() - - def _prepare_clone_mock(self, status): - drv = self._driver - mox = self.mox - - volume = FakeVolume() - setattr(volume, 'provider_location', '127.0.0.1:/nfs') - - drv._client = MockObject(suds.client.Client) - drv._client.factory = MockObject(suds.client.Factory) - drv._client.service = MockObject(suds.client.ServiceSelector) - - # ApiProxy() method is generated by ServiceSelector at runtime from the - # XML, so mocking is impossible. - setattr(drv._client.service, - 'ApiProxy', - types.MethodType(lambda *args, **kwargs: FakeResponce(status), - suds.client.ServiceSelector)) - mox.StubOutWithMock(drv, '_get_host_id') - mox.StubOutWithMock(drv, '_get_full_export_path') - - drv._get_host_id(IgnoreArg()).AndReturn('10') - drv._get_full_export_path(IgnoreArg(), IgnoreArg()).AndReturn('/nfs') - - return mox - - def test_successfull_clone_volume(self): - drv = self._driver - mox = self._prepare_clone_mock('passed') - # set required flags - setattr(drv.configuration, 'synchronous_snapshot_create', False) - mox.ReplayAll() - - volume_name = 'volume_name' - clone_name = 'clone_name' - volume_id = volume_name + str(hash(volume_name)) - - drv._clone_volume(volume_name, clone_name, volume_id) - - mox.VerifyAll() - - def test_failed_clone_volume(self): - drv = self._driver - mox = self._prepare_clone_mock('failed') - - mox.ReplayAll() - - volume_name = 'volume_name' - clone_name = 'clone_name' - volume_id = volume_name + str(hash(volume_name)) - - self.assertRaises(exception.CinderException, - drv._clone_volume, - volume_name, clone_name, volume_id) - - mox.VerifyAll() - - def test_cloned_volume_size_fail(self): - volume_clone_fail = FakeVolume(1) - volume_src = FakeVolume(2) - try: - self._driver.create_cloned_volume(volume_clone_fail, - volume_src) - raise AssertionError() - except exception.CinderException: - pass - - -class NetappCmodeNfsDriverTestCase(test.TestCase): - """Test case for NetApp C Mode specific NFS clone driver""" - - def setUp(self): - super(NetappCmodeNfsDriverTestCase, self).setUp() - self._custom_setup() - - def _custom_setup(self): - self._driver = netapp_nfs.NetAppCmodeNfsDriver( - configuration=create_configuration()) - - def test_check_for_setup_error(self): - mox = self.mox - drv = self._driver - required_flags = [ - 'netapp_wsdl_url', - 'netapp_login', - 'netapp_password', - 'netapp_server_hostname', - 'netapp_server_port'] - - # set required flags - for flag in required_flags: - setattr(drv.configuration, flag, None) - # check exception raises when flags are not set - self.assertRaises(exception.CinderException, - drv.check_for_setup_error) - - # set required flags - for flag in required_flags: - setattr(drv.configuration, flag, 'val') - - mox.ReplayAll() - - drv.check_for_setup_error() - - mox.VerifyAll() - - # restore initial FLAGS - for flag 
in required_flags: - delattr(drv.configuration, flag) - - def test_do_setup(self): - mox = self.mox - drv = self._driver - - mox.StubOutWithMock(drv, 'check_for_setup_error') - mox.StubOutWithMock(drv, '_get_client') - - drv.check_for_setup_error() - drv._get_client() - - mox.ReplayAll() - - drv.do_setup(IsA(context.RequestContext)) - - mox.VerifyAll() - - def test_create_snapshot(self): - """Test snapshot can be created and deleted""" - mox = self.mox - drv = self._driver - - mox.StubOutWithMock(drv, '_clone_volume') - drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) - mox.ReplayAll() - - drv.create_snapshot(FakeSnapshot()) - - mox.VerifyAll() - - def test_create_volume_from_snapshot(self): - """Tests volume creation from snapshot""" - drv = self._driver - mox = self.mox - volume = FakeVolume(1) - snapshot = FakeSnapshot(2) - - self.assertRaises(exception.CinderException, - drv.create_volume_from_snapshot, - volume, - snapshot) - - snapshot = FakeSnapshot(1) - - location = '127.0.0.1:/nfs' - expected_result = {'provider_location': location} - mox.StubOutWithMock(drv, '_clone_volume') - mox.StubOutWithMock(drv, '_get_volume_location') - drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) - drv._get_volume_location(IgnoreArg()).AndReturn(location) - - mox.ReplayAll() - - loc = drv.create_volume_from_snapshot(volume, snapshot) - - self.assertEquals(loc, expected_result) - - mox.VerifyAll() - def _prepare_delete_snapshot_mock(self, snapshot_exists): drv = self._driver mox = self.mox @@ -419,44 +158,6 @@ class NetappCmodeNfsDriverTestCase(test.TestCase): mox.VerifyAll() - def _prepare_clone_mock(self, status): - drv = self._driver - mox = self.mox - - volume = FakeVolume() - setattr(volume, 'provider_location', '127.0.0.1:/nfs') - - drv._client = MockObject(suds.client.Client) - drv._client.factory = MockObject(suds.client.Factory) - drv._client.service = MockObject(suds.client.ServiceSelector) - # CloneNasFile method is generated by ServiceSelector at runtime from - # the - # XML, so mocking is impossible. 
- setattr(drv._client.service, - 'CloneNasFile', - types.MethodType(lambda *args, **kwargs: FakeResponce(status), - suds.client.ServiceSelector)) - mox.StubOutWithMock(drv, '_get_host_ip') - mox.StubOutWithMock(drv, '_get_export_path') - - drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1') - drv._get_export_path(IgnoreArg()).AndReturn('/nfs') - return mox - - def test_clone_volume(self): - drv = self._driver - mox = self._prepare_clone_mock('passed') - - mox.ReplayAll() - - volume_name = 'volume_name' - clone_name = 'clone_name' - volume_id = volume_name + str(hash(volume_name)) - - drv._clone_volume(volume_name, clone_name, volume_id) - - mox.VerifyAll() - def test_cloned_volume_size_fail(self): volume_clone_fail = FakeVolume(1) volume_src = FakeVolume(2) @@ -467,12 +168,12 @@ class NetappCmodeNfsDriverTestCase(test.TestCase): except exception.CinderException: pass - -class NetappDirectCmodeNfsDriverTestCase(NetappCmodeNfsDriverTestCase): - """Test direct NetApp C Mode driver""" def _custom_setup(self): + kwargs = {} + kwargs['netapp_mode'] = 'proxy' + kwargs['configuration'] = create_configuration() self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver( - configuration=create_configuration()) + **kwargs) def test_check_for_setup_error(self): mox = self.mox @@ -591,7 +292,7 @@ class NetappDirectCmodeNfsDriverTestCase(NetappCmodeNfsDriverTestCase): class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase): - """Test direct NetApp C Mode driver""" + """Test direct NetApp C Mode driver.""" def _custom_setup(self): self._driver = netapp_nfs.NetAppDirect7modeNfsDriver( configuration=create_configuration()) diff --git a/cinder/volume/drivers/netapp/common.py b/cinder/volume/drivers/netapp/common.py new file mode 100644 index 000000000..219cb26c9 --- /dev/null +++ b/cinder/volume/drivers/netapp/common.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unified driver for NetApp storage systems. + +Supports call to multiple storage systems of different families and protocols. +""" + +from cinder import exception +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.volume.drivers.netapp.options import netapp_proxy_opts +from oslo.config import cfg + + +LOG = logging.getLogger(__name__) + + +CONF = cfg.CONF +CONF.register_opts(netapp_proxy_opts) + + +#NOTE(singn): Holds family:{protocol:driver} registration information. +#Plug in new families and protocols to support new drivers. +#No other code modification required. 
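[Editorial sketch, not part of the patch] The registry defined just below maps a storage family to a {protocol: driver class path} dictionary, and NetAppDriver resolves an entry from it at construction time. The following usage sketch mirrors the negative test added in this patch; the 'my_family' key and MyISCSIDriver path are hypothetical placeholders.

    # Hypothetical example -- shows how the family/protocol registry is consumed.
    from cinder.volume import configuration as conf
    from cinder.volume.drivers.netapp import common

    # Plug in a new family/protocol pair. The module path must contain
    # ".netapp." or check_netapp_driver() rejects it with InvalidInput.
    common.netapp_unified_plugin_registry['my_family'] = {
        'iscsi': 'cinder.volume.drivers.netapp.my_module.MyISCSIDriver'}

    # The tests in this patch set options directly on a Configuration object.
    configuration = conf.Configuration(None)
    configuration.netapp_storage_family = 'my_family'
    configuration.netapp_storage_protocol = 'iscsi'

    # NetAppDriver instantiates the registered driver and proxies all volume
    # driver calls to it through __getattr__/__setattr__.
    driver = common.NetAppDriver(configuration=configuration)
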
+netapp_unified_plugin_registry =\ + {'ontap_cluster': + { + 'iscsi': + 'cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver', + 'nfs': 'cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver' + }, 'ontap_7mode': + { + 'iscsi': + 'cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver', + 'nfs': + 'cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver' + }, + } + +#NOTE(singn): Holds family:protocol information. +#Protocol represents the default protocol driver option +#in case no protocol is specified by the user in configuration. +netapp_family_default =\ + { + 'ontap_cluster': 'nfs', + 'ontap_7mode': 'nfs' + } + + +class NetAppDriver(object): + """"NetApp unified block storage driver. + + Acts as a mediator to NetApp storage drivers. + Proxies requests based on the storage family and protocol configured. + Override the proxy driver method by adding method in this driver. + """ + + def __init__(self, *args, **kwargs): + super(NetAppDriver, self).__init__() + self.configuration = kwargs.get('configuration', None) + if self.configuration: + self.configuration.append_config_values(netapp_proxy_opts) + else: + raise exception.InvalidInput( + reason=_("Required configuration not found")) + self.driver = NetAppDriverFactory.create_driver( + self.configuration.netapp_storage_family, + self.configuration.netapp_storage_protocol, + *args, **kwargs) + + def __setattr__(self, name, value): + """Sets the attribute.""" + if getattr(self, 'driver', None): + self.driver.__setattr__(name, value) + return + object.__setattr__(self, name, value) + + def __getattr__(self, name): + """"Gets the attribute.""" + drv = object.__getattribute__(self, 'driver') + return getattr(drv, name) + + +class NetAppDriverFactory(object): + """Factory to instantiate appropriate NetApp driver.""" + + @staticmethod + def create_driver( + storage_family, storage_protocol, *args, **kwargs): + """"Creates an appropriate driver based on family and protocol.""" + fmt = {'storage_family': storage_family, + 'storage_protocol': storage_protocol} + LOG.info(_('Requested unified config: %(storage_family)s and ' + '%(storage_protocol)s') % fmt) + storage_family = storage_family.lower() + family_meta = netapp_unified_plugin_registry.get(storage_family) + if not family_meta: + raise exception.InvalidInput( + reason=_('Storage family %s is not supported') + % storage_family) + if not storage_protocol: + storage_protocol = netapp_family_default.get(storage_family) + if not storage_protocol: + msg_fmt = {'storage_family': storage_family} + raise exception.InvalidInput( + reason=_('No default storage protocol found' + ' for storage family %(storage_family)s') + % msg_fmt) + storage_protocol = storage_protocol.lower() + driver_loc = family_meta.get(storage_protocol) + if not driver_loc: + msg_fmt = {'storage_protocol': storage_protocol, + 'storage_family': storage_family} + raise exception.InvalidInput( + reason=_('Protocol %(storage_protocol)s is not supported' + ' for storage family %(storage_family)s') + % msg_fmt) + NetAppDriverFactory.check_netapp_driver(driver_loc) + kwargs = kwargs or {} + kwargs['netapp_mode'] = 'proxy' + driver = importutils.import_object(driver_loc, *args, **kwargs) + LOG.info(_('NetApp driver of family %(storage_family)s and protocol' + ' %(storage_protocol)s loaded') % fmt) + return driver + + @staticmethod + def check_netapp_driver(location): + """Checks if the driver requested is a netapp driver.""" + if location.find(".netapp.") == -1: + raise exception.InvalidInput( + 
reason=_("Only loading netapp drivers supported.")) diff --git a/cinder/volume/drivers/netapp/iscsi.py b/cinder/volume/drivers/netapp/iscsi.py index a68e3dd66..936c4df1e 100644 --- a/cinder/volume/drivers/netapp/iscsi.py +++ b/cinder/volume/drivers/netapp/iscsi.py @@ -16,1122 +16,43 @@ # License for the specific language governing permissions and limitations # under the License. """ -Volume driver for NetApp storage systems. - -This driver requires NetApp OnCommand 5.0 and one or more Data -ONTAP 7-mode storage systems with installed iSCSI licenses. +Volume driver for NetApp iSCSI storage systems. +This driver requires NetApp Clustered Data ONTAP or 7-mode +storage systems with installed iSCSI licenses. """ import time import uuid -from oslo.config import cfg -import suds -from suds import client -from suds.sax import text - from cinder import exception from cinder.openstack.common import log as logging -from cinder import utils from cinder.volume import driver from cinder.volume.drivers.netapp.api import NaApiError from cinder.volume.drivers.netapp.api import NaElement from cinder.volume.drivers.netapp.api import NaServer +from cinder.volume.drivers.netapp.options import netapp_7mode_opts +from cinder.volume.drivers.netapp.options import netapp_basicauth_opts +from cinder.volume.drivers.netapp.options import netapp_cluster_opts +from cinder.volume.drivers.netapp.options import netapp_connection_opts +from cinder.volume.drivers.netapp.options import netapp_provisioning_opts +from cinder.volume.drivers.netapp.options import netapp_transport_opts +from cinder.volume.drivers.netapp.utils import provide_ems +from cinder.volume.drivers.netapp.utils import validate_instantiation from cinder.volume import volume_types +from oslo.config import cfg -LOG = logging.getLogger(__name__) -netapp_opts = [ - cfg.StrOpt('netapp_wsdl_url', - default=None, - help='URL of the WSDL file for the DFM/Webservice server'), - cfg.StrOpt('netapp_login', - default=None, - help='User name for the DFM/Controller server'), - cfg.StrOpt('netapp_password', - default=None, - help='Password for the DFM/Controller server', - secret=True), - cfg.StrOpt('netapp_server_hostname', - default=None, - help='Hostname for the DFM/Controller server'), - cfg.IntOpt('netapp_server_port', - default=8088, - help='Port number for the DFM/Controller server'), - cfg.StrOpt('netapp_storage_service', - default=None, - help=('Storage service to use for provisioning ' - '(when volume_type=None)')), - cfg.StrOpt('netapp_storage_service_prefix', - default=None, - help=('Prefix of storage service name to use for ' - 'provisioning (volume_type name will be appended)')), - cfg.StrOpt('netapp_vfiler', - default=None, - help='Vfiler to use for provisioning'), - cfg.StrOpt('netapp_transport_type', - default='http', - help='Transport type protocol'), - cfg.StrOpt('netapp_vserver', - default='openstack', - help='Cluster vserver to use for provisioning'), - cfg.FloatOpt('netapp_size_multiplier', - default=1.2, - help='Volume size multiplier to ensure while creation'), - cfg.StrOpt('netapp_volume_list', - default='', - help='Comma separated eligible volumes for provisioning on' - ' 7 mode'), ] +LOG = logging.getLogger(__name__) CONF = cfg.CONF -CONF.register_opts(netapp_opts) - - -class DfmDataset(object): - def __init__(self, id, name, project, type): - self.id = id - self.name = name - self.project = project - self.type = type - - -class DfmLun(object): - def __init__(self, dataset, lunpath, id): - self.dataset = dataset - self.lunpath = lunpath - self.id 
= id - - -class NetAppISCSIDriver(driver.ISCSIDriver): - """NetApp iSCSI volume driver.""" - - IGROUP_PREFIX = 'openstack-' - DATASET_PREFIX = 'OpenStack_' - DATASET_METADATA_PROJECT_KEY = 'OpenStackProject' - DATASET_METADATA_VOL_TYPE_KEY = 'OpenStackVolType' - - def __init__(self, *args, **kwargs): - super(NetAppISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(netapp_opts) - self.discovered_luns = [] - self.discovered_datasets = [] - self.lun_table = {} - - def _check_fail(self, request, response): - """Utility routine to handle checking ZAPI failures.""" - if 'failed' == response.Status: - msg = _('API %(name)s failed: %(reason)s') - msg_fmt = {'name': request.Name, 'reason': response.Reason} - raise exception.VolumeBackendAPIException(msg % msg_fmt) - - def _create_client(self, **kwargs): - """Instantiate a web services client. - - This method creates a "suds" client to make web services calls to the - DFM server. Note that the WSDL file is quite large and may take - a few seconds to parse. - """ - wsdl_url = kwargs['wsdl_url'] - LOG.debug(_('Using WSDL: %s') % wsdl_url) - if kwargs['cache']: - self.client = client.Client(wsdl_url, username=kwargs['login'], - password=kwargs['password']) - else: - self.client = client.Client(wsdl_url, username=kwargs['login'], - password=kwargs['password'], - cache=None) - soap_url = 'http://%s:%s/apis/soap/v1' % (kwargs['hostname'], - kwargs['port']) - LOG.debug(_('Using DFM server: %s') % soap_url) - self.client.set_options(location=soap_url) - - def _set_storage_service(self, storage_service): - """Set the storage service to use for provisioning.""" - LOG.debug(_('Using storage service: %s') % storage_service) - self.storage_service = storage_service - - def _set_storage_service_prefix(self, storage_service_prefix): - """Set the storage service prefix to use for provisioning.""" - LOG.debug(_('Using storage service prefix: %s') % - storage_service_prefix) - self.storage_service_prefix = storage_service_prefix - - def _set_vfiler(self, vfiler): - """Set the vfiler to use for provisioning.""" - LOG.debug(_('Using vfiler: %s') % vfiler) - self.vfiler = vfiler - - def _check_flags(self): - """Ensure that the flags we care about are set.""" - required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password', - 'netapp_server_hostname', 'netapp_server_port'] - for flag in required_flags: - if not getattr(self.configuration, flag, None): - raise exception.InvalidInput(reason=_('%s is not set') % flag) - if not (self.configuration.netapp_storage_service or - self.configuration.netapp_storage_service_prefix): - raise exception.InvalidInput( - reason=_('Either ' - 'netapp_storage_service or ' - 'netapp_storage_service_prefix must ' - 'be set')) - - def do_setup(self, context): - """Setup the NetApp Volume driver. - - Called one time by the manager after the driver is loaded. - Validate the flags we care about and setup the suds (web services) - client. 
- """ - self._check_flags() - self._create_client( - wsdl_url=self.configuration.netapp_wsdl_url, - login=self.configuration.netapp_login, - password=self.configuration.netapp_password, - hostname=self.configuration.netapp_server_hostname, - port=self.configuration.netapp_server_port, cache=True) - self._set_storage_service(self.configuration.netapp_storage_service) - self._set_storage_service_prefix( - self.configuration.netapp_storage_service_prefix) - self._set_vfiler(self.configuration.netapp_vfiler) - - def check_for_setup_error(self): - """Check that the driver is working and can communicate. - - Invoke a web services API to make sure we can talk to the server. - Also perform the discovery of datasets and LUNs from DFM. - """ - self.client.service.DfmAbout() - LOG.debug(_("Connected to DFM server")) - self._discover_luns() - - def _get_datasets(self): - """Get the list of datasets from DFM.""" - server = self.client.service - res = server.DatasetListInfoIterStart(IncludeMetadata=True) - tag = res.Tag - datasets = [] - try: - while True: - res = server.DatasetListInfoIterNext(Tag=tag, Maximum=100) - if not res.Datasets: - break - datasets.extend(res.Datasets.DatasetInfo) - finally: - server.DatasetListInfoIterEnd(Tag=tag) - return datasets - - def _discover_dataset_luns(self, dataset, volume): - """Discover all of the LUNs in a dataset.""" - server = self.client.service - res = server.DatasetMemberListInfoIterStart( - DatasetNameOrId=dataset.id, - IncludeExportsInfo=True, - IncludeIndirect=True, - MemberType='lun_path') - tag = res.Tag - suffix = None - if volume: - suffix = '/' + volume - try: - while True: - res = server.DatasetMemberListInfoIterNext(Tag=tag, - Maximum=100) - if (not hasattr(res, 'DatasetMembers') or - not res.DatasetMembers): - break - for member in res.DatasetMembers.DatasetMemberInfo: - if suffix and not member.MemberName.endswith(suffix): - continue - # MemberName is the full LUN path in this format: - # host:/volume/qtree/lun - lun = DfmLun(dataset, member.MemberName, member.MemberId) - self.discovered_luns.append(lun) - finally: - server.DatasetMemberListInfoIterEnd(Tag=tag) - - def _discover_luns(self): - """Discover the LUNs from DFM. - - Discover all of the OpenStack-created datasets and LUNs in the DFM - database. - """ - datasets = self._get_datasets() - self.discovered_datasets = [] - self.discovered_luns = [] - for dataset in datasets: - if not dataset.DatasetName.startswith(self.DATASET_PREFIX): - continue - if (not hasattr(dataset, 'DatasetMetadata') or - not dataset.DatasetMetadata): - continue - project = None - type = None - for field in dataset.DatasetMetadata.DfmMetadataField: - if field.FieldName == self.DATASET_METADATA_PROJECT_KEY: - project = field.FieldValue - elif field.FieldName == self.DATASET_METADATA_VOL_TYPE_KEY: - type = field.FieldValue - if not project: - continue - ds = DfmDataset(dataset.DatasetId, dataset.DatasetName, - project, type) - self.discovered_datasets.append(ds) - self._discover_dataset_luns(ds, None) - msg = (_("Discovered %(dataset_count)s datasets and %(lun_count)s" - "LUNs") % {'dataset_count': len(self.discovered_datasets), - 'lun_count': len(self.discovered_luns)}) - LOG.debug(msg) - self.lun_table = {} - - def _get_job_progress(self, job_id): - """Get progress of one running DFM job. - - Obtain the latest progress report for the job and return the - list of progress events. 
- """ - server = self.client.service - res = server.DpJobProgressEventListIterStart(JobId=job_id) - tag = res.Tag - event_list = [] - try: - while True: - res = server.DpJobProgressEventListIterNext(Tag=tag, - Maximum=100) - if not hasattr(res, 'ProgressEvents'): - break - event_list += res.ProgressEvents.DpJobProgressEventInfo - finally: - server.DpJobProgressEventListIterEnd(Tag=tag) - return event_list - - def _wait_for_job(self, job_id): - """Wait until a job terminates. - - Poll the job until it completes or an error is detected. Return the - final list of progress events if it completes successfully. - """ - while True: - events = self._get_job_progress(job_id) - for event in events: - if event.EventStatus == 'error': - msg = _('Job failed: %s') % (event.ErrorMessage) - raise exception.VolumeBackendAPIException(data=msg) - if event.EventType == 'job-end': - return events - time.sleep(5) - - def _dataset_name(self, project, ss_type): - """Return the dataset name for a given project and volume type.""" - _project = project.replace(' ', '_').replace('-', '_') - dataset_name = self.DATASET_PREFIX + _project - if not ss_type: - return dataset_name - _type = ss_type.replace(' ', '_').replace('-', '_') - return dataset_name + '_' + _type - - def _get_dataset(self, dataset_name): - """Lookup a dataset by name in the list of discovered datasets.""" - for dataset in self.discovered_datasets: - if dataset.name == dataset_name: - return dataset - return None - - def _create_dataset(self, dataset_name, project, ss_type): - """Create a new dataset using the storage service. - - The export settings are set to create iSCSI LUNs aligned for Linux. - Returns the ID of the new dataset. - """ - if ss_type and not self.storage_service_prefix: - msg = _('Attempt to use volume_type without specifying ' - 'netapp_storage_service_prefix flag.') - raise exception.VolumeBackendAPIException(data=msg) - if not (ss_type or self.storage_service): - msg = _('You must set the netapp_storage_service flag in order to ' - 'create volumes with no volume_type.') - raise exception.VolumeBackendAPIException(data=msg) - storage_service = self.storage_service - if ss_type: - storage_service = self.storage_service_prefix + ss_type - - factory = self.client.factory - - lunmap = factory.create('DatasetLunMappingInfo') - lunmap.IgroupOsType = 'linux' - export = factory.create('DatasetExportInfo') - export.DatasetExportProtocol = 'iscsi' - export.DatasetLunMappingInfo = lunmap - detail = factory.create('StorageSetInfo') - detail.DpNodeName = 'Primary data' - detail.DatasetExportInfo = export - if hasattr(self, 'vfiler') and self.vfiler: - detail.ServerNameOrId = self.vfiler - details = factory.create('ArrayOfStorageSetInfo') - details.StorageSetInfo = [detail] - field1 = factory.create('DfmMetadataField') - field1.FieldName = self.DATASET_METADATA_PROJECT_KEY - field1.FieldValue = project - field2 = factory.create('DfmMetadataField') - field2.FieldName = self.DATASET_METADATA_VOL_TYPE_KEY - field2.FieldValue = ss_type - metadata = factory.create('ArrayOfDfmMetadataField') - metadata.DfmMetadataField = [field1, field2] - - res = self.client.service.StorageServiceDatasetProvision( - StorageServiceNameOrId=storage_service, - DatasetName=dataset_name, - AssumeConfirmation=True, - StorageSetDetails=details, - DatasetMetadata=metadata) - - ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type) - self.discovered_datasets.append(ds) - return ds - - @utils.synchronized('netapp_dfm', external=True) - def _provision(self, name, 
description, project, ss_type, size): - """Provision a LUN through provisioning manager. - - The LUN will be created inside a dataset associated with the project. - If the dataset doesn't already exist, we create it using the storage - service specified in the cinder conf. - """ - dataset_name = self._dataset_name(project, ss_type) - dataset = self._get_dataset(dataset_name) - if not dataset: - dataset = self._create_dataset(dataset_name, project, ss_type) - - info = self.client.factory.create('ProvisionMemberRequestInfo') - info.Name = name - if description: - info.Description = description - info.Size = size - info.MaximumSnapshotSpace = 2 * long(size) - - server = self.client.service - lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset.id) - try: - server.DatasetProvisionMember(EditLockId=lock_id, - ProvisionMemberRequestInfo=info) - res = server.DatasetEditCommit(EditLockId=lock_id, - AssumeConfirmation=True) - except (suds.WebFault, Exception): - server.DatasetEditRollback(EditLockId=lock_id) - msg = _('Failed to provision dataset member') - raise exception.VolumeBackendAPIException(data=msg) - - lun_id = None - lunpath = None - - for info in res.JobIds.JobInfo: - events = self._wait_for_job(info.JobId) - for event in events: - if event.EventType != 'lun-create': - continue - lunpath = event.ProgressLunInfo.LunName - lun_id = event.ProgressLunInfo.LunPathId - - if not lun_id: - msg = _('No LUN was created by the provision job') - raise exception.VolumeBackendAPIException(data=msg) - - lun = DfmLun(dataset, lunpath, lun_id) - self.discovered_luns.append(lun) - self.lun_table[name] = lun - - def _get_ss_type(self, volume): - """Get the storage service type for a volume.""" - id = volume['volume_type_id'] - if not id: - return None - volume_type = volume_types.get_volume_type(None, id) - if not volume_type: - return None - return volume_type['name'] - - @utils.synchronized('netapp_dfm', external=True) - def _remove_destroy(self, name, project): - """Remove the LUN from the dataset, also destroying it. - - Remove the LUN from the dataset and destroy the actual LUN and Qtree - on the storage system. 
- """ - try: - lun = self._lookup_lun_for_volume(name, project) - lun_details = self._get_lun_details(lun.id) - except exception.VolumeBackendAPIException: - LOG.debug(_("No entry in LUN table for volume %(name)s."), - {'name': name}) - return - - member = self.client.factory.create('DatasetMemberParameter') - member.ObjectNameOrId = lun.id - members = self.client.factory.create('ArrayOfDatasetMemberParameter') - members.DatasetMemberParameter = [member] - - server = self.client.service - lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id) - try: - server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True, - DatasetMemberParameters=members) - res = server.DatasetEditCommit(EditLockId=lock_id, - AssumeConfirmation=True) - except (suds.WebFault, Exception): - server.DatasetEditRollback(EditLockId=lock_id) - msg = _('Failed to remove and delete dataset LUN member') - raise exception.VolumeBackendAPIException(data=msg) - - for info in res.JobIds.JobInfo: - self._wait_for_job(info.JobId) - - # Note: it's not possible to delete Qtree & his LUN in one transaction - member.ObjectNameOrId = lun_details.QtreeId - lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id) - try: - server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True, - DatasetMemberParameters=members) - server.DatasetEditCommit(EditLockId=lock_id, - AssumeConfirmation=True) - except (suds.WebFault, Exception): - server.DatasetEditRollback(EditLockId=lock_id) - msg = _('Failed to remove and delete dataset Qtree member') - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume(self, volume): - """Driver entry point for creating a new volume.""" - default_size = '104857600' # 100 MB - gigabytes = 1073741824L # 2^30 - name = volume['name'] - project = volume['project_id'] - display_name = volume['display_name'] - display_description = volume['display_description'] - description = None - if display_name: - if display_description: - description = display_name + "\n" + display_description - else: - description = display_name - elif display_description: - description = display_description - if int(volume['size']) == 0: - size = default_size - else: - size = str(int(volume['size']) * gigabytes) - ss_type = self._get_ss_type(volume) - self._provision(name, description, project, ss_type, size) - - def _lookup_lun_for_volume(self, name, project): - """Lookup the LUN that corresponds to the give volume. - - Initial lookups involve a table scan of all of the discovered LUNs, - but later lookups are done instantly from the hashtable. 
- """ - if name in self.lun_table: - return self.lun_table[name] - lunpath_suffix = '/' + name - for lun in self.discovered_luns: - if lun.dataset.project != project: - continue - if lun.lunpath.endswith(lunpath_suffix): - self.lun_table[name] = lun - return lun - msg = _("No entry in LUN table for volume %s") % (name) - raise exception.VolumeBackendAPIException(data=msg) - - def delete_volume(self, volume): - """Driver entry point for destroying existing volumes.""" - name = volume['name'] - project = volume['project_id'] - self._remove_destroy(name, project) - - def _get_lun_details(self, lun_id): - """Given the ID of a LUN, get the details about that LUN.""" - server = self.client.service - res = server.LunListInfoIterStart(ObjectNameOrId=lun_id) - tag = res.Tag - try: - res = server.LunListInfoIterNext(Tag=tag, Maximum=1) - if hasattr(res, 'Luns') and res.Luns.LunInfo: - return res.Luns.LunInfo[0] - finally: - server.LunListInfoIterEnd(Tag=tag) - msg = _('Failed to get LUN details for LUN ID %s') - raise exception.VolumeBackendAPIException(data=msg % lun_id) - - def _get_host_details(self, host_id): - """Given the ID of a host, get the details about it. - - A "host" is a storage system here. - """ - server = self.client.service - res = server.HostListInfoIterStart(ObjectNameOrId=host_id) - tag = res.Tag - try: - res = server.HostListInfoIterNext(Tag=tag, Maximum=1) - if hasattr(res, 'Hosts') and res.Hosts.HostInfo: - return res.Hosts.HostInfo[0] - finally: - server.HostListInfoIterEnd(Tag=tag) - msg = _('Failed to get host details for host ID %s') - raise exception.VolumeBackendAPIException(data=msg % host_id) - - def _get_iqn_for_host(self, host_id): - """Get the iSCSI Target Name for a storage system.""" - request = self.client.factory.create('Request') - request.Name = 'iscsi-node-get-name' - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - return response.Results['node-name'][0] - - def _api_elem_is_empty(self, elem): - """Return true if the API element should be considered empty. - - Helper routine to figure out if a list returned from a proxy API - is empty. This is necessary because the API proxy produces nasty - looking XML. - """ - if type(elem) is not list: - return True - if 0 == len(elem): - return True - child = elem[0] - if isinstance(child, text.Text): - return True - if type(child) is str: - return True - return False - - def _get_target_portal_for_host(self, host_id, host_address): - """Get iSCSI target portal for a storage system. - - Get the iSCSI Target Portal details for a particular IP address - on a storage system. - """ - request = self.client.factory.create('Request') - request.Name = 'iscsi-portal-list-info' - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - portal = {} - portals = response.Results['iscsi-portal-list-entries'] - if self._api_elem_is_empty(portals): - return portal - portal_infos = portals[0]['iscsi-portal-list-entry-info'] - for portal_info in portal_infos: - portal['address'] = portal_info['ip-address'][0] - portal['port'] = portal_info['ip-port'][0] - portal['portal'] = portal_info['tpgroup-tag'][0] - if host_address == portal['address']: - break - return portal - - def _get_export(self, volume): - """Get the iSCSI export details for a volume. - - Looks up the LUN in DFM based on the volume and project name, then get - the LUN's ID. 
We store that value in the database instead of the iSCSI - details because we will not have the true iSCSI details until masking - time (when initialize_connection() is called). - """ - name = volume['name'] - project = volume['project_id'] - lun = self._lookup_lun_for_volume(name, project) - return {'provider_location': lun.id} - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - return self._get_export(volume) - - def create_export(self, context, volume): - """Driver entry point to get the export info for a new volume.""" - return self._get_export(volume) - - def remove_export(self, context, volume): - """Driver exntry point to remove an export for a volume. - - Since exporting is idempotent in this driver, we have nothing - to do for unexporting. - """ - pass - - def _find_igroup_for_initiator(self, host_id, initiator_name): - """Get the igroup for an initiator. - - Look for an existing igroup (initiator group) on the storage system - containing a given iSCSI initiator and return the name of the igroup. - """ - request = self.client.factory.create('Request') - request.Name = 'igroup-list-info' - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - igroups = response.Results['initiator-groups'] - if self._api_elem_is_empty(igroups): - return None - igroup_infos = igroups[0]['initiator-group-info'] - for igroup_info in igroup_infos: - if ('iscsi' != igroup_info['initiator-group-type'][0] or - 'linux' != igroup_info['initiator-group-os-type'][0]): - continue - igroup_name = igroup_info['initiator-group-name'][0] - if not igroup_name.startswith(self.IGROUP_PREFIX): - continue - initiators = igroup_info['initiators'][0]['initiator-info'] - for initiator in initiators: - if initiator_name == initiator['initiator-name'][0]: - return igroup_name - return None - - def _create_igroup(self, host_id, initiator_name): - """Create a new igroup. - - Create a new igroup (initiator group) on the storage system to hold - the given iSCSI initiator. The group will only have 1 member and will - be named "openstack-${initiator_name}". - """ - igroup_name = self.IGROUP_PREFIX + initiator_name - request = self.client.factory.create('Request') - request.Name = 'igroup-create' - igroup_create_xml = ( - '%s' - 'iscsi' - 'linuxlinux') - request.Args = text.Raw(igroup_create_xml % igroup_name) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - request = self.client.factory.create('Request') - request.Name = 'igroup-add' - igroup_add_xml = ( - '%s' - '%s') - request.Args = text.Raw(igroup_add_xml % (igroup_name, initiator_name)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - return igroup_name - - def _get_lun_mappping(self, host_id, lunpath, igroup_name): - """Get the mapping between a LUN and an igroup. - - Check if a given LUN is already mapped to the given igroup (initiator - group). If the LUN is mapped, also return the LUN number for the - mapping. 
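The mapping helpers around this point follow a simple ensure pattern: look up an existing igroup for the initiator, create one if none exists, then map the LUN only if it is not already mapped, returning the LUN number either way. A rough sketch of that control flow with the ZAPI proxy calls replaced by hypothetical callables:

    def ensure_initiator_mapped(find_igroup, create_igroup, get_mapping,
                                map_lun, initiator_name):
        """Map a LUN for an initiator, creating its igroup when needed."""
        igroup = find_igroup(initiator_name) or create_igroup(initiator_name)
        mapping = get_mapping(igroup)       # e.g. {'mapped': True, 'lun_num': 3}
        if mapping['mapped']:
            return mapping['lun_num']
        return map_lun(igroup)              # filer picks the lowest free LUN id

    # Stub usage: no igroup exists yet, so one is created and LUN 0 is assigned.
    print(ensure_initiator_mapped(
        lambda init: None,
        lambda init: 'openstack-' + init,
        lambda igroup: {'mapped': False},
        lambda igroup: 0,
        'iqn.1993-08.org.debian:01:abc'))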
- """ - request = self.client.factory.create('Request') - request.Name = 'lun-map-list-info' - request.Args = text.Raw('%s' % (lunpath)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - igroups = response.Results['initiator-groups'] - if self._api_elem_is_empty(igroups): - return {'mapped': False} - igroup_infos = igroups[0]['initiator-group-info'] - for igroup_info in igroup_infos: - if igroup_name == igroup_info['initiator-group-name'][0]: - return {'mapped': True, 'lun_num': igroup_info['lun-id'][0]} - return {'mapped': False} - - def _map_initiator(self, host_id, lunpath, igroup_name): - """Map a LUN to an igroup. - - Map the given LUN to the given igroup (initiator group). Return the LUN - number that the LUN was mapped to (the filer will choose the lowest - available number). - """ - request = self.client.factory.create('Request') - request.Name = 'lun-map' - lun_map_xml = ('%s' - '%s') - request.Args = text.Raw(lun_map_xml % (igroup_name, lunpath)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - return response.Results['lun-id-assigned'][0] - - def _unmap_initiator(self, host_id, lunpath, igroup_name): - """Unmap the given LUN from the given igroup (initiator group).""" - request = self.client.factory.create('Request') - request.Name = 'lun-unmap' - lun_unmap_xml = ('%s' - '%s') - request.Args = text.Raw(lun_unmap_xml % (igroup_name, lunpath)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - - def _ensure_initiator_mapped(self, host_id, lunpath, initiator_name): - """Ensure that a LUN is mapped to a particular initiator. - - Check if a LUN is mapped to a given initiator already and create - the mapping if it is not. A new igroup will be created if needed. - Returns the LUN number for the mapping between the LUN and initiator - in both cases. - """ - lunpath = '/vol/' + lunpath - igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) - if not igroup_name: - igroup_name = self._create_igroup(host_id, initiator_name) - - mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) - if mapping['mapped']: - return mapping['lun_num'] - return self._map_initiator(host_id, lunpath, igroup_name) - - def _ensure_initiator_unmapped(self, host_id, lunpath, initiator_name): - """Ensure that a LUN is not mapped to a particular initiator. - - Check if a LUN is mapped to a given initiator and remove the - mapping if it is. This does not destroy the igroup. - """ - lunpath = '/vol/' + lunpath - igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) - if not igroup_name: - return - - mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) - if mapping['mapped']: - self._unmap_initiator(host_id, lunpath, igroup_name) - - def initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance. - - Do the LUN masking on the storage system so the initiator can access - the LUN on the target. Also return the iSCSI properties so the - initiator can find the LUN. This implementation does not call - _get_iscsi_properties() to get the properties because cannot store the - LUN number in the database. We only find out what the LUN number will - be during this method call so we construct the properties dictionary - ourselves. 
- """ - initiator_name = connector['initiator'] - lun_id = volume['provider_location'] - if not lun_id: - msg = _("No LUN ID for volume %s") % volume['name'] - raise exception.VolumeBackendAPIException(data=msg) - lun = self._get_lun_details(lun_id) - lun_num = self._ensure_initiator_mapped(lun.HostId, lun.LunPath, - initiator_name) - host = self._get_host_details(lun.HostId) - portal = self._get_target_portal_for_host(host.HostId, - host.HostAddress) - if not portal: - msg = _('Failed to get target portal for filer: %s') - raise exception.VolumeBackendAPIException(data=msg % host.HostName) - - iqn = self._get_iqn_for_host(host.HostId) - if not iqn: - msg = _('Failed to get target IQN for filer: %s') - raise exception.VolumeBackendAPIException(data=msg % host.HostName) - - properties = {} - properties['target_discovered'] = False - (address, port) = (portal['address'], portal['port']) - properties['target_portal'] = '%s:%s' % (address, port) - properties['target_iqn'] = iqn - properties['target_lun'] = lun_num - properties['volume_id'] = volume['id'] - - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance. - - Unmask the LUN on the storage system so the given intiator can no - longer access it. - """ - initiator_name = connector['initiator'] - lun_id = volume['provider_location'] - if not lun_id: - msg = _('No LUN ID for volume %s') % volume['name'] - raise exception.VolumeBackendAPIException(data=msg) - lun = self._get_lun_details(lun_id) - self._ensure_initiator_unmapped(lun.HostId, lun.LunPath, - initiator_name) - - def _is_clone_done(self, host_id, clone_op_id, volume_uuid): - """Check the status of a clone operation. - - Return True if done, False otherwise. - """ - request = self.client.factory.create('Request') - request.Name = 'clone-list-status' - clone_list_status_xml = ( - '' - '%s' - '%s' - '') - request.Args = text.Raw(clone_list_status_xml % (clone_op_id, - volume_uuid)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - if isinstance(response.Results, text.Text): - return False - status = response.Results['status'] - if self._api_elem_is_empty(status): - return False - ops_info = status[0]['ops-info'][0] - state = ops_info['clone-state'][0] - return 'completed' == state - - def _clone_lun(self, host_id, src_path, dest_path, snap): - """Create a clone of a NetApp LUN. - - The clone initially consumes no space and is not space reserved. 
- """ - request = self.client.factory.create('Request') - request.Name = 'clone-start' - clone_start_xml = ( - '%s%s' - '%s') - if snap: - no_snap = 'false' - else: - no_snap = 'true' - request.Args = text.Raw(clone_start_xml % (src_path, no_snap, - dest_path)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - clone_id = response.Results['clone-id'][0] - clone_id_info = clone_id['clone-id-info'][0] - clone_op_id = clone_id_info['clone-op-id'][0] - volume_uuid = clone_id_info['volume-uuid'][0] - while not self._is_clone_done(host_id, clone_op_id, volume_uuid): - time.sleep(5) - - def _refresh_dfm_luns(self, host_id): - """Refresh the LUN list for one filer in DFM.""" - server = self.client.service - refresh_started_at = time.time() - monitor_names = self.client.factory.create('ArrayOfMonitorName') - monitor_names.MonitorName = ['file_system', 'lun'] - server.DfmObjectRefresh(ObjectNameOrId=host_id, - MonitorNames=monitor_names) - - max_wait = 10 * 60 # 10 minutes - - while True: - if time.time() - refresh_started_at > max_wait: - msg = _('Failed to get LUN list. Is the DFM host' - ' time-synchronized with Cinder host?') - raise exception.VolumeBackendAPIException(msg) - - LOG.info('Refreshing LUN list on DFM...') - time.sleep(15) - res = server.DfmMonitorTimestampList(HostNameOrId=host_id) - timestamps = dict((t.MonitorName, t.LastMonitoringTimestamp) - for t in res.DfmMonitoringTimestamp) - ts_fs = timestamps['file_system'] - ts_lun = timestamps['lun'] - - if ts_fs > refresh_started_at and ts_lun > refresh_started_at: - return # both monitor jobs finished - elif ts_fs == 0 or ts_lun == 0: - pass # lun or file_system is still in progress, wait - else: - monitor_names.MonitorName = [] - if ts_fs <= refresh_started_at: - monitor_names.MonitorName.append('file_system') - if ts_lun <= refresh_started_at: - monitor_names.MonitorName.append('lun') - LOG.debug('Rerunning refresh for monitors: ' + - str(monitor_names.MonitorName)) - server.DfmObjectRefresh(ObjectNameOrId=host_id, - MonitorNames=monitor_names) - - def _destroy_lun(self, host_id, lun_path): - """Destroy a LUN on the filer.""" - request = self.client.factory.create('Request') - request.Name = 'lun-offline' - path_xml = '%s' - request.Args = text.Raw(path_xml % lun_path) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - request = self.client.factory.create('Request') - request.Name = 'lun-destroy' - request.Args = text.Raw(path_xml % lun_path) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - - def _resize_volume(self, host_id, vol_name, new_size): - """Resize the volume by the amount requested.""" - request = self.client.factory.create('Request') - request.Name = 'volume-size' - volume_size_xml = ( - '%s%s') - request.Args = text.Raw(volume_size_xml % (vol_name, new_size)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - - def _create_qtree(self, host_id, vol_name, qtree_name): - """Create a qtree the filer.""" - request = self.client.factory.create('Request') - request.Name = 'qtree-create' - qtree_create_xml = ( - '0755%s%s') - request.Args = text.Raw(qtree_create_xml % (vol_name, qtree_name)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - - def create_snapshot(self, snapshot): - """Driver entry 
point for creating a snapshot. - - This driver implements snapshots by using efficient single-file - (LUN) cloning. - """ - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - project = snapshot['project_id'] - lun = self._lookup_lun_for_volume(vol_name, project) - lun_id = lun.id - lun = self._get_lun_details(lun_id) - extra_gb = snapshot['volume_size'] - new_size = '+%dg' % extra_gb - self._resize_volume(lun.HostId, lun.VolumeName, new_size) - # LunPath is the partial LUN path in this format: volume/qtree/lun - lun_path = str(lun.LunPath) - lun_name = lun_path[lun_path.rfind('/') + 1:] - qtree_path = '/vol/%s/%s' % (lun.VolumeName, lun.QtreeName) - src_path = '%s/%s' % (qtree_path, lun_name) - dest_path = '%s/%s' % (qtree_path, snapshot_name) - self._clone_lun(lun.HostId, src_path, dest_path, True) - - def delete_snapshot(self, snapshot): - """Driver entry point for deleting a snapshot.""" - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - project = snapshot['project_id'] - lun = self._lookup_lun_for_volume(vol_name, project) - lun_id = lun.id - lun = self._get_lun_details(lun_id) - lun_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, - snapshot_name) - self._destroy_lun(lun.HostId, lun_path) - extra_gb = snapshot['volume_size'] - new_size = '-%dg' % extra_gb - self._resize_volume(lun.HostId, lun.VolumeName, new_size) - - def create_volume_from_snapshot(self, volume, snapshot): - """Driver entry point for creating a new volume from a snapshot. - - Many would call this "cloning" and in fact we use cloning to implement - this feature. - """ - vol_size = volume['size'] - snap_size = snapshot['volume_size'] - if vol_size != snap_size: - msg = _('Cannot create volume of size %(vol_size)s from ' - 'snapshot of size %(snap_size)s') - msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size} - raise exception.VolumeBackendAPIException(msg % msg_fmt) - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - project = snapshot['project_id'] - lun = self._lookup_lun_for_volume(vol_name, project) - lun_id = lun.id - dataset = lun.dataset - old_type = dataset.type - new_type = self._get_ss_type(volume) - if new_type != old_type: - msg = _('Cannot create volume of type %(new_type)s from ' - 'snapshot of type %(old_type)s') - msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size} - raise exception.VolumeBackendAPIException(msg % msg_fmt) - lun = self._get_lun_details(lun_id) - extra_gb = vol_size - new_size = '+%dg' % extra_gb - self._resize_volume(lun.HostId, lun.VolumeName, new_size) - clone_name = volume['name'] - self._create_qtree(lun.HostId, lun.VolumeName, clone_name) - src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, - snapshot_name) - dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name) - self._clone_lun(lun.HostId, src_path, dest_path, False) - self._refresh_dfm_luns(lun.HostId) - self._discover_dataset_luns(dataset, clone_name) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - vol_size = volume['size'] - src_vol_size = src_vref['size'] - if vol_size != src_vol_size: - msg = _('Cannot create clone of size %(vol_size)s from ' - 'volume of size %(src_vol_size)s') - msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size} - raise exception.VolumeBackendAPIException(msg % msg_fmt) - src_vol_name = src_vref['name'] - project = src_vref['project_id'] - lun = self._lookup_lun_for_volume(src_vol_name, project) - lun_id = lun.id - dataset = 
lun.dataset - old_type = dataset.type - new_type = self._get_ss_type(volume) - if new_type != old_type: - msg = (_('Cannot create clone of type %(new_type)s from ' - 'volume of type %(old_type)s') % - {'new_type': new_type, 'old_type': old_type}) - raise exception.VolumeBackendAPIException(data=msg) - lun = self._get_lun_details(lun_id) - extra_gb = vol_size - new_size = '+%dg' % extra_gb - self._resize_volume(lun.HostId, lun.VolumeName, new_size) - clone_name = volume['name'] - self._create_qtree(lun.HostId, lun.VolumeName, clone_name) - src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, - src_vol_name) - dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name) - self._clone_lun(lun.HostId, src_path, dest_path, False) - self._refresh_dfm_luns(lun.HostId) - self._discover_dataset_luns(dataset, clone_name) - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self._update_volume_status() - - return self._stats - - def _update_volume_status(self): - """Retrieve status info from volume group.""" - - LOG.debug(_("Updating volume status")) - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or 'NetApp_iSCSI_7mode' - data["vendor_name"] = 'NetApp' - data["driver_version"] = '1.0' - data["storage_protocol"] = 'iSCSI' - - data['total_capacity_gb'] = 'infinite' - data['free_capacity_gb'] = 'infinite' - data['reserved_percentage'] = 0 - data['QoS_support'] = False - self._stats = data +CONF.register_opts(netapp_connection_opts) +CONF.register_opts(netapp_transport_opts) +CONF.register_opts(netapp_basicauth_opts) +CONF.register_opts(netapp_cluster_opts) +CONF.register_opts(netapp_7mode_opts) +CONF.register_opts(netapp_provisioning_opts) class NetAppLun(object): @@ -1148,367 +69,16 @@ class NetAppLun(object): if prop in self.metadata: return self.metadata[prop] name = self.name - msg = _("No metadata property %(prop)s defined for the LUN " - "%(name)s") % {'prop': prop, 'name': name} - LOG.debug(msg) + msg = _("No metadata property %(prop)s defined for the" + " LUN %(name)s") + msg_fmt = {'prop': prop, 'name': name} + LOG.debug(msg % msg_fmt) def __str__(self, *args, **kwargs): return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\ % (self.handle, self.name, self.size, self.metadata) -class NetAppCmodeISCSIDriver(driver.ISCSIDriver): - """NetApp C-mode iSCSI volume driver.""" - - def __init__(self, *args, **kwargs): - super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(netapp_opts) - self.lun_table = {} - - def _create_client(self, **kwargs): - """Instantiate a web services client. - - This method creates a "suds" client to make web services calls to the - DFM server. Note that the WSDL file is quite large and may take - a few seconds to parse. 
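Every web-service variant builds its SOAP client the same way: suds downloads and parses the WSDL once (which is why setup is slow), and passing cache=None disables the on-disk WSDL cache. A hedged sketch of that construction, assuming the suds package is available and using placeholder endpoint values:

    from suds import client

    def create_soap_client(wsdl_url, login, password, use_cache=True):
        """Return a suds client for a NetApp web-service endpoint."""
        if use_cache:
            # Default behaviour: suds keeps the parsed WSDL in its disk cache.
            return client.Client(wsdl_url, username=login, password=password)
        # cache=None forces the WSDL to be fetched and parsed on every start.
        return client.Client(wsdl_url, username=login, password=password,
                             cache=None)

    # Placeholder usage (would contact the server, so left commented out):
    # soap = create_soap_client('https://dfm.example.com/dfm.wsdl',
    #                           'admin', 'secret', use_cache=False)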
- """ - wsdl_url = kwargs['wsdl_url'] - LOG.debug(_('Using WSDL: %s') % wsdl_url) - if kwargs['cache']: - self.client = client.Client(wsdl_url, username=kwargs['login'], - password=kwargs['password']) - else: - self.client = client.Client(wsdl_url, username=kwargs['login'], - password=kwargs['password'], - cache=None) - - def _check_flags(self): - """Ensure that the flags we care about are set.""" - required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password', - 'netapp_server_hostname', 'netapp_server_port'] - for flag in required_flags: - if not getattr(self.configuration, flag, None): - msg = _('%s is not set') % flag - raise exception.InvalidInput(data=msg) - - def do_setup(self, context): - """Setup the NetApp Volume driver. - - Called one time by the manager after the driver is loaded. - Validate the flags we care about and setup the suds (web services) - client. - """ - self._check_flags() - self._create_client( - wsdl_url=self.configuration.netapp_wsdl_url, - login=self.configuration.netapp_login, - password=self.configuration.netapp_password, - hostname=self.configuration.netapp_server_hostname, - port=self.configuration.netapp_server_port, cache=True) - - def check_for_setup_error(self): - """Check that the driver is working and can communicate. - - Discovers the LUNs on the NetApp server. - """ - self.lun_table = {} - luns = self.client.service.ListLuns() - for lun in luns: - meta_dict = {} - if hasattr(lun, 'Metadata'): - meta_dict = self._create_dict_from_meta(lun.Metadata) - discovered_lun = NetAppLun(lun.Handle, - lun.Name, - lun.Size, - meta_dict) - self._add_lun_to_table(discovered_lun) - LOG.debug(_("Success getting LUN list from server")) - - def create_volume(self, volume): - """Driver entry point for creating a new volume.""" - default_size = '104857600' # 100 MB - gigabytes = 1073741824L # 2^30 - name = volume['name'] - if int(volume['size']) == 0: - size = default_size - else: - size = str(int(volume['size']) * gigabytes) - extra_args = {} - extra_args['OsType'] = 'linux' - extra_args['QosType'] = self._get_qos_type(volume) - extra_args['Container'] = volume['project_id'] - extra_args['Display'] = volume['display_name'] - extra_args['Description'] = volume['display_description'] - extra_args['SpaceReserved'] = True - server = self.client.service - metadata = self._create_metadata_list(extra_args) - lun = server.ProvisionLun(Name=name, Size=size, - Metadata=metadata) - LOG.debug(_("Created LUN with name %s") % name) - self._add_lun_to_table( - NetAppLun(lun.Handle, - lun.Name, - lun.Size, - self._create_dict_from_meta(lun.Metadata))) - - def delete_volume(self, volume): - """Driver entry point for destroying existing volumes.""" - name = volume['name'] - handle = self._get_lun_handle(name) - if not handle: - LOG.warn(_("No entry in LUN table for volume %(name)s."), - {'name': name}) - return - self.client.service.DestroyLun(Handle=handle) - LOG.debug(_("Destroyed LUN %s") % handle) - self.lun_table.pop(name) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - handle = self._get_lun_handle(volume['name']) - return {'provider_location': handle} - - def create_export(self, context, volume): - """Driver entry point to get the export info for a new volume.""" - handle = self._get_lun_handle(volume['name']) - return {'provider_location': handle} - - def remove_export(self, context, volume): - """Driver exntry point to remove an export for a volume. 
- - Since exporting is idempotent in this driver, we have nothing - to do for unexporting. - """ - pass - - def initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance. - - Do the LUN masking on the storage system so the initiator can access - the LUN on the target. Also return the iSCSI properties so the - initiator can find the LUN. This implementation does not call - _get_iscsi_properties() to get the properties because cannot store the - LUN number in the database. We only find out what the LUN number will - be during this method call so we construct the properties dictionary - ourselves. - """ - initiator_name = connector['initiator'] - handle = volume['provider_location'] - server = self.client.service - server.MapLun(Handle=handle, InitiatorType="iscsi", - InitiatorName=initiator_name) - LOG.debug(_("Mapped LUN %(handle)s to the initiator " - "%(initiator_name)s"), - {'handle': handle, 'initiator_name': initiator_name}) - - target_details_list = server.GetLunTargetDetails( - Handle=handle, - InitiatorType="iscsi", - InitiatorName=initiator_name) - LOG.debug(_("Succesfully fetched target details for LUN %(handle)s and" - " initiator %(initiator_name)s"), - {'handle': handle, 'initiator_name': initiator_name}) - - if not target_details_list: - msg = _('Failed to get LUN target details for the LUN %s') - raise exception.VolumeBackendAPIException(data=msg % handle) - target_details = target_details_list[0] - if not target_details.Address and target_details.Port: - msg = _('Failed to get target portal for the LUN %s') - raise exception.VolumeBackendAPIException(data=msg % handle) - iqn = target_details.Iqn - if not iqn: - msg = _('Failed to get target IQN for the LUN %s') - raise exception.VolumeBackendAPIException(data=msg % handle) - - properties = {} - properties['target_discovered'] = False - (address, port) = (target_details.Address, target_details.Port) - properties['target_portal'] = '%s:%s' % (address, port) - properties['target_iqn'] = iqn - properties['target_lun'] = target_details.LunNumber - properties['volume_id'] = volume['id'] - - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance. - - Unmask the LUN on the storage system so the given intiator can no - longer access it. - """ - initiator_name = connector['initiator'] - handle = volume['provider_location'] - self.client.service.UnmapLun(Handle=handle, InitiatorType="iscsi", - InitiatorName=initiator_name) - msg = _("Unmapped LUN %(handle)s from the initiator " - "%(initiator_name)s") % {'handle': handle, - 'initiator_name': initiator_name} - LOG.debug(msg) - - def create_snapshot(self, snapshot): - """Driver entry point for creating a snapshot. - - This driver implements snapshots by using efficient single-file - (LUN) cloning. 
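In other words, a snapshot in this driver is nothing more than a non-space-reserved clone of the volume's LUN named after the snapshot, and deleting the snapshot destroys that clone. The toy model below captures just those semantics in memory; none of it is the driver's real API.

    class FakeLunBackend(object):
        """In-memory stand-in that mimics provision/clone/destroy semantics."""

        def __init__(self):
            self.luns = {}  # LUN name -> attribute dict

        def provision(self, name, size):
            self.luns[name] = {'size': size, 'space_reserved': True}

        def clone(self, src_name, clone_name, space_reserved):
            attrs = dict(self.luns[src_name])
            attrs['space_reserved'] = space_reserved
            self.luns[clone_name] = attrs

        def destroy(self, name):
            self.luns.pop(name, None)

    backend = FakeLunBackend()
    backend.provision('volume-1', 1024 ** 3)
    backend.clone('volume-1', 'snapshot-1', space_reserved=False)  # create_snapshot
    backend.destroy('snapshot-1')                                  # delete_snapshot
    print(sorted(backend.luns))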
- """ - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - lun = self.lun_table[vol_name] - extra_args = {'SpaceReserved': False} - self._clone_lun(lun.handle, snapshot_name, extra_args) - - def delete_snapshot(self, snapshot): - """Driver entry point for deleting a snapshot.""" - name = snapshot['name'] - handle = self._get_lun_handle(name) - if not handle: - LOG.warn(_("No entry in LUN table for snapshot %(name)s."), - {'name': name}) - return - self.client.service.DestroyLun(Handle=handle) - LOG.debug(_("Destroyed LUN %s") % handle) - self.lun_table.pop(snapshot['name']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Driver entry point for creating a new volume from a snapshot. - - Many would call this "cloning" and in fact we use cloning to implement - this feature. - """ - vol_size = volume['size'] - snap_size = snapshot['volume_size'] - if vol_size != snap_size: - msg = _('Cannot create volume of size %(vol_size)s from ' - 'snapshot of size %(snap_size)s') - msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size} - raise exception.VolumeBackendAPIException(msg % msg_fmt) - snapshot_name = snapshot['name'] - lun = self.lun_table[snapshot_name] - new_name = volume['name'] - extra_args = {} - extra_args['OsType'] = 'linux' - extra_args['QosType'] = self._get_qos_type(volume) - extra_args['Container'] = volume['project_id'] - extra_args['Display'] = volume['display_name'] - extra_args['Description'] = volume['display_description'] - extra_args['SpaceReserved'] = True - self._clone_lun(lun.handle, new_name, extra_args) - - def _get_qos_type(self, volume): - """Get the storage service type for a volume.""" - type_id = volume['volume_type_id'] - if not type_id: - return None - volume_type = volume_types.get_volume_type(None, type_id) - if not volume_type: - return None - return volume_type['name'] - - def _add_lun_to_table(self, lun): - """Adds LUN to cache table.""" - if not isinstance(lun, NetAppLun): - msg = _("Object is not a NetApp LUN.") - raise exception.VolumeBackendAPIException(data=msg) - self.lun_table[lun.name] = lun - - def _clone_lun(self, handle, new_name, extra_args): - """Clone LUN with the given handle to the new name.""" - server = self.client.service - metadata = self._create_metadata_list(extra_args) - lun = server.CloneLun(Handle=handle, NewName=new_name, - Metadata=metadata) - LOG.debug(_("Cloned LUN with new name %s") % new_name) - self._add_lun_to_table( - NetAppLun(lun.Handle, - lun.Name, - lun.Size, - self._create_dict_from_meta(lun.Metadata))) - - def _create_metadata_list(self, extra_args): - """Creates metadata from kwargs.""" - metadata = [] - for key in extra_args.keys(): - meta = self.client.factory.create("Metadata") - meta.Key = key - meta.Value = extra_args[key] - metadata.append(meta) - return metadata - - def _get_lun_handle(self, name): - """Get the details for a LUN from our cache table.""" - if name not in self.lun_table: - LOG.warn(_("Could not find handle for LUN named %s") % name) - return None - return self.lun_table[name].handle - - def _create_dict_from_meta(self, metadata): - """Creates dictionary from metadata array.""" - meta_dict = {} - if not metadata: - return meta_dict - for meta in metadata: - meta_dict[meta.Key] = meta.Value - return meta_dict - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - vol_size = volume['size'] - src_vol = self.lun_table[src_vref['name']] - src_vol_size = src_vref['size'] - if vol_size != src_vol_size: - msg = 
_('Cannot clone volume of size %(vol_size)s from ' - 'src volume of size %(src_vol_size)s') - msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size} - raise exception.VolumeBackendAPIException(msg % msg_fmt) - new_name = volume['name'] - extra_args = {} - extra_args['OsType'] = 'linux' - extra_args['QosType'] = self._get_qos_type(volume) - extra_args['Container'] = volume['project_id'] - extra_args['Display'] = volume['display_name'] - extra_args['Description'] = volume['display_description'] - extra_args['SpaceReserved'] = True - self._clone_lun(src_vol.handle, new_name, extra_args) - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self._update_volume_status() - - return self._stats - - def _update_volume_status(self): - """Retrieve status info from volume group.""" - - LOG.debug(_("Updating volume status")) - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or 'NetApp_iSCSI_Cluster' - data["vendor_name"] = 'NetApp' - data["driver_version"] = '1.0' - data["storage_protocol"] = 'iSCSI' - - data['total_capacity_gb'] = 'infinite' - data['free_capacity_gb'] = 'infinite' - data['reserved_percentage'] = 100 - data['QoS_support'] = False - self._stats = data - - class NetAppDirectISCSIDriver(driver.ISCSIDriver): """NetApp Direct iSCSI volume driver.""" @@ -1519,7 +89,11 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): def __init__(self, *args, **kwargs): super(NetAppDirectISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(netapp_opts) + validate_instantiation(**kwargs) + self.configuration.append_config_values(netapp_connection_opts) + self.configuration.append_config_values(netapp_basicauth_opts) + self.configuration.append_config_values(netapp_transport_opts) + self.configuration.append_config_values(netapp_provisioning_opts) self.lun_table = {} def _create_client(self, **kwargs): @@ -1595,8 +169,9 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): name = volume['name'] metadata = self._get_lun_attr(name, 'metadata') if not metadata: - LOG.warn(_("No entry in LUN table for volume/snapshot %(name)s."), - {'name': name}) + msg = _("No entry in LUN table for volume/snapshot %(name)s.") + msg_fmt = {'name': name} + LOG.warn(msg % msg_fmt) return lun_destroy = NaElement.create_node_with_children( 'lun-destroy', @@ -1638,15 +213,15 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): initiator_name = connector['initiator'] name = volume['name'] lun_id = self._map_lun(name, initiator_name, 'iscsi', None) - msg = (_("Mapped LUN %(name)s to the initiator %(initiator_name)s") % - {'name': name, 'initiator_name': initiator_name}) - LOG.debug(msg) + msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s") + msg_fmt = {'name': name, 'initiator_name': initiator_name} + LOG.debug(msg % msg_fmt) iqn = self._get_iscsi_service_details() target_details_list = self._get_target_details() - msg = (_("Succesfully fetched target details for LUN %(name)s and " - "initiator %(initiator_name)s") % - {'name': name, 'initiator_name': initiator_name}) - LOG.debug(msg) + msg = _("Succesfully fetched target details for LUN %(name)s and " + "initiator %(initiator_name)s") + msg_fmt = {'name': name, 'initiator_name': initiator_name} + LOG.debug(msg % msg_fmt) if not target_details_list: msg = _('Failed to get LUN target details for the LUN %s') @@ -1711,10 +286,10 @@ class 
NetAppDirectISCSIDriver(driver.ISCSIDriver): vol_size = volume['size'] snap_size = snapshot['volume_size'] if vol_size != snap_size: - msg = (_('Cannot create volume of size %(vol_size)s from ' - 'snapshot of size %(snap_size)s') % - {'vol_size': vol_size, 'snap_size': snap_size}) - raise exception.VolumeBackendAPIException(data=msg) + msg = _('Cannot create volume of size %(vol_size)s from ' + 'snapshot of size %(snap_size)s') + msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size} + raise exception.VolumeBackendAPIException(data=msg % msg_fmt) snapshot_name = snapshot['name'] new_name = volume['name'] self._clone_lun(snapshot_name, new_name, 'true') @@ -1730,10 +305,10 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] self._unmap_lun(path, initiator_name) - msg = (_("Unmapped LUN %(name)s from the initiator " - "%(initiator_name)s") % - {'name': name, 'initiator_name': initiator_name}) - LOG.debug(msg) + msg = _("Unmapped LUN %(name)s from the initiator " + "%(initiator_name)s") + msg_fmt = {'name': name, 'initiator_name': initiator_name} + LOG.debug(msg % msg_fmt) def _get_ontapi_version(self): """Gets the supported ontapi version.""" @@ -1823,10 +398,11 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): result = self.client.invoke_successfully(lun_map, True) return result.get_child_content('lun-id-assigned') except NaApiError as e: - msg = (_("Error mapping lun. Code :%(code)s," - " Message:%(message)s") % - {'code': e.code, 'message': e.message}) - LOG.warn(msg) + code = e.code + message = e.message + msg = _('Error mapping lun. Code :%(code)s, Message:%(message)s') + msg_fmt = {'code': code, 'message': message} + LOG.warn(msg % msg_fmt) (igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator) if lun_id is not None: return lun_id @@ -1843,10 +419,10 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): try: self.client.invoke_successfully(lun_unmap, True) except NaApiError as e: - msg = (_("Error unmapping lun. Code :%(code)s," - " Message:%(message)s") % - {'code': e.code, 'message': e.message}) - LOG.warn(msg) + msg = _("Error unmapping lun. 
Code :%(code)s," + " Message:%(message)s") + msg_fmt = {'code': e.code, 'message': e.message} + LOG.warn(msg % msg_fmt) # if the lun is already unmapped if e.code == '13115' or e.code == '9016': pass @@ -1950,10 +526,10 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver): src_vol = self.lun_table[src_vref['name']] src_vol_size = src_vref['size'] if vol_size != src_vol_size: - msg = (_("Cannot clone volume of size %(vol_size)s from " - "src volume of size %(src_vol_size)s") % - {'vol_size': vol_size, 'src_vol_size': src_vol_size}) - raise exception.VolumeBackendAPIException(data=msg) + msg = _('Cannot clone volume of size %(vol_size)s from ' + 'src volume of size %(src_vol_size)s') + msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size} + raise exception.VolumeBackendAPIException(data=msg % msg_fmt) new_name = volume['name'] self._clone_lun(src_vol.name, new_name, 'true') @@ -1977,6 +553,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver): def __init__(self, *args, **kwargs): super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_cluster_opts) def _do_custom_setup(self): """Does custom setup for ontap cluster.""" @@ -2254,9 +831,10 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver): LOG.debug(_("Updating volume status")) data = {} + netapp_backend = 'NetApp_iSCSI_Cluster_direct' backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = (backend_name - or 'NetApp_iSCSI_Cluster_direct') + data["volume_backend_name"] = ( + backend_name or netapp_backend) data["vendor_name"] = 'NetApp' data["driver_version"] = '1.0' data["storage_protocol"] = 'iSCSI' @@ -2265,6 +843,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver): data['free_capacity_gb'] = 'infinite' data['reserved_percentage'] = 100 data['QoS_support'] = False + provide_ems(self, self.client, data, netapp_backend) self._stats = data @@ -2273,6 +852,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): def __init__(self, *args, **kwargs): super(NetAppDirect7modeISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_7mode_opts) def _do_custom_setup(self): """Does custom setup depending on the type of filer.""" @@ -2393,8 +973,8 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): if luns: lun_list.extend(luns) except NaApiError: - LOG.warn(_("Error finding luns for volume %(vol)s." - " Verify volume exists."), {'vol': vol}) + LOG.warn(_("Error finding luns for volume %s." 
+ " Verify volume exists.") % (vol)) else: luns = self._get_vol_luns(None) lun_list.extend(luns) @@ -2495,15 +1075,14 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): break else: if clone_ops_info: + fmt = {'name': name, 'new_name': new_name} if clone_ops_info.get_child_content('clone-state')\ == 'completed': LOG.debug(_("Clone operation with src %(name)s" - " and dest %(new_name)s completed"), - {'name': name, 'new_name': new_name}) + " and dest %(new_name)s completed") % fmt) else: LOG.debug(_("Clone operation with src %(name)s" - " and dest %(new_name)s failed"), - {'name': name, 'new_name': new_name}) + " and dest %(new_name)s failed") % fmt) raise NaApiError( clone_ops_info.get_child_content('error'), clone_ops_info.get_child_content('reason')) @@ -2535,9 +1114,10 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): LOG.debug(_("Updating volume status")) data = {} + netapp_backend = 'NetApp_iSCSI_7mode_direct' backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = (backend_name - or 'NetApp_iSCSI_7mode_direct') + data["volume_backend_name"] = ( + backend_name or 'NetApp_iSCSI_7mode_direct') data["vendor_name"] = 'NetApp' data["driver_version"] = '1.0' data["storage_protocol"] = 'iSCSI' @@ -2546,4 +1126,6 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): data['free_capacity_gb'] = 'infinite' data['reserved_percentage'] = 100 data['QoS_support'] = False + provide_ems(self, self.client, data, netapp_backend, + server_type="7mode") self._stats = data diff --git a/cinder/volume/drivers/netapp/nfs.py b/cinder/volume/drivers/netapp/nfs.py index 05322d74c..79b3edfbe 100644 --- a/cinder/volume/drivers/netapp/nfs.py +++ b/cinder/volume/drivers/netapp/nfs.py @@ -22,52 +22,52 @@ import copy import os import time -from oslo.config import cfg -import suds -from suds.sax import text - from cinder import exception from cinder.openstack.common import log as logging from cinder.volume.drivers.netapp.api import NaApiError from cinder.volume.drivers.netapp.api import NaElement from cinder.volume.drivers.netapp.api import NaServer -from cinder.volume.drivers.netapp.iscsi import netapp_opts +from cinder.volume.drivers.netapp.options import netapp_basicauth_opts +from cinder.volume.drivers.netapp.options import netapp_connection_opts +from cinder.volume.drivers.netapp.options import netapp_transport_opts +from cinder.volume.drivers.netapp.utils import provide_ems +from cinder.volume.drivers.netapp.utils import validate_instantiation from cinder.volume.drivers import nfs +from oslo.config import cfg -LOG = logging.getLogger(__name__) -netapp_nfs_opts = [ - cfg.IntOpt('synchronous_snapshot_create', - default=0, - help='Does snapshot creation call returns immediately')] +LOG = logging.getLogger(__name__) CONF = cfg.CONF -CONF.register_opts(netapp_nfs_opts) +CONF.register_opts(netapp_connection_opts) +CONF.register_opts(netapp_transport_opts) +CONF.register_opts(netapp_basicauth_opts) class NetAppNFSDriver(nfs.NfsDriver): - """Executes commands relating to Volumes.""" + """Base class for NetApp NFS driver. + Executes commands relating to Volumes. 
+ """ def __init__(self, *args, **kwargs): # NOTE(vish): db is set by Manager + validate_instantiation(**kwargs) self._execute = None self._context = None super(NetAppNFSDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(netapp_opts) - self.configuration.append_config_values(netapp_nfs_opts) + self.configuration.append_config_values(netapp_connection_opts) + self.configuration.append_config_values(netapp_basicauth_opts) + self.configuration.append_config_values(netapp_transport_opts) def set_execute(self, execute): self._execute = execute def do_setup(self, context): - self._context = context - self.check_for_setup_error() - self._client = self._get_client() + raise NotImplementedError() def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" - self._check_dfm_flags() - super(NetAppNFSDriver, self).check_for_setup_error() + raise NotImplementedError() def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" @@ -75,10 +75,10 @@ class NetAppNFSDriver(nfs.NfsDriver): snap_size = snapshot.volume_size if vol_size != snap_size: - msg = (_('Cannot create volume of size %(vol_size)s from ' - 'snapshot of size %(snap_size)s') % - {'vol_size': vol_size, 'snap_size': snap_size}) - raise exception.CinderException(msg) + msg = _('Cannot create volume of size %(vol_size)s from ' + 'snapshot of size %(snap_size)s') + msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size} + raise exception.CinderException(msg % msg_fmt) self._clone_volume(snapshot.name, volume.name, snapshot.volume_id) share = self._get_volume_location(snapshot.volume_id) @@ -101,95 +101,22 @@ class NetAppNFSDriver(nfs.NfsDriver): self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name), run_as_root=True) - def _check_dfm_flags(self): - """Raises error if any required configuration flag for OnCommand proxy - is missing. 
- """ - required_flags = ['netapp_wsdl_url', - 'netapp_login', - 'netapp_password', - 'netapp_server_hostname', - 'netapp_server_port'] - for flag in required_flags: - if not getattr(self.configuration, flag, None): - raise exception.CinderException(_('%s is not set') % flag) - def _get_client(self): - """Creates SOAP _client for ONTAP-7 DataFabric Service.""" - client = suds.client.Client( - self.configuration.netapp_wsdl_url, - username=self.configuration.netapp_login, - password=self.configuration.netapp_password) - soap_url = 'http://%s:%s/apis/soap/v1' % ( - self.configuration.netapp_server_hostname, - self.configuration.netapp_server_port) - client.set_options(location=soap_url) - - return client + """Creates client for server.""" + raise NotImplementedError() def _get_volume_location(self, volume_id): - """Returns NFS mount address as :""" + """Returns NFS mount address as :.""" nfs_server_ip = self._get_host_ip(volume_id) export_path = self._get_export_path(volume_id) return (nfs_server_ip + ':' + export_path) def _clone_volume(self, volume_name, clone_name, volume_id): """Clones mounted volume with OnCommand proxy API.""" - host_id = self._get_host_id(volume_id) - export_path = self._get_full_export_path(volume_id, host_id) - - request = self._client.factory.create('Request') - request.Name = 'clone-start' - - clone_start_args = ('%s/%s' - '%s/%s') - - request.Args = text.Raw(clone_start_args % (export_path, - volume_name, - export_path, - clone_name)) - - resp = self._client.service.ApiProxy(Target=host_id, - Request=request) - - if (resp.Status == 'passed' and - self.configuration.synchronous_snapshot_create): - clone_id = resp.Results['clone-id'][0] - clone_id_info = clone_id['clone-id-info'][0] - clone_operation_id = int(clone_id_info['clone-op-id'][0]) - - self._wait_for_clone_finished(clone_operation_id, host_id) - elif resp.Status == 'failed': - raise exception.CinderException(resp.Reason) - - def _wait_for_clone_finished(self, clone_operation_id, host_id): - """ - Polls ONTAP7 for clone status. Returns once clone is finished. 
- :param clone_operation_id: Identifier of ONTAP clone operation - """ - clone_list_options = ('' - '' - '%d' - '' - '' - '') - - request = self._client.factory.create('Request') - request.Name = 'clone-list-status' - request.Args = text.Raw(clone_list_options % clone_operation_id) - - resp = self._client.service.ApiProxy(Target=host_id, Request=request) - - while resp.Status != 'passed': - time.sleep(1) - resp = self._client.service.ApiProxy(Target=host_id, - Request=request) + raise NotImplementedError() def _get_provider_location(self, volume_id): - """ - Returns provider location for given volume - :param volume_id: - """ + """Returns provider location for given volume.""" volume = self.db.volume_get(self._context, volume_id) return volume.provider_location @@ -201,38 +128,6 @@ class NetAppNFSDriver(nfs.NfsDriver): """Returns NFS export path for the given volume.""" return self._get_provider_location(volume_id).split(':')[1] - def _get_host_id(self, volume_id): - """Returns ID of the ONTAP-7 host.""" - host_ip = self._get_host_ip(volume_id) - server = self._client.service - - resp = server.HostListInfoIterStart(ObjectNameOrId=host_ip) - tag = resp.Tag - - try: - res = server.HostListInfoIterNext(Tag=tag, Maximum=1) - if hasattr(res, 'Hosts') and res.Hosts.HostInfo: - return res.Hosts.HostInfo[0].HostId - finally: - server.HostListInfoIterEnd(Tag=tag) - - def _get_full_export_path(self, volume_id, host_id): - """Returns full path to the NFS share, e.g. /vol/vol0/home.""" - export_path = self._get_export_path(volume_id) - command_args = '%s' - - request = self._client.factory.create('Request') - request.Name = 'nfs-exportfs-storage-path' - request.Args = text.Raw(command_args % export_path) - - resp = self._client.service.ApiProxy(Target=host_id, - Request=request) - - if resp.Status == 'passed': - return resp.Results['actual-pathname'][0] - elif resp.Status == 'failed': - raise exception.CinderException(resp.Reason) - def _volume_not_present(self, nfs_mount, volume_name): """Check if volume exists.""" try: @@ -262,7 +157,8 @@ class NetAppNFSDriver(nfs.NfsDriver): def _get_volume_path(self, nfs_share, volume_name): """Get volume path (local fs path) for given volume name on given nfs - share + share. 
+ @param nfs_share string, example 172.18.194.100:/var/nfs @param volume_name string, example volume-91ee65ec-c473-4391-8c09-162b00c68a8c @@ -276,10 +172,10 @@ class NetAppNFSDriver(nfs.NfsDriver): src_vol_size = src_vref.size if vol_size != src_vol_size: - msg = (_('Cannot create clone of size %(vol_size)s from ' - 'volume of size %(src_vol_size)s') % - {'vol_size': vol_size, 'src_vol_size': src_vol_size}) - raise exception.CinderException(msg) + msg = _('Cannot create clone of size %(vol_size)s from ' + 'volume of size %(src_vol_size)s') + msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size} + raise exception.CinderException(msg % msg_fmt) self._clone_volume(src_vref.name, volume.name, src_vref.id) share = self._get_volume_location(src_vref.id) @@ -290,71 +186,6 @@ class NetAppNFSDriver(nfs.NfsDriver): """Retrieve status info from volume group.""" super(NetAppNFSDriver, self)._update_volume_status() - backend_name = self.configuration.safe_get('volume_backend_name') - self._stats["volume_backend_name"] = (backend_name or - 'NetApp_NFS_7mode') - self._stats["vendor_name"] = 'NetApp' - self._stats["driver_version"] = '1.0' - - -class NetAppCmodeNfsDriver (NetAppNFSDriver): - """Executes commands related to volumes on c mode.""" - - def __init__(self, *args, **kwargs): - super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs) - - def do_setup(self, context): - self._context = context - self.check_for_setup_error() - self._client = self._get_client() - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - self._check_flags() - - def _clone_volume(self, volume_name, clone_name, volume_id): - """Clones mounted volume with NetApp Cloud Services.""" - host_ip = self._get_host_ip(volume_id) - export_path = self._get_export_path(volume_id) - LOG.debug(_("Cloning with params ip %(host_ip)s, exp_path" - "%(export_path)s, vol %(volume_name)s, " - "clone_name %(clone_name)s"), - {'host_ip': host_ip, 'export_path': export_path, - 'volume_name': volume_name, 'clone_name': clone_name}) - self._client.service.CloneNasFile(host_ip, export_path, - volume_name, clone_name) - - def _check_flags(self): - """Raises error if any required configuration flag for NetApp Cloud - Webservices is missing. - """ - required_flags = ['netapp_wsdl_url', - 'netapp_login', - 'netapp_password', - 'netapp_server_hostname', - 'netapp_server_port'] - for flag in required_flags: - if not getattr(self.configuration, flag, None): - raise exception.CinderException(_('%s is not set') % flag) - - def _get_client(self): - """Creates SOAP _client for NetApp Cloud service.""" - client = suds.client.Client( - self.configuration.netapp_wsdl_url, - username=self.configuration.netapp_login, - password=self.configuration.netapp_password) - return client - - def _update_volume_status(self): - """Retrieve status info from volume group.""" - super(NetAppCmodeNfsDriver, self)._update_volume_status() - - backend_name = self.configuration.safe_get('volume_backend_name') - self._stats["volume_backend_name"] = (backend_name or - 'NetApp_NFS_Cluster') - self._stats["vendor_name"] = 'NetApp' - self._stats["driver_version"] = '1.0' - class NetAppDirectNfsDriver (NetAppNFSDriver): """Executes commands related to volumes on NetApp filer.""" @@ -409,26 +240,10 @@ class NetAppDirectNfsDriver (NetAppNFSDriver): if not isinstance(elem, NaElement): raise ValueError('Expects NaElement') - def _invoke_successfully(self, na_element, vserver=None): - """Invoke the api for successful result. 
- - If vserver is present then invokes vserver/vfiler api - else filer/Cluster api. - :param vserver: vserver/vfiler name. - """ - self._is_naelement(na_element) - server = copy.copy(self._client) - if vserver: - server.set_vserver(vserver) - else: - server.set_vserver(None) - result = server.invoke_successfully(na_element, True) - return result - def _get_ontapi_version(self): """Gets the supported ontapi version.""" ontapi_version = NaElement('system-get-ontapi-version') - res = self._invoke_successfully(ontapi_version, False) + res = self._client.invoke_successfully(ontapi_version, False) major = res.get_child_content('major-version') minor = res.get_child_content('minor-version') return (major, minor) @@ -447,6 +262,22 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver): (major, minor) = self._get_ontapi_version() client.set_api_version(major, minor) + def _invoke_successfully(self, na_element, vserver=None): + """Invoke the api for successful result. + + If vserver is present then invokes vserver api + else Cluster api. + :param vserver: vserver name. + """ + self._is_naelement(na_element) + server = copy.copy(self._client) + if vserver: + server.set_vserver(vserver) + else: + server.set_vserver(None) + result = server.invoke_successfully(na_element, True) + return result + def _clone_volume(self, volume_name, clone_name, volume_id): """Clones mounted volume on NetApp Cluster.""" host_ip = self._get_host_ip(volume_id) @@ -495,17 +326,18 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver): vols = attr_list.get_children() vol_id = vols[0].get_child_by_name('volume-id-attributes') return vol_id.get_child_content('name') - raise exception.NotFound(_("No volume on cluster with vserver" - "%(vserver)s and junction path " - "%(junction)s"), {'vserver': vserver, - 'junction': junction}) + msg_fmt = {'vserver': vserver, 'junction': junction} + raise exception.NotFound(_("""No volume on cluster with vserver + %(vserver)s and junction path %(junction)s + """) % msg_fmt) def _clone_file(self, volume, src_path, dest_path, vserver=None): """Clones file on vserver.""" - LOG.debug(_("Cloning with params volume %(volume)s,src %(src_path)s," - "dest %(dest_path)s, vserver %(vserver)s"), - {'volume': volume, 'src_path': src_path, - 'dest_path': dest_path, 'vserver': vserver}) + msg = _("""Cloning with params volume %(volume)s,src %(src_path)s, + dest %(dest_path)s, vserver %(vserver)s""") + msg_fmt = {'volume': volume, 'src_path': src_path, + 'dest_path': dest_path, 'vserver': vserver} + LOG.debug(msg % msg_fmt) clone_create = NaElement.create_node_with_children( 'clone-create', **{'volume': volume, 'source-path': src_path, @@ -515,12 +347,13 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver): def _update_volume_status(self): """Retrieve status info from volume group.""" super(NetAppDirectCmodeNfsDriver, self)._update_volume_status() - + netapp_backend = 'NetApp_NFS_cluster_direct' backend_name = self.configuration.safe_get('volume_backend_name') self._stats["volume_backend_name"] = (backend_name or - 'NetApp_NFS_cluster_direct') + netapp_backend) self._stats["vendor_name"] = 'NetApp' self._stats["driver_version"] = '1.0' + provide_ems(self, self._client, self._stats, netapp_backend) class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver): @@ -534,6 +367,22 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver): (major, minor) = self._get_ontapi_version() client.set_api_version(major, minor) + def _invoke_successfully(self, na_element, vfiler=None): + """Invoke the api for 
successful result.
+
+        If vfiler is present then invokes vfiler api
+        else filer api.
+        :param vfiler: vfiler name.
+        """
+        self._is_naelement(na_element)
+        server = copy.copy(self._client)
+        if vfiler:
+            server.set_vfiler(vfiler)
+        else:
+            server.set_vfiler(None)
+        result = server.invoke_successfully(na_element, True)
+        return result
+
     def _clone_volume(self, volume_name, clone_name, volume_id):
         """Clones mounted volume with NetApp filer."""
         export_path = self._get_export_path(volume_id)
@@ -565,8 +414,9 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
 
         :returns: clone-id
         """
-        LOG.debug(_("Cloning with src %(src_path)s, dest %(dest_path)s"),
-                  {'src_path': src_path, 'dest_path': dest_path})
+        msg_fmt = {'src_path': src_path, 'dest_path': dest_path}
+        LOG.debug(_("""Cloning with src %(src_path)s, dest %(dest_path)s""")
+                  % msg_fmt)
         clone_start = NaElement.create_node_with_children(
             'clone-start',
             **{'source-path': src_path,
@@ -629,9 +479,11 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
     def _update_volume_status(self):
         """Retrieve status info from volume group."""
         super(NetAppDirect7modeNfsDriver, self)._update_volume_status()
-
+        netapp_backend = 'NetApp_NFS_7mode_direct'
         backend_name = self.configuration.safe_get('volume_backend_name')
         self._stats["volume_backend_name"] = (backend_name or
                                               'NetApp_NFS_7mode_direct')
         self._stats["vendor_name"] = 'NetApp'
         self._stats["driver_version"] = '1.0'
+        provide_ems(self, self._client, self._stats, netapp_backend,
+                    server_type="7mode")
diff --git a/cinder/volume/drivers/netapp/options.py b/cinder/volume/drivers/netapp/options.py
new file mode 100644
index 000000000..469837ac5
--- /dev/null
+++ b/cinder/volume/drivers/netapp/options.py
@@ -0,0 +1,77 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# Copyright (c) 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Contains configuration options for NetApp drivers.
+
+Common place to hold configuration options for all NetApp drivers.
+Options need to be grouped into granular units to be able to be reused
+by different modules and classes. This does not restrict declaring options in
+individual modules. If options are not reusable, they can be declared in
+individual modules. It is recommended to keep options in a single
+place to ensure reusability and better management of configuration options.
+""" + +from oslo.config import cfg + +netapp_proxy_opts = [ + cfg.StrOpt('netapp_storage_family', + default='ontap_cluster', + help='Storage family type.'), + cfg.StrOpt('netapp_storage_protocol', + default=None, + help='Storage protocol type.'), ] + +netapp_connection_opts = [ + cfg.StrOpt('netapp_server_hostname', + default=None, + help='Host name for the storage controller'), + cfg.IntOpt('netapp_server_port', + default=80, + help='Port number for the storage controller'), ] + +netapp_transport_opts = [ + cfg.StrOpt('netapp_transport_type', + default='http', + help='Transport type protocol'), ] + +netapp_basicauth_opts = [ + cfg.StrOpt('netapp_login', + default=None, + help='User name for the storage controller'), + cfg.StrOpt('netapp_password', + default=None, + help='Password for the storage controller', + secret=True), ] + +netapp_provisioning_opts = [ + cfg.FloatOpt('netapp_size_multiplier', + default=1.2, + help='Volume size multiplier to ensure while creation'), + cfg.StrOpt('netapp_volume_list', + default=None, + help='Comma separated volumes to be used for provisioning'), ] + +netapp_cluster_opts = [ + cfg.StrOpt('netapp_vserver', + default='openstack', + help='Cluster vserver to use for provisioning'), ] + +netapp_7mode_opts = [ + cfg.StrOpt('netapp_vfiler', + default=None, + help='Vfiler to use for provisioning'), ] diff --git a/cinder/volume/drivers/netapp/utils.py b/cinder/volume/drivers/netapp/utils.py new file mode 100644 index 000000000..3a6c0abe8 --- /dev/null +++ b/cinder/volume/drivers/netapp/utils.py @@ -0,0 +1,120 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Utilities for NetApp drivers. + +This module contains common utilities to be used by one or more +NetApp drivers to achieve the desired functionality. +""" + +import copy +import socket + +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder.volume.drivers.netapp.api import NaApiError +from cinder.volume.drivers.netapp.api import NaElement + +LOG = logging.getLogger(__name__) + + +def provide_ems(requester, server, stats, netapp_backend, + server_type="cluster"): + """Provide ems with volume stats for the requester. + + :param server_type: cluster or 7mode. 
+ """ + def _create_ems(stats, netapp_backend, server_type): + """Create ems api request.""" + ems_log = NaElement('ems-autosupport-log') + host = socket.getfqdn() or 'Cinder_node' + dest = "cluster node" if server_type == "cluster"\ + else "7 mode controller" + ems_log.add_new_child('computer-name', host) + ems_log.add_new_child('event-id', '0') + ems_log.add_new_child('event-source', + 'Cinder driver %s' % netapp_backend) + ems_log.add_new_child('app-version', stats.get('driver_version', + 'Undefined')) + ems_log.add_new_child('category', 'provisioning') + ems_log.add_new_child('event-description', + 'OpenStack volume created on %s' % dest) + ems_log.add_new_child('log-level', '6') + ems_log.add_new_child('auto-support', 'true') + return ems_log + + def _create_vs_get(): + """Create vs_get api request.""" + vs_get = NaElement('vserver-get-iter') + vs_get.add_new_child('max-records', '1') + query = NaElement('query') + query.add_node_with_children('vserver-info', + **{'vserver-type': 'node'}) + vs_get.add_child_elem(query) + desired = NaElement('desired-attributes') + desired.add_node_with_children( + 'vserver-info', **{'vserver-name': '', 'vserver-type': ''}) + vs_get.add_child_elem(desired) + return vs_get + + def _get_cluster_node(na_server): + """Get the cluster node for ems.""" + na_server.set_vserver(None) + vs_get = _create_vs_get() + res = na_server.invoke_successfully(vs_get) + if (res.get_child_content('num-records') and + int(res.get_child_content('num-records')) > 0): + attr_list = res.get_child_by_name('attributes-list') + vs_info = attr_list.get_child_by_name('vserver-info') + vs_name = vs_info.get_child_content('vserver-name') + return vs_name + raise NaApiError(code='Not found', message='No records found') + + do_ems = True + if hasattr(requester, 'last_ems'): + sec_limit = 604800 + if not (timeutils.is_older_than(requester.last_ems, sec_limit) or + timeutils.is_older_than(requester.last_ems, sec_limit - 59)): + do_ems = False + if do_ems: + na_server = copy.copy(server) + na_server.set_timeout(25) + ems = _create_ems(stats, netapp_backend, server_type) + try: + if server_type == "cluster": + node = _get_cluster_node(na_server) + na_server.set_vserver(node) + else: + na_server.set_vfiler(None) + na_server.invoke_successfully(ems, True) + requester.last_ems = timeutils.utcnow() + LOG.debug(_("ems executed successfully.")) + except NaApiError as e: + LOG.debug(_("Failed to invoke ems. Message : %s") % e) + + +def validate_instantiation(**kwargs): + """Checks if a driver is instantiated other than by the unified driver. + + Helps check direct instantiation of netapp drivers. + Call this function in every netapp block driver constructor. + """ + if kwargs and kwargs.get('netapp_mode') == 'proxy': + return + LOG.warn(_("It is not the recommended way to use drivers by NetApp. 
" + "Please use NetAppDriver to achieve the functionality.")) diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index b533cb597..2430495a5 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -83,12 +83,6 @@ MAPPING = { 'cinder.volume.drivers.san.solaris.SolarisISCSIDriver', 'cinder.volume.san.HpSanISCSIDriver': 'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver', - 'cinder.volume.netapp.NetAppISCSIDriver': - 'cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver', - 'cinder.volume.netapp.NetAppCmodeISCSIDriver': - 'cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver', - 'cinder.volume.netapp_nfs.NetAppNFSDriver': - 'cinder.volume.drivers.netapp.nfs.NetAppNFSDriver', 'cinder.volume.nfs.NfsDriver': 'cinder.volume.drivers.nfs.NfsDriver', 'cinder.volume.solidfire.SolidFire': -- 2.45.2