import BaseHTTPServer
import httplib
-import StringIO
-
from lxml import etree
+import StringIO
-from cinder.exception import InvalidInput
-from cinder.exception import VolumeBackendAPIException
+from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
+from cinder.volume.drivers.netapp import ssc_utils
+from cinder.volume.drivers.netapp import utils
LOG = logging.getLogger("cinder.volume.driver")
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
+ vol1 = ssc_utils.NetAppVolume('lun1', 'openstack')
+ vol1.state['vserver_root'] = False
+ vol1.state['status'] = 'online'
+ vol1.state['junction_active'] = True
+ vol1.space['size_avl_bytes'] = '4000000000'
+ vol1.space['size_total_bytes'] = '5000000000'
+ vol1.space['space-guarantee-enabled'] = False
+ vol1.space['space-guarantee'] = 'file'
+ vol1.space['thin_provisioned'] = True
+ vol1.mirror['mirrored'] = True
+ vol1.qos['qos_policy_group'] = None
+ vol1.aggr['name'] = 'aggr1'
+ vol1.aggr['junction'] = '/vola'
+ vol1.sis['dedup'] = True
+ vol1.sis['compression'] = True
+ vol1.aggr['raid_type'] = 'raiddp'
+ vol1.aggr['ha_policy'] = 'cfo'
+ vol1.aggr['disk_type'] = 'SSD'
+ ssc_map = {'mirrored': set([vol1]), 'dedup': set([vol1]),
+ 'compression': set([vol1]),
+ 'thin': set([vol1]), 'all': set([vol1])}
def setUp(self):
super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
+ self.stubs.Set(
+ ssc_utils, 'refresh_cluster_ssc', lambda a, b, c: None)
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(httplib, 'HTTPConnection',
client = driver.client
client.set_api_version(1, 15)
self.driver = driver
+ self.driver.ssc_vols = self.ssc_map
def _set_config(self, configuration):
configuration.netapp_storage_protocol = 'iscsi'
self.driver.create_volume_from_snapshot(self.volume,
self.snapshot_fail)
raise AssertionError()
- except VolumeBackendAPIException:
+ except exception.VolumeBackendAPIException:
pass
finally:
self.driver.delete_volume(self.volume)
self.driver.create_cloned_volume(self.volume_clone_fail,
self.volume)
raise AssertionError()
- except VolumeBackendAPIException:
+ except exception.VolumeBackendAPIException:
pass
finally:
self.driver.delete_volume(self.volume)
raise AssertionError('Target portal is none')
def test_fail_create_vol(self):
- self.assertRaises(VolumeBackendAPIException,
+ self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, self.vol_fail)
def test_vol_stats(self):
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Wrong storage family is getting accepted.')
- except InvalidInput:
+ except exception.InvalidInput:
pass
def test_incorrect_protocol(self):
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Wrong storage protocol is getting accepted.')
- except InvalidInput:
+ except exception.InvalidInput:
pass
def test_non_netapp_driver(self):
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Non NetApp driver is getting instantiated.')
- except InvalidInput:
+ except exception.InvalidInput:
pass
finally:
common.netapp_unified_plugin_registry.pop('test_family')
success = False
try:
self.driver.create_volume(self.volume)
- except VolumeBackendAPIException:
+ except exception.VolumeBackendAPIException:
success = True
pass
finally:
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Unit tests for the NetApp-specific ssc module."""
+
+import BaseHTTPServer
+import copy
+import httplib
+from lxml import etree
+import mox
+from mox import IgnoreArg
+from mox import IsA
+from mox import MockObject
+import StringIO
+
+from cinder import context
+from cinder import exception
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.netapp import api
+from cinder.volume.drivers.netapp import ssc_utils
+
+
+class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """HTTP handler that doesn't spam the log."""
+
+ def log_message(self, format, *args):
+ pass
+
+
+class FakeHttplibSocket(object):
+ """A fake socket implementation for httplib.HTTPResponse."""
+ def __init__(self, value):
+ self._rbuffer = StringIO.StringIO(value)
+ self._wbuffer = StringIO.StringIO('')
+ oldclose = self._wbuffer.close
+
+ def newclose():
+ self.result = self._wbuffer.getvalue()
+ oldclose()
+ self._wbuffer.close = newclose
+
+ def makefile(self, mode, _other):
+ """Returns the socket's internal buffer"""
+ if mode == 'r' or mode == 'rb':
+ return self._rbuffer
+ if mode == 'w' or mode == 'wb':
+ return self._wbuffer
+
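A brief aside (not part of the patch): FakeHttplibSocket exposes its StringIO buffers through makefile(), which is all httplib.HTTPResponse needs to parse a canned wire-format reply. A minimal sketch, with an invented response:

```python
raw = ("HTTP/1.1 200 OK\r\n"
       "Content-Type: text/xml; charset=utf-8\r\n"
       "\r\n"
       "<results status='passed'/>")
sock = FakeHttplibSocket(raw)
response = httplib.HTTPResponse(sock)
response.begin()  # parses the status line and headers from the buffer
assert response.status == 200
assert response.read() == "<results status='passed'/>"
```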
+
+RESPONSE_PREFIX_DIRECT_CMODE = """<?xml version='1.0' encoding='UTF-8' ?>
+<!DOCTYPE netapp SYSTEM 'file:/etc/netapp_gx.dtd'>"""
+
+RESPONSE_PREFIX_DIRECT = """
+<netapp version='1.15' xmlns='http://www.netapp.com/filer/admin'>"""
+
+RESPONSE_SUFFIX_DIRECT = """</netapp>"""
+
+
+class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler):
+ """HTTP handler that fakes enough stuff to allow the driver to run."""
+
+ def do_GET(s):
+ """Respond to a GET request."""
+ if '/servlets/netapp.servlets.admin.XMLrequest_filer' != s.path:
+ s.send_response(404)
+ s.end_headers()
+ return
+ s.send_response(200)
+ s.send_header("Content-Type", "text/xml; charset=utf-8")
+ s.end_headers()
+ out = s.wfile
+ out.write('<netapp version="1.15">'
+ '<results reason="Not supported method type"'
+ ' status="failed" errno="Not_Allowed"/></netapp>')
+
+ def do_POST(s):
+ """Respond to a POST request."""
+ if '/servlets/netapp.servlets.admin.XMLrequest_filer' != s.path:
+ s.send_response(404)
+ s.end_headers()
+ return
+ request_xml = s.rfile.read(int(s.headers['Content-Length']))
+ root = etree.fromstring(request_xml)
+ body = [x for x in root.iterchildren()]
+ request = body[0]
+ tag = request.tag
+ api = etree.QName(tag).localname or tag
+ if 'volume-get-iter' == api:
+ body = """<results status="passed"><attributes-list>
+ <volume-attributes>
+ <volume-id-attributes>
+ <name>iscsi</name>
+ <owning-vserver-name>Openstack</owning-vserver-name>
+ <containing-aggregate-name>aggr0
+ </containing-aggregate-name>
+ <junction-path>/iscsi</junction-path>
+ <type>rw</type>
+ </volume-id-attributes>
+ <volume-space-attributes>
+ <size-available>214748364</size-available>
+ <size-total>224748364</size-total>
+ <space-guarantee-enabled>enabled</space-guarantee-enabled>
+ <space-guarantee>file</space-guarantee>
+ </volume-space-attributes>
+ <volume-state-attributes>
+ <is-cluster-volume>true
+ </is-cluster-volume>
+ <is-vserver-root>false</is-vserver-root>
+ <state>online</state>
+ <is-inconsistent>false</is-inconsistent>
+ <is-invalid>false</is-invalid>
+ <is-junction-active>true</is-junction-active>
+ </volume-state-attributes>
+ </volume-attributes>
+ <volume-attributes>
+ <volume-id-attributes>
+ <name>nfsvol</name>
+ <owning-vserver-name>Openstack
+ </owning-vserver-name>
+ <containing-aggregate-name>aggr0
+ </containing-aggregate-name>
+ <junction-path>/nfs</junction-path>
+ <type>rw</type>
+ </volume-id-attributes>
+ <volume-space-attributes>
+ <size-available>14748364</size-available>
+ <size-total>24748364</size-total>
+ <space-guarantee-enabled>enabled
+ </space-guarantee-enabled>
+ <space-guarantee>volume</space-guarantee>
+ </volume-space-attributes>
+ <volume-state-attributes>
+ <is-cluster-volume>true
+ </is-cluster-volume>
+ <is-vserver-root>false</is-vserver-root>
+ <state>online</state>
+ <is-inconsistent>false</is-inconsistent>
+ <is-invalid>false</is-invalid>
+ <is-junction-active>true</is-junction-active>
+ </volume-state-attributes>
+ </volume-attributes>
+ <volume-attributes>
+ <volume-id-attributes>
+ <name>nfsvol2</name>
+ <owning-vserver-name>Openstack
+ </owning-vserver-name>
+ <containing-aggregate-name>aggr0
+ </containing-aggregate-name>
+ <junction-path>/nfs2</junction-path>
+ <type>rw</type>
+ </volume-id-attributes>
+ <volume-space-attributes>
+ <size-available>14748364</size-available>
+ <size-total>24748364</size-total>
+ <space-guarantee-enabled>enabled
+ </space-guarantee-enabled>
+ <space-guarantee>volume</space-guarantee>
+ </volume-space-attributes>
+ <volume-state-attributes>
+ <is-cluster-volume>true
+ </is-cluster-volume>
+ <is-vserver-root>false</is-vserver-root>
+ <state>online</state>
+ <is-inconsistent>true</is-inconsistent>
+ <is-invalid>true</is-invalid>
+ <is-junction-active>true</is-junction-active>
+ </volume-state-attributes>
+ </volume-attributes>
+ <volume-attributes>
+ <volume-id-attributes>
+ <name>nfsvol3</name>
+ <owning-vserver-name>Openstack
+ </owning-vserver-name>
+ <containing-aggregate-name>aggr0
+ </containing-aggregate-name>
+ <junction-path>/nfs3</junction-path>
+ <type>rw</type>
+ </volume-id-attributes>
+ <volume-space-attributes>
+ <space-guarantee-enabled>enabled
+ </space-guarantee-enabled>
+ <space-guarantee>volume
+ </space-guarantee>
+ </volume-space-attributes>
+ <volume-state-attributes>
+ <is-cluster-volume>true
+ </is-cluster-volume>
+ <is-vserver-root>false</is-vserver-root>
+ <state>online</state>
+ <is-inconsistent>false</is-inconsistent>
+ <is-invalid>false</is-invalid>
+ <is-junction-active>true</is-junction-active>
+ </volume-state-attributes>
+ </volume-attributes>
+ </attributes-list>
+ <num-records>4</num-records></results>"""
+ elif 'aggr-options-list-info' == api:
+ body = """<results status="passed">
+ <options>
+ <aggr-option-info>
+ <name>ha_policy</name>
+ <value>cfo</value>
+ </aggr-option-info>
+ <aggr-option-info>
+ <name>raidtype</name>
+ <value>raid_dp</value>
+ </aggr-option-info>
+ </options>
+ </results>"""
+ elif 'sis-get-iter' == api:
+ body = """<results status="passed">
+ <attributes-list>
+ <sis-status-info>
+ <path>/vol/iscsi</path>
+ <is-compression-enabled>
+ true
+ </is-compression-enabled>
+ <state>enabled</state>
+ </sis-status-info>
+ </attributes-list>
+ </results>"""
+ elif 'storage-disk-get-iter' == api:
+ body = """<results status="passed">
+ <attributes-list>
+ <storage-disk-info>
+ <disk-raid-info>
+ <effective-disk-type>SATA</effective-disk-type>
+ </disk-raid-info>
+ </storage-disk-info>
+ </attributes-list>
+ </results>"""
+ else:
+ # Unknown API
+ s.send_response(500)
+ s.end_headers()
+ return
+ s.send_response(200)
+ s.send_header("Content-Type", "text/xml; charset=utf-8")
+ s.end_headers()
+ s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE)
+ s.wfile.write(RESPONSE_PREFIX_DIRECT)
+ s.wfile.write(body)
+ s.wfile.write(RESPONSE_SUFFIX_DIRECT)
+
+
+class FakeDirectCmodeHTTPConnection(object):
+ """A fake httplib.HTTPConnection for netapp tests.
+
+ Requests made via this connection actually get translated and routed into
+ the fake direct handler above; the response is then turned into
+ the httplib.HTTPResponse that the caller expects.
+ """
+ def __init__(self, host, timeout=None):
+ self.host = host
+
+ def request(self, method, path, data=None, headers=None):
+ if not headers:
+ headers = {}
+ req_str = '%s %s HTTP/1.1\r\n' % (method, path)
+ for key, value in headers.iteritems():
+ req_str += "%s: %s\r\n" % (key, value)
+ if data:
+ req_str += '\r\n%s' % data
+
+ # NOTE(vish): normally the http transport normalizes from unicode
+ sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
+ # NOTE(vish): stop the server from trying to look up address from
+ # the fake socket
+ FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1'
+ self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None)
+
+ self.sock = FakeHttplibSocket(sock.result)
+ self.http_response = httplib.HTTPResponse(self.sock)
+
+ def set_debuglevel(self, level):
+ pass
+
+ def getresponse(self):
+ self.http_response.begin()
+ return self.http_response
+
+ def getresponsebody(self):
+ return self.sock.result
+
+
+def createNetAppVolume(**kwargs):
+ vol = ssc_utils.NetAppVolume(kwargs['name'], kwargs['vs'])
+ vol.state['vserver_root'] = kwargs.get('vs_root')
+ vol.state['status'] = kwargs.get('status')
+ vol.state['junction_active'] = kwargs.get('junc_active')
+ vol.space['size_avl_bytes'] = kwargs.get('avl_byt')
+ vol.space['size_total_bytes'] = kwargs.get('total_byt')
+ vol.space['space-guarantee-enabled'] = kwargs.get('sg_enabled')
+ vol.space['space-guarantee'] = kwargs.get('sg')
+ vol.space['thin_provisioned'] = kwargs.get('thin')
+ vol.mirror['mirrored'] = kwargs.get('mirrored')
+ vol.qos['qos_policy_group'] = kwargs.get('qos')
+ vol.aggr['name'] = kwargs.get('aggr_name')
+ vol.aggr['junction'] = kwargs.get('junction')
+ vol.sis['dedup'] = kwargs.get('dedup')
+ vol.sis['compression'] = kwargs.get('compression')
+ vol.aggr['raid_type'] = kwargs.get('raid')
+ vol.aggr['ha_policy'] = kwargs.get('ha')
+ vol.aggr['disk_type'] = kwargs.get('disk')
+ return vol
+
+
+class SscUtilsTestCase(test.TestCase):
+ """Test ssc utis."""
+ vol1 = createNetAppVolume(name='vola', vs='openstack',
+ vs_root=False, status='online', junc_active=True,
+ avl_byt='1000', total_byt='1500',
+ sg_enabled=False,
+ sg='file', thin=False, mirrored=False,
+ qos=None, aggr_name='aggr1', junction='/vola',
+ dedup=False, compression=False,
+ raid='raiddp', ha='cfo', disk='SSD')
+
+ vol2 = createNetAppVolume(name='volb', vs='openstack',
+ vs_root=False, status='online', junc_active=True,
+ avl_byt='2000', total_byt='2500',
+ sg_enabled=True,
+ sg='file', thin=True, mirrored=False,
+ qos=None, aggr_name='aggr2', junction='/volb',
+ dedup=True, compression=False,
+ raid='raid4', ha='cfo', disk='SSD')
+
+ vol3 = createNetAppVolume(name='volc', vs='openstack',
+ vs_root=False, status='online', junc_active=True,
+ avl_byt='3000', total_byt='3500',
+ sg_enabled=True,
+ sg='volume', thin=True, mirrored=False,
+ qos=None, aggr_name='aggr1', junction='/volc',
+ dedup=True, compression=True,
+ raid='raiddp', ha='cfo', disk='SAS')
+
+ vol4 = createNetAppVolume(name='vold', vs='openstack',
+ vs_root=False, status='online', junc_active=True,
+ avl_byt='4000', total_byt='4500',
+ sg_enabled=False,
+ sg='none', thin=False, mirrored=False,
+ qos=None, aggr_name='aggr1', junction='/vold',
+ dedup=False, compression=False,
+ raid='raiddp', ha='cfo', disk='SSD')
+
+ vol5 = createNetAppVolume(name='vole', vs='openstack',
+ vs_root=False, status='online', junc_active=True,
+ avl_byt='5000', total_byt='5500',
+ sg_enabled=True,
+ sg='none', thin=False, mirrored=True,
+ qos=None, aggr_name='aggr2', junction='/vole',
+ dedup=True, compression=False,
+ raid='raid4', ha='cfo', disk='SAS')
+
+ def setUp(self):
+ super(SscUtilsTestCase, self).setUp()
+ self.stubs.Set(httplib, 'HTTPConnection',
+ FakeDirectCmodeHTTPConnection)
+
+ def test_cl_vols_ssc_all(self):
+ """Test cluster ssc for all vols."""
+ mox = self.mox
+ na_server = api.NaServer('127.0.0.1')
+ vserver = 'openstack'
+ test_vols = set([copy.deepcopy(self.vol1),
+ copy.deepcopy(self.vol2), copy.deepcopy(self.vol3)])
+ sis = {'vola': {'dedup': False, 'compression': False},
+ 'volb': {'dedup': True, 'compression': False}}
+
+ mox.StubOutWithMock(ssc_utils, 'query_cluster_vols_for_ssc')
+ mox.StubOutWithMock(ssc_utils, 'get_sis_vol_dict')
+ mox.StubOutWithMock(ssc_utils, 'query_aggr_options')
+ mox.StubOutWithMock(ssc_utils, 'query_aggr_storage_disk')
+ ssc_utils.query_cluster_vols_for_ssc(
+ na_server, vserver, None).AndReturn(test_vols)
+ ssc_utils.get_sis_vol_dict(na_server, vserver, None).AndReturn(sis)
+ raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
+ ssc_utils.query_aggr_options(
+ na_server, IgnoreArg()).AndReturn(raiddp)
+ ssc_utils.query_aggr_storage_disk(
+ na_server, IgnoreArg()).AndReturn('SSD')
+ raid4 = {'ha_policy': 'cfo', 'raid_type': 'raid4'}
+ ssc_utils.query_aggr_options(
+ na_server, IgnoreArg()).AndReturn(raid4)
+ ssc_utils.query_aggr_storage_disk(
+ na_server, IgnoreArg()).AndReturn('SAS')
+ mox.ReplayAll()
+
+ res_vols = ssc_utils.get_cluster_vols_with_ssc(
+ na_server, vserver, volume=None)
+
+ mox.VerifyAll()
+ for vol in res_vols:
+ if vol.id['name'] == 'volc':
+ self.assertEqual(vol.sis['compression'], False)
+ self.assertEqual(vol.sis['dedup'], False)
+ else:
+ pass
+
+ def test_cl_vols_ssc_single(self):
+ """Test cluster ssc for single vol."""
+ mox = self.mox
+ na_server = api.NaServer('127.0.0.1')
+ vserver = 'openstack'
+ test_vols = set([copy.deepcopy(self.vol1)])
+ sis = {'vola': {'dedup': False, 'compression': False}}
+
+ mox.StubOutWithMock(ssc_utils, 'query_cluster_vols_for_ssc')
+ mox.StubOutWithMock(ssc_utils, 'get_sis_vol_dict')
+ mox.StubOutWithMock(ssc_utils, 'query_aggr_options')
+ mox.StubOutWithMock(ssc_utils, 'query_aggr_storage_disk')
+ ssc_utils.query_cluster_vols_for_ssc(
+ na_server, vserver, 'vola').AndReturn(test_vols)
+ ssc_utils.get_sis_vol_dict(
+ na_server, vserver, 'vola').AndReturn(sis)
+ raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
+ ssc_utils.query_aggr_options(
+ na_server, 'aggr1').AndReturn(raiddp)
+ ssc_utils.query_aggr_storage_disk(na_server, 'aggr1').AndReturn('SSD')
+ mox.ReplayAll()
+
+ res_vols = ssc_utils.get_cluster_vols_with_ssc(
+ na_server, vserver, volume='vola')
+
+ mox.VerifyAll()
+ self.assertEqual(len(res_vols), 1)
+
+ def test_get_cluster_ssc(self):
+ """Test get cluster ssc map."""
+ mox = self.mox
+ na_server = api.NaServer('127.0.0.1')
+ vserver = 'openstack'
+ test_vols = set(
+ [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
+
+ mox.StubOutWithMock(ssc_utils, 'get_cluster_vols_with_ssc')
+ ssc_utils.get_cluster_vols_with_ssc(
+ na_server, vserver).AndReturn(test_vols)
+ mox.ReplayAll()
+
+ res_map = ssc_utils.get_cluster_ssc(na_server, vserver)
+
+ mox.VerifyAll()
+ self.assertEqual(len(res_map['mirrored']), 1)
+ self.assertEqual(len(res_map['dedup']), 3)
+ self.assertEqual(len(res_map['compression']), 1)
+ self.assertEqual(len(res_map['thin']), 2)
+ self.assertEqual(len(res_map['all']), 5)
+
+ def test_vols_for_boolean_specs(self):
+ """Test ssc for boolean specs."""
+ test_vols = set(
+ [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
+ ssc_map = {'mirrored': set([self.vol1]),
+ 'dedup': set([self.vol1, self.vol2, self.vol3]),
+ 'compression': set([self.vol3, self.vol4]),
+ 'thin': set([self.vol5, self.vol2]), 'all': test_vols}
+ test_map = {'mirrored': ('netapp_mirrored', 'netapp_unmirrored'),
+ 'dedup': ('netapp_dedup', 'netapp_nodedup'),
+ 'compression': ('netapp_compression',
+ 'netapp_nocompression'),
+ 'thin': ('netapp_thin_provisioned',
+ 'netapp_thick_provisioned')}
+ for type in test_map.keys():
+ # type
+ extra_specs = {test_map[type][0]: 'true'}
+ res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+ self.assertEqual(len(res), len(ssc_map[type]))
+ # opposite type
+ extra_specs = {test_map[type][1]: 'true'}
+ res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+ self.assertEqual(len(res), len(ssc_map['all'] - ssc_map[type]))
+ # both types
+ extra_specs =\
+ {test_map[type][0]: 'true', test_map[type][1]: 'true'}
+ res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+ self.assertEqual(len(res), len(ssc_map['all']))
+
+ def test_vols_for_optional_specs(self):
+ """Test ssc for optional specs."""
+ test_vols =\
+ set([self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
+ ssc_map = {'mirrored': set([self.vol1]),
+ 'dedup': set([self.vol1, self.vol2, self.vol3]),
+ 'compression': set([self.vol3, self.vol4]),
+ 'thin': set([self.vol5, self.vol2]), 'all': test_vols}
+ extra_specs =\
+ {'netapp_dedup': 'true',
+ 'netapp:raid_type': 'raid4', 'netapp:disk_type': 'SSD'}
+ res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+ self.assertEqual(len(res), 1)
+
+ def test_query_cl_vols_for_ssc(self):
+ na_server = api.NaServer('127.0.0.1')
+ na_server.set_api_version(1, 15)
+ vols = ssc_utils.query_cluster_vols_for_ssc(na_server, 'Openstack')
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ if vol.id['name'] not in ('iscsi', 'nfsvol'):
+ raise exception.InvalidVolume('Invalid volume returned.')
+
+ def test_query_aggr_options(self):
+ na_server = api.NaServer('127.0.0.1')
+ aggr_attribs = ssc_utils.query_aggr_options(na_server, 'aggr0')
+ if aggr_attribs:
+ self.assertEqual(aggr_attribs['ha_policy'], 'cfo')
+ self.assertEqual(aggr_attribs['raid_type'], 'raid_dp')
+ else:
+ raise exception.InvalidParameterValue("Incorrect aggr options")
+
+ def test_query_aggr_storage_disk(self):
+ na_server = api.NaServer('127.0.0.1')
+ eff_disk_type = ssc_utils.query_aggr_storage_disk(na_server, 'aggr0')
+ self.assertEqual(eff_disk_type, 'SATA')
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.GiB, 2 * units.GiB,
2 * units.GiB))
+ drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
+ AndReturn((5 * units.GiB, 2 * units.GiB,
+ 2 * units.GiB))
+ drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
+ AndReturn((10 * units.GiB, 3 * units.GiB,
+ 1 * units.GiB))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.GiB, 3 * units.GiB,
1 * units.GiB))
def _create_certificate_auth_handler(self):
raise NotImplementedError()
+ def __str__(self):
+ return "server: %s" % (self._host)
+
class NaElement(object):
"""Class wraps basic building block for NetApp api request."""
return self._element.tag
def set_content(self, text):
- """Set the text for the element."""
+ """Set the text string for the element."""
self._element.text = text
def get_content(self):
return etree.tostring(self._element, method=method, encoding=encoding,
pretty_print=pretty)
+ def __getitem__(self, key):
+ """Dict getter method for NaElement.
+
+ Returns the child NaElement if it has children of its own,
+ the child's text content if it has no children,
+ or the attribute value if the key names an attribute.
+ """
+
+ child = self.get_child_by_name(key)
+ if child:
+ if child.get_children():
+ return child
+ else:
+ return child.get_content()
+ elif self.has_attr(key):
+ return self.get_attr(key)
+ raise KeyError(_('No element by given name %s.') % (key))
+
+ def __setitem__(self, key, value):
+ """Dict setter method for NaElement."""
+ if key:
+ if value:
+ if isinstance(value, NaElement):
+ child = NaElement(key)
+ child.add_child_elem(value)
+ self.add_child_elem(child)
+ elif isinstance(value, str):
+ child = self.get_child_by_name(key)
+ if child:
+ child.set_content(value)
+ else:
+ self.add_new_child(key, value)
+ elif isinstance(value, dict):
+ child = NaElement(key)
+ child.translate_struct(value)
+ self.add_child_elem(child)
+ else:
+ raise TypeError(_('Not a valid value for NaElement.'))
+ else:
+ self.add_child_elem(NaElement(key))
+ else:
+ raise KeyError(_('NaElement name cannot be null.'))
+
+ def translate_struct(self, data_struct):
+ """Convert list, tuple, dict to NaElement and appends.
+
+ Useful for NaElement queries which have unique
+ query parameters.
+ """
+
+ if isinstance(data_struct, list) or isinstance(data_struct, tuple):
+ for el in data_struct:
+ self.add_child_elem(NaElement(el))
+ elif isinstance(data_struct, dict):
+ for k in data_struct.keys():
+ child = NaElement(k)
+ if (isinstance(data_struct[k], dict) or
+ isinstance(data_struct[k], list) or
+ isinstance(data_struct[k], tuple)):
+ child.translate_struct(data_struct[k])
+ else:
+ if data_struct[k]:
+ child.set_content(str(data_struct[k]))
+ self.add_child_elem(child)
+ else:
+ raise ValueError(_('Type cannot be converted into NaElement.'))
+
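An aside on how the new accessors compose: __setitem__ dispatches on the value type (a str appends or sets text, a dict delegates to translate_struct, an NaElement nests), so nested queries can be built from plain dicts. A hedged sketch using only names from this patch:

```python
vol_iter = NaElement('volume-get-iter')
vol_iter['max-records'] = '100'  # str value -> new child element
vol_iter['query'] = {            # dict value -> translate_struct
    'volume-attributes':
        {'volume-id-attributes': {'owning-vserver-name': 'openstack'}}}
# __getitem__ returns the child element itself, since it has children.
assert vol_iter['query'] is not None
```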
class NaApiError(Exception):
"""Base exception class for NetApp api errors."""
'%(storage_protocol)s') % fmt)
storage_family = storage_family.lower()
family_meta = netapp_unified_plugin_registry.get(storage_family)
- if not family_meta:
+ if family_meta is None:
raise exception.InvalidInput(
reason=_('Storage family %s is not supported')
% storage_family)
- if not storage_protocol:
+ if storage_protocol is None:
storage_protocol = netapp_family_default.get(storage_family)
- if not storage_protocol:
- msg_fmt = {'storage_family': storage_family}
+ fmt['storage_protocol'] = storage_protocol
+ if storage_protocol is None:
raise exception.InvalidInput(
reason=_('No default storage protocol found'
' for storage family %(storage_family)s')
- % msg_fmt)
+ % fmt)
storage_protocol = storage_protocol.lower()
driver_loc = family_meta.get(storage_protocol)
- if not driver_loc:
- msg_fmt = {'storage_protocol': storage_protocol,
- 'storage_family': storage_family}
+ if driver_loc is None:
raise exception.InvalidInput(
reason=_('Protocol %(storage_protocol)s is not supported'
' for storage family %(storage_family)s')
- % msg_fmt)
+ % fmt)
NetAppDriverFactory.check_netapp_driver(driver_loc)
kwargs = kwargs or {}
kwargs['netapp_mode'] = 'proxy'
storage systems with installed iSCSI licenses.
"""
+import copy
import sys
import time
import uuid
from cinder import exception
from cinder.openstack.common import log as logging
+from cinder import units
+from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
+from cinder.volume.drivers.netapp import ssc_utils
+from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume.drivers.netapp.utils import provide_ems
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume import volume_types
This method creates NetApp server client for api communication.
"""
+
host_filer = kwargs['hostname']
LOG.debug(_('Using NetApp filer: %s') % host_filer)
self.client = NaServer(host=host_filer,
Validate the flags we care about and setup NetApp
client.
"""
+
self._check_flags()
self._create_client(
transport_type=self.configuration.netapp_transport_type,
Discovers the LUNs on the NetApp server.
"""
+
self.lun_table = {}
self._get_lun_list()
LOG.debug(_("Success getting LUN list from server"))
metadata = {}
metadata['OsType'] = 'linux'
metadata['SpaceReserved'] = 'true'
- self._create_lun_on_eligible_vol(name, size, metadata)
+ extra_specs = get_volume_extra_specs(volume)
+ self._create_lun_on_eligible_vol(name, size, metadata, extra_specs)
LOG.debug(_("Created LUN with name %s") % name)
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, name, size, metadata))
return
lun_destroy = NaElement.create_node_with_children(
'lun-destroy',
- **{'path': metadata['Path'],
- 'force': 'true'})
+ **{'path': metadata['Path'], 'force': 'true'})
self.client.invoke_successfully(lun_destroy, True)
LOG.debug(_("Destroyed LUN %s") % name)
self.lun_table.pop(name)
Since exporting is idempotent in this driver, we have nothing
to do for unexporting.
"""
+
pass
def initialize_connection(self, volume, connector):
be during this method call so we construct the properties dictionary
ourselves.
"""
+
initiator_name = connector['initiator']
name = volume['name']
lun_id = self._map_lun(name, initiator_name, 'iscsi', None)
This driver implements snapshots by using efficient single-file
(LUN) cloning.
"""
+
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
lun = self.lun_table[vol_name]
Many would call this "cloning" and in fact we use cloning to implement
this feature.
"""
+
vol_size = volume['size']
snap_size = snapshot['volume_size']
if vol_size != snap_size:
Unmask the LUN on the storage system so the given intiator can no
longer access it.
"""
+
initiator_name = connector['initiator']
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
minor = res.get_child_content('minor-version')
return (major, minor)
- def _create_lun_on_eligible_vol(self, name, size, metadata):
+ def _create_lun_on_eligible_vol(self, name, size, metadata,
+ extra_specs=None):
"""Creates an actual lun on filer."""
- req_size = float(size) *\
- float(self.configuration.netapp_size_multiplier)
- volume = self._get_avl_volume_by_size(req_size)
- if not volume:
- msg = _('Failed to get vol with required size for volume: %s')
- raise exception.VolumeBackendAPIException(data=msg % name)
- path = '/vol/%s/%s' % (volume['name'], name)
- lun_create = NaElement.create_node_with_children(
- 'lun-create-by-size',
- **{'path': path, 'size': size,
- 'ostype': metadata['OsType'],
- 'space-reservation-enabled':
- metadata['SpaceReserved']})
- self.client.invoke_successfully(lun_create, True)
- metadata['Path'] = '/vol/%s/%s' % (volume['name'], name)
- metadata['Volume'] = volume['name']
- metadata['Qtree'] = None
-
- def _get_avl_volume_by_size(self, size):
- """Get the available volume by size."""
raise NotImplementedError()
def _get_iscsi_service_details(self):
Populates in the lun table.
"""
+
for lun in api_luns:
meta_dict = self._create_lun_meta(lun)
path = lun.get_child_content('path')
initiator_type, os)
lun_map = NaElement.create_node_with_children(
'lun-map', **{'path': path,
- 'initiator-group': igroup_name})
+ 'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
(igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator)
lun_unmap = NaElement.create_node_with_children(
'lun-unmap',
- **{'path': path,
- 'initiator-group': igroup_name})
+ **{'path': path, 'initiator-group': igroup_name})
try:
self.client.invoke_successfully(lun_unmap, True)
except NaApiError as e:
Creates igroup if not found.
"""
+
igroups = self._get_igroup_by_initiator(initiator=initiator)
igroup_name = None
for igroup in igroups:
igroup_create = NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
- 'initiator-group-type': igroup_type,
- 'os-type': os_type})
+ 'initiator-group-type': igroup_type,
+ 'os-type': os_type})
self.client.invoke_successfully(igroup_create, True)
def _add_igroup_initiator(self, igroup, initiator):
igroup_add = NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
- 'initiator': initiator})
+ 'initiator': initiator})
self.client.invoke_successfully(igroup_add, True)
def _get_qos_type(self, volume):
If 'refresh' is True, run update the stats first.
"""
+
if refresh:
self._update_volume_stats()
class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp C-mode iSCSI volume driver."""
+ DEFAULT_VS = 'openstack'
+
def __init__(self, *args, **kwargs):
super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_cluster_opts)
def _do_custom_setup(self):
"""Does custom setup for ontap cluster."""
self.vserver = self.configuration.netapp_vserver
+ self.vserver = self.vserver if self.vserver else self.DEFAULT_VS
# We set vserver in client permanently.
# To use tunneling enable_tunneling while invoking api
self.client.set_vserver(self.vserver)
self.client.set_api_version(1, 15)
(major, minor) = self._get_ontapi_version()
self.client.set_api_version(major, minor)
+ self.ssc_vols = None
+ self.stale_vols = set()
+ ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
- def _get_avl_volume_by_size(self, size):
- """Get the available volume by size."""
- tag = None
- while True:
- vol_request = self._create_avl_vol_request(self.vserver, tag)
- res = self.client.invoke_successfully(vol_request)
- tag = res.get_child_content('next-tag')
- attr_list = res.get_child_by_name('attributes-list')
- vols = attr_list.get_children()
- for vol in vols:
- vol_space = vol.get_child_by_name('volume-space-attributes')
- avl_size = vol_space.get_child_content('size-available')
- if float(avl_size) >= float(size):
- avl_vol = dict()
- vol_id = vol.get_child_by_name('volume-id-attributes')
- avl_vol['name'] = vol_id.get_child_content('name')
- avl_vol['vserver'] = vol_id.get_child_content(
- 'owning-vserver-name')
- avl_vol['size-available'] = avl_size
- return avl_vol
- if tag is None:
- break
- return None
-
- def _create_avl_vol_request(self, vserver, tag=None):
- vol_get_iter = NaElement('volume-get-iter')
- vol_get_iter.add_new_child('max-records', '100')
- if tag:
- vol_get_iter.add_new_child('tag', tag, True)
- query = NaElement('query')
- vol_get_iter.add_child_elem(query)
- vol_attrs = NaElement('volume-attributes')
- query.add_child_elem(vol_attrs)
- if vserver:
- vol_attrs.add_node_with_children(
- 'volume-id-attributes',
- **{"owning-vserver-name": vserver})
- vol_attrs.add_node_with_children(
- 'volume-state-attributes',
- **{"is-vserver-root": "false", "state": "online"})
- desired_attrs = NaElement('desired-attributes')
- vol_get_iter.add_child_elem(desired_attrs)
- des_vol_attrs = NaElement('volume-attributes')
- desired_attrs.add_child_elem(des_vol_attrs)
- des_vol_attrs.add_node_with_children(
- 'volume-id-attributes',
- **{"name": None, "owning-vserver-name": None})
- des_vol_attrs.add_node_with_children(
- 'volume-space-attributes',
- **{"size-available": None})
- des_vol_attrs.add_node_with_children('volume-state-attributes',
- **{"is-cluster-volume": None,
- "is-vserver-root": None,
- "state": None})
- return vol_get_iter
+ def _create_lun_on_eligible_vol(self, name, size, metadata,
+ extra_specs=None):
+ """Creates an actual lun on filer."""
+ req_size = float(size) *\
+ float(self.configuration.netapp_size_multiplier)
+ volumes = self._get_avl_volumes(req_size, extra_specs)
+ if not volumes:
+ msg = _('Failed to get vol with required'
+ ' size and extra specs for volume: %s')
+ raise exception.VolumeBackendAPIException(data=msg % name)
+ for volume in volumes:
+ try:
+ path = '/vol/%s/%s' % (volume.id['name'], name)
+ lun_create = NaElement.create_node_with_children(
+ 'lun-create-by-size',
+ **{'path': path, 'size': size,
+ 'ostype': metadata['OsType']})
+ self.client.invoke_successfully(lun_create, True)
+ metadata['Path'] = '/vol/%s/%s' % (volume.id['name'], name)
+ metadata['Volume'] = volume.id['name']
+ metadata['Qtree'] = None
+ return
+ except NaApiError:
+ LOG.warn(_("Error provisioning vol %(name)s on %(volume)s")
+ % {'name': name, 'volume': volume.id['name']})
+ finally:
+ self._update_stale_vols(volume=volume)
+
+ def _get_avl_volumes(self, size, extra_specs=None):
+ """Get the available volume by size, extra_specs."""
+ result = []
+ volumes = ssc_utils.get_volumes_for_specs(
+ self.ssc_vols, extra_specs)
+ if volumes:
+ sorted_vols = sorted(volumes, reverse=True)
+ for vol in sorted_vols:
+ if int(vol.space['size_avl_bytes']) >= int(size):
+ result.append(vol)
+ return result
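For context, a hypothetical call (sizes and specs invented) showing what the retry loop in _create_lun_on_eligible_vol receives: candidates already filtered by ssc and ordered most-free-space-first via NetAppVolume.__cmp__.

```python
eligible = self._get_avl_volumes(20 * units.GiB,
                                 {'netapp_dedup': 'true'})
best = eligible[0] if eligible else None  # largest available space
```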
def _get_target_details(self):
"""Gets the target portal details."""
Gets the luns from cluster with vserver.
"""
+
tag = None
while True:
api = NaElement('lun-get-iter')
clone_create = NaElement.create_node_with_children(
'clone-create',
**{'volume': volume, 'source-path': name,
- 'destination-path': new_name,
- 'space-reserve': space_reserved})
+ 'destination-path': new_name, 'space-reserve': space_reserved})
self.client.invoke_successfully(clone_create, True)
LOG.debug(_("Cloned LUN with new name %s") % new_name)
lun = self._get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s'
new_name,
lun[0].get_child_content('size'),
clone_meta))
+ self._update_stale_vols(
+ volume=ssc_utils.NetAppVolume(volume, self.vserver))
def _get_lun_by_args(self, **args):
"""Retrives lun with specified args."""
data["driver_version"] = '1.0'
data["storage_protocol"] = 'iSCSI'
- data['total_capacity_gb'] = 'infinite'
- data['free_capacity_gb'] = 'infinite'
+ data['total_capacity_gb'] = 0
+ data['free_capacity_gb'] = 0
data['reserved_percentage'] = 0
data['QoS_support'] = False
+ self._update_cluster_vol_stats(data)
provide_ems(self, self.client, data, netapp_backend)
self._stats = data
+ def _update_cluster_vol_stats(self, data):
+ """Updates vol stats with cluster config."""
+ if self.ssc_vols:
+ data['netapp_mirrored'] = 'true'\
+ if self.ssc_vols['mirrored'] else 'false'
+ data['netapp_unmirrored'] = 'true'\
+ if len(self.ssc_vols['all']) > len(self.ssc_vols['mirrored'])\
+ else 'false'
+ data['netapp_dedup'] = 'true'\
+ if self.ssc_vols['dedup'] else 'false'
+ data['netapp_nodedup'] = 'true'\
+ if len(self.ssc_vols['all']) > len(self.ssc_vols['dedup'])\
+ else 'false'
+ data['netapp_compression'] = 'true'\
+ if self.ssc_vols['compression'] else 'false'
+ data['netapp_nocompression'] = 'true'\
+ if len(self.ssc_vols['all']) >\
+ len(self.ssc_vols['compression'])\
+ else 'false'
+ data['netapp_thin_provisioned'] = 'true'\
+ if self.ssc_vols['thin'] else 'false'
+ data['netapp_thick_provisioned'] = 'true'\
+ if len(self.ssc_vols['all']) >\
+ len(self.ssc_vols['thin']) else 'false'
+ vol_max = max(self.ssc_vols['all'])
+ data['total_capacity_gb'] =\
+ int(vol_max.space['size_total_bytes']) / units.GiB
+ data['free_capacity_gb'] =\
+ int(vol_max.space['size_avl_bytes']) / units.GiB
+ else:
+ LOG.warn(_("Cluster ssc is not updated. No volume stats found."))
+ ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
+
+ @utils.synchronized('update_stale')
+ def _update_stale_vols(self, volume=None, reset=False):
+ """Populates stale vols with vol and returns set copy if reset."""
+ if volume:
+ self.stale_vols.add(volume)
+ if reset:
+ set_copy = copy.deepcopy(self.stale_vols)
+ self.stale_vols.clear()
+ return set_copy
+
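The reset flag is what lets a periodic job drain the set atomically; refresh_cluster_stale_ssc in ssc_utils (later in this patch) is the real consumer. A hedged sketch, with driver, na_server and vserver as stand-in names:

```python
stale = driver._update_stale_vols(reset=True)  # snapshot and clear
for vol in stale:
    fresh = ssc_utils.get_cluster_vols_with_ssc(
        na_server, vserver, volume=vol.id['name'])
```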
+ @utils.synchronized("refresh_ssc_vols")
+ def refresh_ssc_vols(self, vols):
+ """Refreshes ssc_vols with latest entries."""
+ self.ssc_vols = vols
+
class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp 7-mode iSCSI volume driver."""
self.client.set_api_version(major, minor)
self.client.set_vfiler(self.vfiler)
+ def _create_lun_on_eligible_vol(self, name, size, metadata,
+ extra_specs=None):
+ """Creates an actual lun on filer."""
+ req_size = float(size) *\
+ float(self.configuration.netapp_size_multiplier)
+ volume = self._get_avl_volume_by_size(req_size)
+ if not volume:
+ msg = _('Failed to get vol with required size for volume: %s')
+ raise exception.VolumeBackendAPIException(data=msg % name)
+ path = '/vol/%s/%s' % (volume['name'], name)
+ lun_create = NaElement.create_node_with_children(
+ 'lun-create-by-size',
+ **{'path': path, 'size': size, 'ostype': metadata['OsType'],
+ 'space-reservation-enabled': metadata['SpaceReserved']})
+ self.client.invoke_successfully(lun_create, True)
+ metadata['Path'] = '/vol/%s/%s' % (volume['name'], name)
+ metadata['Volume'] = volume['name']
+ metadata['Qtree'] = None
+
def _get_avl_volume_by_size(self, size):
"""Get the available volume by size."""
vol_request = NaElement('volume-list-info')
clone_start = NaElement.create_node_with_children(
'clone-start',
**{'source-path': path, 'destination-path': clone_path,
- 'no-snap': 'true'})
+ 'no-snap': 'true'})
result = self.client.invoke_successfully(clone_start, True)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
def _update_volume_stats(self):
"""Retrieve status info from volume group."""
-
LOG.debug(_("Updating volume stats"))
data = {}
netapp_backend = 'NetApp_iSCSI_7mode_direct'
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
+from cinder import units
+from cinder import utils
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
+from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
+from cinder.volume.drivers.netapp import ssc_utils
+from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume.drivers.netapp.utils import provide_ems
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume.drivers import nfs
@param volume_name string,
example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
"""
+
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def __init__(self, *args, **kwargs):
super(NetAppDirectCmodeNfsDriver, self).__init__(*args, **kwargs)
+ self.configuration.append_config_values(netapp_cluster_opts)
def _do_custom_setup(self, client):
"""Do the customized set up on client for cluster mode."""
client.set_api_version(1, 15)
(major, minor) = self._get_ontapi_version()
client.set_api_version(major, minor)
+ self.vserver = self.configuration.netapp_vserver
+ self.ssc_vols = None
+ self.stale_vols = set()
+ if self.vserver:
+ self.ssc_enabled = True
+ LOG.warn(_("Shares on vserver %s will only"
+ " be used for provisioning.") % (self.vserver))
+ ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
+ else:
+ self.ssc_enabled = False
+ LOG.warn(_("No vserver set in config. SSC will be disabled."))
def _invoke_successfully(self, na_element, vserver=None):
"""Invoke the api for successful result.
else Cluster api.
:param vserver: vserver name.
"""
+
self._is_naelement(na_element)
server = copy.copy(self._client)
if vserver:
result = server.invoke_successfully(na_element, True)
return result
+ def create_volume(self, volume):
+ """Creates a volume.
+
+ :param volume: volume reference
+ """
+
+ self._ensure_shares_mounted()
+ extra_specs = get_volume_extra_specs(volume)
+ eligible = self._find_containers(volume['size'], extra_specs)
+ if not eligible:
+ raise exception.NfsNoSuitableShareFound(
+ volume_size=volume['size'])
+ for sh in eligible:
+ try:
+ if self.ssc_enabled:
+ volume['provider_location'] = sh.export['path']
+ else:
+ volume['provider_location'] = sh
+ LOG.info(_('cast to %s') % volume['provider_location'])
+ self._do_create_volume(volume)
+ return {'provider_location': volume['provider_location']}
+ except Exception:
+ LOG.warn(_("Exception creating vol %(name)s"
+ " on share %(share)s")
+ % {'name': volume['name'],
+ 'share': volume['provider_location']})
+ volume['provider_location'] = None
+ finally:
+ if self.ssc_enabled:
+ self._update_stale_vols(volume=sh)
+ msg = _("Volume %s could not be created on shares.")
+ raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
+
+ def _find_containers(self, size, extra_specs):
+ """Finds suitable containers for given params."""
+ containers = []
+ if self.ssc_enabled:
+ vols =\
+ ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs)
+ sort_vols = sorted(vols, reverse=True)
+ for vol in sort_vols:
+ if self._is_share_eligible(vol.export['path'], size):
+ containers.append(vol)
+ else:
+ for sh in self._mounted_shares:
+ if self._is_share_eligible(sh, size):
+ total_size, total_available, total_allocated = \
+ self._get_capacity_info(sh)
+ containers.append((sh, total_available))
+ containers = [a for a, b in
+ sorted(containers, key=lambda x: x[1], reverse=True)]
+ return containers
+
def _clone_volume(self, volume_name, clone_name, volume_id):
"""Clones mounted volume on NetApp Cluster."""
host_ip = self._get_host_ip(volume_id)
vol_attrs.add_node_with_children(
'volume-id-attributes',
**{'junction-path': junction,
- 'owning-vserver-name': vserver})
+ 'owning-vserver-name': vserver})
des_attrs = NaElement('desired-attributes')
des_attrs.add_node_with_children('volume-attributes',
**{'volume-id-attributes': None})
clone_create = NaElement.create_node_with_children(
'clone-create',
**{'volume': volume, 'source-path': src_path,
- 'destination-path': dest_path})
+ 'destination-path': dest_path})
self._invoke_successfully(clone_create, vserver)
def _update_volume_stats(self):
netapp_backend)
self._stats["vendor_name"] = 'NetApp'
self._stats["driver_version"] = '1.0'
+ self._update_cluster_vol_stats(self._stats)
provide_ems(self, self._client, self._stats, netapp_backend)
+ def _update_cluster_vol_stats(self, data):
+ """Updates vol stats with cluster config."""
+ if self.ssc_vols:
+ data['netapp_mirrored'] = 'true'\
+ if self.ssc_vols['mirrored'] else 'false'
+ data['netapp_unmirrored'] = 'true'\
+ if len(self.ssc_vols['all']) >\
+ len(self.ssc_vols['mirrored']) else 'false'
+ data['netapp_dedup'] = 'true'\
+ if self.ssc_vols['dedup'] else 'false'
+ data['netapp_nodedup'] = 'true'\
+ if len(self.ssc_vols['all']) >\
+ len(self.ssc_vols['dedup']) else 'false'
+ data['netapp_compression'] = 'true'\
+ if self.ssc_vols['compression'] else 'false'
+ data['netapp_nocompression'] = 'true'\
+ if len(self.ssc_vols['all']) >\
+ len(self.ssc_vols['compression']) else 'false'
+ data['netapp_thin_provisioned'] = 'true'\
+ if self.ssc_vols['thin'] else 'false'
+ data['netapp_thick_provisioned'] = 'true'\
+ if len(self.ssc_vols['all']) >\
+ len(self.ssc_vols['thin']) else 'false'
+ vol_max = max(self.ssc_vols['all'])
+ data['total_capacity_gb'] =\
+ int(vol_max.space['size_total_bytes']) / units.GiB
+ data['free_capacity_gb'] =\
+ int(vol_max.space['size_avl_bytes']) / units.GiB
+ elif self.ssc_enabled:
+ LOG.warn(_("No cluster ssc stats found."
+ " Wait for next volume stats update."))
+ if self.ssc_enabled:
+ ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
+ else:
+ LOG.warn(_("No vserver set in config. SSC will be disabled."))
+
+ @utils.synchronized('update_stale')
+ def _update_stale_vols(self, volume=None, reset=False):
+ """Populates stale vols with vol and returns set copy."""
+ if volume:
+ self.stale_vols.add(volume)
+ set_copy = self.stale_vols.copy()
+ if reset:
+ self.stale_vols.clear()
+ return set_copy
+
+ @utils.synchronized("refresh_ssc_vols")
+ def refresh_ssc_vols(self, vols):
+ """Refreshes ssc_vols with latest entries."""
+ if not self._mounted_shares:
+ LOG.warn(_("No shares found hence skipping ssc refresh."))
+ return
+ mnt_share_vols = set()
+ for vol in vols['all']:
+ for sh in self._mounted_shares:
+ junction = sh.split(':')[1]
+ if junction == vol.id['junction_path']:
+ mnt_share_vols.add(vol)
+ vol.export['path'] = sh
+ break
+ for key in vols.keys():
+ vols[key] = vols[key] & mnt_share_vols
+ self.ssc_vols = vols
+
class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
"""Executes commands related to volumes on 7 mode."""
else filer api.
:param vfiler: vfiler name.
"""
+
self._is_naelement(na_element)
server = copy.copy(self._client)
if vfiler:
:returns: clone-id
"""
+
msg_fmt = {'src_path': src_path, 'dest_path': dest_path}
LOG.debug(_("""Cloning with src %(src_path)s, dest %(dest_path)s""")
% msg_fmt)
clone_start = NaElement.create_node_with_children(
'clone-start',
**{'source-path': src_path,
- 'destination-path': dest_path,
- 'no-snap': 'true'})
+ 'destination-path': dest_path,
+ 'no-snap': 'true'})
result = self._invoke_successfully(clone_start, None)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
clone_ls_st.add_child_elem(clone_id)
clone_id.add_node_with_children('clone-id-info',
**{'clone-op-id': clone_op_id,
- 'volume-uuid': vol_uuid})
+ 'volume-uuid': vol_uuid})
task_running = True
while task_running:
result = self._invoke_successfully(clone_ls_st, None)
Invoke this in case of failed clone.
"""
+
clone_clear = NaElement.create_node_with_children(
'clone-clear',
**{'clone-id': clone_id})
netapp_cluster_opts = [
cfg.StrOpt('netapp_vserver',
- default='openstack',
+ default=None,
help='Cluster vserver to use for provisioning'), ]
netapp_7mode_opts = [
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# Copyright (c) 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Storage service catalog utility functions and classes for NetApp systems.
+"""
+
+import copy
+from threading import Timer
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
+from cinder import utils
+from cinder.volume import driver
+from cinder.volume.drivers.netapp import api
+from cinder.volume.drivers.netapp import utils as na_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NetAppVolume(object):
+ """Represents a NetApp volume.
+
+ Present attributes
+ id - name, vserver, junction_path, type
+ aggr - name, raid_type, ha_policy, disk_type
+ sis - dedup, compression
+ state - status, vserver_root, cluster_volume,
+ inconsistent, invalid, junction_active
+ qos - qos_policy_group
+ space - space-guarantee-enabled, space-guarantee,
+ thin_provisioned, size_avl_bytes, size_total_bytes
+ mirror - mirrored i.e. dp mirror
+ export - path
+ """
+ def __init__(self, name, vserver=None):
+ self.id = {}
+ self.aggr = {}
+ self.sis = {}
+ self.state = {}
+ self.qos = {}
+ self.space = {}
+ self.mirror = {}
+ self.export = {}
+ self.id['name'] = name
+ self.id['vserver'] = vserver
+
+ def __eq__(self, other):
+ """Checks for equality."""
+ if (self.id['name'] == other.id['name'] and
+ self.id['vserver'] == other.id['vserver']):
+ return True
+ return False
+
+ def __hash__(self):
+ """Computes hash for the object."""
+ return hash(self.id['name'])
+
+ def __cmp__(self, other):
+ """Implements comparison logic for volumes."""
+ self_size_avl = self.space.get('size_avl_bytes')
+ other_size_avl = other.space.get('size_avl_bytes')
+ if self_size_avl is None and other_size_avl is not None:
+ return -1
+ elif self_size_avl is not None and other_size_avl is None:
+ return 1
+ elif self_size_avl is None and other_size_avl is None:
+ return 0
+ elif int(self_size_avl) < int(other_size_avl):
+ return -1
+ elif int(self_size_avl) > int(other_size_avl):
+ return 1
+ else:
+ return 0
+
+ def __str__(self):
+ """Returns human readable form for object."""
+ vol_str = "NetApp Volume id: %s, aggr: %s,"\
+ " space: %s, sis: %s, state: %s, qos: %s"\
+ % (self.id, self.aggr, self.space, self.sis, self.state, self.qos)
+ return vol_str
+
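A small sketch of the ordering semantics __cmp__ provides, with illustrative sizes:

```python
a = NetAppVolume('vola', 'openstack')
a.space['size_avl_bytes'] = '1000'
b = NetAppVolume('volb', 'openstack')
b.space['size_avl_bytes'] = '2000'
assert max([a, b]) is b                      # most free space wins
assert sorted([a, b], reverse=True)[0] is b  # as the drivers above rely on
```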
+
+def get_cluster_vols_with_ssc(na_server, vserver, volume=None):
+ """Gets ssc vols for cluster vserver."""
+ volumes = query_cluster_vols_for_ssc(na_server, vserver, volume)
+ sis_vols = get_sis_vol_dict(na_server, vserver, volume)
+ aggrs = {}
+ for vol in volumes:
+ aggr_name = vol.aggr['name']
+ if aggr_name:
+ if aggr_name in aggrs:
+ aggr_attrs = aggrs[aggr_name]
+ else:
+ aggr_attrs = query_aggr_options(na_server, aggr_name)
+ eff_disk_type = query_aggr_storage_disk(na_server, aggr_name)
+ aggr_attrs['disk_type'] = eff_disk_type
+ aggrs[aggr_name] = aggr_attrs
+ vol.aggr['raid_type'] = aggr_attrs.get('raid_type')
+ vol.aggr['ha_policy'] = aggr_attrs.get('ha_policy')
+ vol.aggr['disk_type'] = aggr_attrs.get('disk_type')
+ if vol.id['name'] in sis_vols:
+ vol.sis['dedup'] = sis_vols[vol.id['name']]['dedup']
+ vol.sis['compression'] = sis_vols[vol.id['name']]['compression']
+ else:
+ vol.sis['dedup'] = False
+ vol.sis['compression'] = False
+ if (vol.space['space-guarantee-enabled'] and
+ (vol.space['space-guarantee'] == 'file' or
+ vol.space['space-guarantee'] == 'volume')):
+ vol.space['thin_provisioned'] = False
+ else:
+ vol.space['thin_provisioned'] = True
+ return volumes
+
+
+def query_cluster_vols_for_ssc(na_server, vserver, volume=None):
+ """Queries cluster volumes for ssc."""
+ query = {'volume-attributes': None}
+ volume_id = {'volume-id-attributes': {'owning-vserver-name': vserver}}
+ if volume:
+ volume_id['volume-id-attributes']['name'] = volume
+ query['volume-attributes'] = volume_id
+ des_attr = {'volume-attributes':
+ ['volume-id-attributes',
+ 'volume-mirror-attributes',
+ 'volume-space-attributes',
+ 'volume-state-attributes',
+ 'volume-qos-attributes']}
+ result = na_utils.invoke_api(na_server, api_name='volume-get-iter',
+ api_family='cm', query=query,
+ des_result=des_attr,
+ additional_elems=None,
+ is_iter=True)
+ vols = set()
+ for res in result:
+ records = res.get_child_content('num-records')
+ if records and int(records) > 0:
+ attr_list = res['attributes-list']
+ vol_attrs = attr_list.get_children()
+ vols_found = create_vol_list(vol_attrs)
+ vols.update(vols_found)
+ return vols
+
+
+def create_vol_list(vol_attrs):
+ """Creates vol list with features from attr list."""
+ vols = set()
+ for v in vol_attrs:
+ try:
+ # name and vserver are mandatory
+ # Absence raises KeyError, which skips the volume.
+ name = v['volume-id-attributes']['name']
+ vserver = v['volume-id-attributes']['owning-vserver-name']
+ vol = NetAppVolume(name, vserver)
+ vol.id['type'] =\
+ v['volume-id-attributes'].get_child_content('type')
+ if vol.id['type'] == "tmp":
+ continue
+ vol.id['junction_path'] =\
+ v['volume-id-attributes'].get_child_content('junction-path')
+ # state attributes mandatory.
+ vol.state['vserver_root'] =\
+ na_utils.to_bool(
+ v['volume-state-attributes'].get_child_content(
+ 'is-vserver-root'))
+ if vol.state['vserver_root']:
+ continue
+ vol.state['status'] =\
+ v['volume-state-attributes'].get_child_content('state')
+ vol.state['inconsistent'] =\
+ na_utils.to_bool(
+ v['volume-state-attributes'].get_child_content(
+ 'is-inconsistent'))
+ vol.state['invalid'] =\
+ na_utils.to_bool(
+ v['volume-state-attributes'].get_child_content(
+ 'is-invalid'))
+ vol.state['junction_active'] =\
+ na_utils.to_bool(
+ v['volume-state-attributes'].get_child_content(
+ 'is-junction-active'))
+ vol.state['cluster_volume'] =\
+ na_utils.to_bool(
+ v['volume-state-attributes'].get_child_content(
+ 'is-cluster-volume'))
+ if (vol.state['status'] != 'online' or
+ vol.state['inconsistent'] or vol.state['invalid']):
+ # offline, invalid and inconsistent volumes are not usable
+ continue
+ # aggr attributes mandatory.
+ vol.aggr['name'] =\
+ v['volume-id-attributes']['containing-aggregate-name']
+ # space attributes mandatory.
+ vol.space['size_avl_bytes'] =\
+ v['volume-space-attributes']['size-available']
+ vol.space['size_total_bytes'] =\
+ v['volume-space-attributes']['size-total']
+ vol.space['space-guarantee-enabled'] =\
+ na_utils.to_bool(
+ v['volume-space-attributes'].get_child_content(
+ 'is-space-guarantee-enabled'))
+ vol.space['space-guarantee'] =\
+ v['volume-space-attributes'].get_child_content(
+ 'space-guarantee')
+ # mirror attributes optional.
+ if v.get_child_by_name('volume-mirror-attributes'):
+ vol.mirror['mirrored'] =\
+ na_utils.to_bool(
+ v['volume-mirror-attributes'].get_child_content(
+ 'is-data-protection-mirror'))
+ else:
+ vol.mirror['mirrored'] = False
+ # qos attributes optional.
+ if v.get_child_by_name('volume-qos-attributes'):
+ vol.qos['qos_policy_group'] =\
+ v['volume-qos-attributes'].get_child_content(
+ 'policy-group-name')
+ else:
+ vol.qos['qos_policy_group'] = None
+ vols.add(vol)
+ except KeyError as e:
+ LOG.debug(_('Unexpected error while creating'
+ ' ssc vol list. Message - %s') % (e.message))
+ continue
+ return vols
+
+
+def query_aggr_options(na_server, aggr_name):
+ """Queries cluster aggr for attributes.
+
+ Currently queries for raid and ha-policy.
+ """
+
+ add_elems = {'aggregate': aggr_name}
+ result = na_utils.invoke_api(na_server,
+ api_name='aggr-options-list-info',
+ api_family='cm', query=None,
+ des_result=None,
+ additional_elems=add_elems,
+ is_iter=False)
+ attrs = {}
+ for res in result:
+ options = res.get_child_by_name('options')
+ if options:
+ op_list = options.get_children()
+ for op in op_list:
+ if op.get_child_content('name') == 'ha_policy':
+ attrs['ha_policy'] = op.get_child_content('value')
+ if op.get_child_content('name') == 'raidtype':
+ attrs['raid_type'] = op.get_child_content('value')
+ return attrs
+
+
+def get_sis_vol_dict(na_server, vserver, volume=None):
+ """Queries sis for volumes.
+
+ If a volume name is given, sis is queried only for that volume.
+ Records whether dedup and compression are enabled.
+ """
+
+ sis_vols = {}
+ query_attr = {'vserver': vserver}
+ if volume:
+ vol_path = '/vol/%s' % (volume)
+ query_attr['path'] = vol_path
+ query = {'sis-status-info': query_attr}
+ result = na_utils.invoke_api(na_server,
+ api_name='sis-get-iter',
+ api_family='cm',
+ query=query,
+ is_iter=True)
+ for res in result:
+ attr_list = res.get_child_by_name('attributes-list')
+ if attr_list:
+ sis_status = attr_list.get_children()
+ for sis in sis_status:
+ path = sis.get_child_content('path')
+ if not path:
+ continue
+ # path has the form /vol/<volume name>; key by the trailing
+ # volume name component
+ (___, __, vol) = path.rpartition('/')
+ if not vol:
+ continue
+ v_sis = {}
+ v_sis['compression'] = na_utils.to_bool(
+ sis.get_child_content('is-compression-enabled'))
+ v_sis['dedup'] = na_utils.to_bool(
+ sis.get_child_content('state'))
+ sis_vols[vol] = v_sis
+ return sis_vols
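+
+
+# Illustrative usage sketch; ``server`` and the vserver/volume names are
+# assumptions:
+#     sis = get_sis_vol_dict(server, 'vs1', volume='vol1')
+#     sis.get('vol1', {}).get('dedup')  # True if dedup enabled on vol1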
+
+
+def query_aggr_storage_disk(na_server, aggr):
+ """Queries for storage disks assosiated to an aggregate."""
+ query = {'storage-disk-info': {'disk-raid-info':
+ {'disk-aggregate-info':
+ {'aggregate-name': aggr}}}}
+ des_attr = {'storage-disk-info':
+ {'disk-raid-info': ['effective-disk-type']}}
+ result = na_utils.invoke_api(na_server,
+ api_name='storage-disk-get-iter',
+ api_family='cm', query=query,
+ des_result=des_attr,
+ additional_elems=None,
+ is_iter=True)
+ for res in result:
+ attr_list = res.get_child_by_name('attributes-list')
+ if attr_list:
+ storage_disks = attr_list.get_children()
+ for disk in storage_disks:
+ raid_info = disk.get_child_by_name('disk-raid-info')
+ if raid_info:
+ eff_disk_type =\
+ raid_info.get_child_content('effective-disk-type')
+ if eff_disk_type:
+ return eff_disk_type
+ return 'unknown'
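+
+
+# Illustrative usage sketch; returns the first effective disk type reported
+# for the aggregate (e.g. 'SAS'), or 'unknown' when none is found:
+#     disk_type = query_aggr_storage_disk(server, 'aggr0')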
+
+
+def get_cluster_ssc(na_server, vserver):
+ """Provides cluster volumes with ssc."""
+ netapp_volumes = get_cluster_vols_with_ssc(na_server, vserver)
+ mirror_vols = set()
+ dedup_vols = set()
+ compress_vols = set()
+ thin_prov_vols = set()
+ ssc_map = {'mirrored': mirror_vols, 'dedup': dedup_vols,
+ 'compression': compress_vols,
+ 'thin': thin_prov_vols, 'all': netapp_volumes}
+ for vol in netapp_volumes:
+ if vol.sis['dedup']:
+ dedup_vols.add(vol)
+ if vol.sis['compression']:
+ compress_vols.add(vol)
+ if vol.mirror['mirrored']:
+ mirror_vols.add(vol)
+ if vol.space['thin_provisioned']:
+ thin_prov_vols.add(vol)
+ return ssc_map
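+
+
+# Illustrative sketch of the returned structure: every capability key maps
+# to a subset of the 'all' set:
+#     ssc = get_cluster_ssc(server, 'vs1')
+#     assert ssc['dedup'] <= ssc['all']  # likewise for every other key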
+
+
+def refresh_cluster_stale_ssc(*args, **kwargs):
+ """Refreshes stale ssc volumes with latest."""
+ backend = args[0]
+ na_server = args[1]
+ vserver = args[2]
+ identity = str(id(backend))
+ lock_pr = '%s_%s' % ('refresh_ssc', identity)
+ try:
+ job_set = na_utils.set_safe_attr(
+ backend, 'refresh_stale_running', True)
+ if not job_set:
+ return
+
+ @utils.synchronized(lock_pr)
+ def refresh_stale_ssc():
+ stale_vols = backend._update_stale_vols(reset=True)
+ LOG.info(_('Running stale ssc refresh job for %(server)s'
+ ' and vserver %(vs)s')
+ % {'server': na_server, 'vs': vserver})
+ # Refreshing individual volumes can leave the map inconsistent,
+ # hence do the manipulations on a copy and swap it in at the end.
+ ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
+ refresh_vols = set()
+ expired_vols = set()
+ for vol in stale_vols:
+ name = vol.id['name']
+ res = get_cluster_vols_with_ssc(na_server, vserver, name)
+ if res:
+ refresh_vols.add(res.pop())
+ else:
+ expired_vols.add(vol)
+ for vol in refresh_vols:
+ for k in ssc_vols_copy:
+ vol_set = ssc_vols_copy[k]
+ vol_set.discard(vol)
+ if k == "mirrored" and vol.mirror['mirrored']:
+ vol_set.add(vol)
+ if k == "dedup" and vol.sis['dedup']:
+ vol_set.add(vol)
+ if k == "compression" and vol.sis['compression']:
+ vol_set.add(vol)
+ if k == "thin" and vol.space['thin_provisioned']:
+ vol_set.add(vol)
+ if k == "all":
+ vol_set.add(vol)
+ for vol in expired_vols:
+ for k in ssc_vols_copy:
+ vol_set = ssc_vols_copy[k]
+ vol_set.discard(vol)
+ backend.refresh_ssc_vols(ssc_vols_copy)
+
+ refresh_stale_ssc()
+ finally:
+ na_utils.set_safe_attr(backend, 'refresh_stale_running', False)
+
+
+def get_cluster_latest_ssc(*args, **kwargs):
+ """Updates volumes including ssc."""
+ backend = args[0]
+ na_server = args[1]
+ vserver = args[2]
+ identity = str(id(backend))
+ lock_pr = '%s_%s' % ('refresh_ssc', identity)
+
+ # Scheduling decisions depend on this job's running state,
+ # so set the flag as soon as the job starts to avoid
+ # job accumulation.
+ try:
+ job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True)
+ if not job_set:
+ return
+
+ @utils.synchronized(lock_pr)
+ def get_latest_ssc():
+ LOG.info(_('Running cluster latest ssc job for %(server)s'
+ ' and vserver %(vs)s')
+ % {'server': na_server, 'vs': vserver})
+ ssc_vols = get_cluster_ssc(na_server, vserver)
+ backend.refresh_ssc_vols(ssc_vols)
+ backend.ssc_run_time = timeutils.utcnow()
+
+ get_latest_ssc()
+ finally:
+ na_utils.set_safe_attr(backend, 'ssc_job_running', False)
+
+
+def refresh_cluster_ssc(backend, na_server, vserver):
+ """Refresh cluster ssc for backend."""
+ if not isinstance(backend, driver.VolumeDriver):
+ raise exception.InvalidInput(reason=_("Backend not a VolumeDriver."))
+ if not isinstance(na_server, api.NaServer):
+ raise exception.InvalidInput(reason=_("Backend server not NaServer."))
+ delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
+ if getattr(backend, 'ssc_job_running', None):
+ LOG.warn(_('ssc job in progress. Returning.'))
+ return
+ elif (getattr(backend, 'ssc_run_time', None) is None or
+ timeutils.is_newer_than(backend.ssc_run_time, delta_secs)):
+ t = Timer(0, get_cluster_latest_ssc,
+ args=[backend, na_server, vserver])
+ t.start()
+ elif getattr(backend, 'refresh_stale_running', None):
+ LOG.warn(_('refresh stale ssc job in progress. Returning.'))
+ return
+ else:
+ if backend.stale_vols:
+ t = Timer(0, refresh_cluster_stale_ssc,
+ args=[backend, na_server, vserver])
+ t.start()
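+
+
+# Illustrative caller sketch; the driver attribute names used here are
+# assumptions:
+#     refresh_cluster_ssc(self, self.client, self.vserver)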
+
+
+def get_volumes_for_specs(ssc_vols, specs):
+ """Shortlists volumes for extra specs provided."""
+ if not isinstance(specs, dict):
+ return ssc_vols['all']
+ # Operate on a copy so the discard() calls below do not mutate
+ # the shared ssc sets.
+ result = copy.copy(ssc_vols['all'])
+ raid_type = specs.get('netapp:raid_type')
+ disk_type = specs.get('netapp:disk_type')
+ qos_policy_group = specs.get('netapp:qos_policy_group')
+ bool_specs_list = ['netapp_mirrored', 'netapp_unmirrored',
+ 'netapp_dedup', 'netapp_nodedup',
+ 'netapp_compression', 'netapp_nocompression',
+ 'netapp_thin_provisioned', 'netapp_thick_provisioned']
+ b_specs = {}
+ for spec in bool_specs_list:
+ b_specs[spec] = na_utils.to_bool(specs.get(spec))\
+ if specs.get(spec) else None
+
+ def _spec_ineffect(b_specs, spec, opp_spec):
+ """True when the spec and its opposite cancel each other out."""
+ return ((b_specs[spec] is None and b_specs[opp_spec] is None)
+ or (b_specs[spec] == b_specs[opp_spec]))
+
+ if not _spec_ineffect(b_specs, 'netapp_mirrored', 'netapp_unmirrored'):
+ if b_specs['netapp_mirrored'] or b_specs['netapp_unmirrored'] is False:
+ result = result & ssc_vols['mirrored']
+ else:
+ result = result - ssc_vols['mirrored']
+ if not _spec_ineffect(b_specs, 'netapp_dedup', 'netapp_nodedup'):
+ if b_specs['netapp_dedup'] or b_specs['netapp_nodedup'] is False:
+ result = result & ssc_vols['dedup']
+ else:
+ result = result - ssc_vols['dedup']
+ if not _spec_ineffect(b_specs, 'netapp_compression',
+ 'netapp_nocompression'):
+ if (b_specs['netapp_compression'] or
+ b_specs['netapp_nocompression'] is False):
+ result = result & ssc_vols['compression']
+ else:
+ result = result - ssc_vols['compression']
+ if not _spec_ineffect(b_specs, 'netapp_thin_provisioned',
+ 'netapp_thick_provisioned'):
+ if (b_specs['netapp_thin_provisioned'] or
+ b_specs['netapp_thick_provisioned'] is False):
+ result = result & ssc_vols['thin']
+ else:
+ result = result - ssc_vols['thin']
+ if raid_type or disk_type or qos_policy_group:
+ tmp = copy.deepcopy(result)
+ for vol in tmp:
+ if raid_type:
+ vol_raid = vol.aggr['raid_type']
+ vol_raid = vol_raid.lower() if vol_raid else None
+ if raid_type.lower() != vol_raid:
+ result.discard(vol)
+ if disk_type:
+ vol_dtype = vol.aggr['disk_type']
+ vol_dtype = vol_dtype.lower() if vol_dtype else None
+ if disk_type.lower() != vol_dtype:
+ result.discard(vol)
+ if qos_policy_group:
+ vol_qos = vol.qos['qos_policy_group']
+ vol_qos = vol_qos.lower() if vol_qos else None
+ if qos_policy_group.lower() != vol_qos:
+ result.discard(vol)
+ return result
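+
+
+# Illustrative usage sketch with assumed extra specs (``driver`` is an
+# assumed backend instance); shortlists volumes that are mirrored, dedup
+# enabled and on SAS disks:
+#     specs = {'netapp_mirrored': 'true', 'netapp_dedup': 'true',
+#              'netapp:disk_type': 'SAS'}
+#     vols = get_volumes_for_specs(driver.ssc_vols, specs)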
import copy
import socket
+from cinder import context
+from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
+from cinder import utils
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
+from cinder.volume.drivers.netapp.api import NaServer
+from cinder.volume import volume_types
+
LOG = logging.getLogger(__name__)
return
LOG.warn(_("It is not the recommended way to use drivers by NetApp. "
"Please use NetAppDriver to achieve the functionality."))
+
+
+def invoke_api(na_server, api_name, api_family='cm', query=None,
+ des_result=None, additional_elems=None,
+ is_iter=False, records=0, tag=None,
+ timeout=0, tunnel=None):
+ """Invokes any given api call to a NetApp server.
+
+ :param na_server: na_server instance
+ :param api_name: api name string
+ :param api_family: cm or 7m
+ :param query: api query as dict
+ :param des_result: desired result as dict
+ :param additional_elems: dict other than query and des_result
+ :param is_iter: is iterator api
+ :param records: limit for records, 0 for no limit
+ :param tag: next tag for iter api
+ :param timeout: timeout seconds
+ :param tunnel: tunnel entity, vserver or vfiler name
+ """
+ record_step = 50
+ if not isinstance(na_server, NaServer):
+ msg = _("Requires an NaServer instance.")
+ raise exception.InvalidInput(reason=msg)
+ server = copy.copy(na_server)
+ if api_family == 'cm':
+ server.set_vserver(tunnel)
+ else:
+ server.set_vfiler(tunnel)
+ if timeout > 0:
+ server.set_timeout(timeout)
+ iter_records = 0
+ cond = True
+ while cond:
+ na_element = create_api_request(
+ api_name, query, des_result, additional_elems,
+ is_iter, record_step, tag)
+ result = server.invoke_successfully(na_element, True)
+ if is_iter:
+ if records > 0:
+ iter_records = iter_records + record_step
+ if iter_records >= records:
+ cond = False
+ tag_el = result.get_child_by_name('next-tag')
+ tag = tag_el.get_content() if tag_el else None
+ if not tag:
+ cond = False
+ else:
+ cond = False
+ yield result
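+
+
+# Illustrative usage sketch: invoke_api is a generator, so iterate over it
+# to page through iter-style results (``server`` is an assumed NaServer):
+#     for res in invoke_api(server, api_name='sis-get-iter',
+#                           api_family='cm', is_iter=True):
+#         attr_list = res.get_child_by_name('attributes-list')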
+
+
+def create_api_request(api_name, query=None, des_result=None,
+ additional_elems=None, is_iter=False,
+ record_step=50, tag=None):
+ """Creates a NetApp api request.
+
+ :param api_name: api name string
+ :param query: api query as dict
+ :param des_result: desired result as dict
+ :param additional_elems: dict other than query and des_result
+ :param is_iter: is iterator api
+ :param record_step: records at a time for iter api
+ :param tag: next tag for iter api
+ """
+ api_el = NaElement(api_name)
+ if query:
+ query_el = NaElement('query')
+ query_el.translate_struct(query)
+ api_el.add_child_elem(query_el)
+ if des_result:
+ res_el = NaElement('desired-attributes')
+ res_el.translate_struct(des_result)
+ api_el.add_child_elem(res_el)
+ if additional_elems:
+ api_el.translate_struct(additional_elems)
+ if is_iter:
+ api_el.add_new_child('max-records', str(record_step))
+ if tag:
+ api_el.add_new_child('tag', tag, True)
+ return api_el
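+
+
+# Illustrative construction of an iterator request (the query contents are
+# assumptions):
+#     el = create_api_request(
+#         'sis-get-iter', query={'sis-status-info': {'vserver': 'vs1'}},
+#         is_iter=True, record_step=50)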
+
+
+def to_bool(val):
+ """Converts 'true', 'y', 'yes', 'enabled' and '1' to True, else False."""
+ if val:
+ strg = str(val).lower()
+ return strg in ('true', 'y', 'yes', 'enabled', '1')
+ return False
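+
+
+# Illustrative examples:
+#     to_bool('Yes') -> True, to_bool('enabled') -> True,
+#     to_bool('0') -> False, to_bool(None) -> False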
+
+
+@utils.synchronized("safe_set_attr")
+def set_safe_attr(instance, attr, val):
+ """Sets the attribute in a thread safe manner.
+
+ Returns True if the new value was set on the attribute,
+ False if the attribute already held that value.
+ """
+
+ if not instance or not attr:
+ return False
+ old_val = getattr(instance, attr, None)
+ if val is None and old_val is None:
+ return False
+ elif val == old_val:
+ return False
+ else:
+ setattr(instance, attr, val)
+ return True
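+
+
+# Illustrative sketch: the first caller to set a flag wins, which lets
+# jobs guard against duplicate runs (``backend`` is an assumed object):
+#     if set_safe_attr(backend, 'ssc_job_running', True):
+#         pass  # run the job; a concurrent duplicate call gets False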
+
+
+def get_volume_extra_specs(volume):
+ """Provides extra specs associated with volume."""
+ ctxt = context.get_admin_context()
+ type_id = volume.get('volume_type_id')
+ specs = None
+ if type_id is not None:
+ volume_type = volume_types.get_volume_type(ctxt, type_id)
+ specs = volume_type.get('extra_specs')
+ return specs
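+
+
+# Illustrative sketch: for a typed volume this returns the volume type's
+# extra_specs dict; untyped volumes yield None:
+#     specs = get_volume_extra_specs({'volume_type_id': type_id})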
def _find_share(self, volume_size_in_gib):
"""Choose NFS share among available ones for given volume size.
- First validation step: ratio of actual space (used_space / total_space)
- is less than 'nfs_used_ratio'.
-
- Second validation step: apparent space allocated (differs from actual
- space used when using sparse files) and compares the apparent available
- space (total_available * nfs_oversub_ratio) to ensure enough space is
- available for the new volume.
-
For instances with more than one share that meets the criteria, the
share with the least "allocated" space will be selected.
target_share = None
target_share_reserved = 0
- used_ratio = self.configuration.nfs_used_ratio
- oversub_ratio = self.configuration.nfs_oversub_ratio
-
- requested_volume_size = volume_size_in_gib * units.GiB
-
for nfs_share in self._mounted_shares:
+ if not self._is_share_eligible(nfs_share, volume_size_in_gib):
+ continue
total_size, total_available, total_allocated = \
self._get_capacity_info(nfs_share)
- apparent_size = max(0, total_size * oversub_ratio)
- apparent_available = max(0, apparent_size - total_allocated)
- used = (total_size - total_available) / total_size
- if used > used_ratio:
- # NOTE(morganfainberg): We check the used_ratio first since
- # with oversubscription it is possible to not have the actual
- # available space but be within our oversubscription limit
- # therefore allowing this share to still be selected as a valid
- # target.
- LOG.debug(_('%s is above nfs_used_ratio'), nfs_share)
- continue
- if apparent_available <= requested_volume_size:
- LOG.debug(_('%s is above nfs_oversub_ratio'), nfs_share)
- continue
- if total_allocated / total_size >= oversub_ratio:
- LOG.debug(_('%s reserved space is above nfs_oversub_ratio'),
- nfs_share)
- continue
-
if target_share is not None:
if target_share_reserved > total_allocated:
target_share = nfs_share
return target_share
+ def _is_share_eligible(self, nfs_share, volume_size_in_gib):
+ """Verifies NFS share is eligible to host volume with given size.
+
+ First validation step: the ratio of actual used space to total space
+ (used_space / total_space) must be less than 'nfs_used_ratio'.
+ Second validation step: the apparent available space, computed as the
+ apparent size (total_size * nfs_oversub_ratio) minus the apparent
+ space allocated (which differs from actual space used when sparse
+ files are in use), must be large enough for the new volume.
+
+ :param nfs_share: nfs share
+ :param volume_size_in_gib: int size in GiB
+ """
+
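+ # Worked example (illustrative numbers): total=100GiB, available=60GiB,
+ # allocated=50GiB, nfs_used_ratio=0.95, nfs_oversub_ratio=1.0 gives
+ # used=0.4 and apparent_available=100*1.0-50=50GiB, so a 10GiB request
+ # is eligible while a 50GiB request fails the apparent-space check.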
+ used_ratio = self.configuration.nfs_used_ratio
+ oversub_ratio = self.configuration.nfs_oversub_ratio
+ requested_volume_size = volume_size_in_gib * units.GiB
+
+ total_size, total_available, total_allocated = \
+ self._get_capacity_info(nfs_share)
+ apparent_size = max(0, total_size * oversub_ratio)
+ apparent_available = max(0, apparent_size - total_allocated)
+ # float() guards against integer division truncating the ratio
+ used = float(total_size - total_available) / total_size
+ if used > used_ratio:
+ # NOTE(morganfainberg): We check the used_ratio first since
+ # with oversubscription it is possible to not have the actual
+ # available space but be within our oversubscription limit
+ # therefore allowing this share to still be selected as a valid
+ # target.
+ LOG.debug(_('%s is above nfs_used_ratio'), nfs_share)
+ return False
+ if apparent_available <= requested_volume_size:
+ LOG.debug(_('%s is above nfs_oversub_ratio'), nfs_share)
+ return False
+ if float(total_allocated) / total_size >= oversub_ratio:
+ LOG.debug(_('%s reserved space is above nfs_oversub_ratio'),
+ nfs_share)
+ return False
+ return True
+
def _get_mount_point_for_share(self, nfs_share):
"""Needed by parent class."""
return self._remotefsclient.get_mount_point(nfs_share)
def _get_capacity_info(self, nfs_share):
"""Calculate available space on the NFS share.
+
:param nfs_share: example 172.18.194.100:/var/nfs
"""
+
mount_point = self._get_mount_point_for_share(nfs_share)
df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point,
#netapp_password=<None>
# Cluster vserver to use for provisioning (string value)
-#netapp_vserver=openstack
+#netapp_vserver=<None>
# Host name for the storage controller (string value)
#netapp_server_hostname=<None>