from cinder import flags
from cinder.openstack.common import importutils
from cinder import test
+from cinder.volume.drivers.solidfire import SolidFire
FLAGS = flags.FLAGS
class VolumeDriverCompatibility(test.TestCase):
"""Test backwards compatibility for volume drivers."""
+ def fake_update_cluster_status(self):
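+        # No-op stub so driver __init__ does not issue a real
+        # cluster-status request.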
+ return
+
def setUp(self):
super(VolumeDriverCompatibility, self).setUp()
self.manager = importutils.import_object(FLAGS.volume_manager)

    def tearDown(self):
        super(VolumeDriverCompatibility, self).tearDown()
def _load_driver(self, driver):
+ if 'SolidFire' in driver:
+            # The SolidFire driver calls _update_cluster_status() on init,
+            # so stub it out here.
+ self.stubs.Set(SolidFire, '_update_cluster_status',
+ self.fake_update_cluster_status)
self.manager.__init__(volume_driver=driver)
def _driver_module_name(self):
class SolidFireVolumeTestCase(test.TestCase):
def setUp(self):
super(SolidFireVolumeTestCase, self).setUp()
+ self.stubs.Set(SolidFire, '_issue_api_request',
+ self.fake_issue_api_request)
def fake_issue_api_request(obj, method, params):
+        if method == 'GetClusterCapacity':
+ LOG.info('Called Fake GetClusterCapacity...')
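+            # Fake only the capacity fields that _update_cluster_status() reads.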
+            data = {'result':
+ {'clusterCapacity': {'maxProvisionedSpace': 99999999,
+ 'usedSpace': 999,
+ 'compressionPercent': 100,
+ 'deDuplicationPercent': 100,
+ 'thinProvisioningPercent': 100}}}
+ return data
+
if method == 'GetClusterInfo':
LOG.info('Called Fake GetClusterInfo...')
results = {'result': {'clusterInfo':
'enable512e': True,
'access': "readWrite",
'status': "active",
-                             'attributes':None,
+                             'attributes': None,
'qos': None,
'iqn': test_name}]}}
return result
def fake_volume_get(obj, key, default=None):
return {'qos': 'fast'}
+ def fake_update_cluster_status(self):
+ return
+
def test_create_with_qos_type(self):
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
'volume_type_id': 'fast'}
sfv = SolidFire()
model_update = sfv.create_volume(testvol)
+ self.assertNotEqual(model_update, None)
def test_create_volume(self):
self.stubs.Set(SolidFire, '_issue_api_request',
'volume_type_id': None}
sfv = SolidFire()
model_update = sfv.create_volume(testvol)
+ self.assertNotEqual(model_update, None)
def test_create_volume_with_qos(self):
preset_qos = {}
sfv = SolidFire()
model_update = sfv.create_volume(testvol)
+ self.assertNotEqual(model_update, None)
def test_create_volume_fails(self):
+        # NOTE(JDG): Faking update_cluster_status is intentional
+        # for this test.
+ self.stubs.Set(SolidFire, '_update_cluster_status',
+ self.fake_update_cluster_status)
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
sfv = SolidFire()
- model_update = sfv.delete_volume(testvol)
+ sfv.delete_volume(testvol)
def test_delete_volume_fails_no_volume(self):
self.stubs.Set(SolidFire, '_issue_api_request',
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
sfv = SolidFire()
try:
- model_update = sfv.delete_volume(testvol)
+ sfv.delete_volume(testvol)
self.fail("Should have thrown Error")
except Exception:
pass
def test_delete_volume_fails_account_lookup(self):
+        # NOTE(JDG): Faking update_cluster_status is intentional
+        # for this test.
+ self.stubs.Set(SolidFire, '_update_cluster_status',
+ self.fake_update_cluster_status)
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
sfv._get_cluster_info()
def test_get_cluster_info_fail(self):
+        # NOTE(JDG): Faking update_cluster_status is intentional
+        # for this test.
+ self.stubs.Set(SolidFire, '_update_cluster_status',
+ self.fake_update_cluster_status)
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request_fails)
sfv = SolidFire()
from cinder.volume.drivers.san.san import SanISCSIDriver
from cinder.volume import volume_types
-VERSION = 1.1
+VERSION = 1.2
LOG = logging.getLogger(__name__)
sf_opts = [
'off': None}
sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
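+    # Most recent cluster-wide stats snapshot; populated by
+    # _update_cluster_status() and returned from get_volume_stats().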
+ cluster_stats = {}
GB = math.pow(10, 9)
def __init__(self, *args, **kwargs):
super(SolidFire, self).__init__(*args, **kwargs)
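+        # Prime the stats cache at init so get_volume_stats() has data
+        # before the first refresh.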
+ self._update_cluster_status()
def _issue_api_request(self, method_name, params):
"""All API requests to SolidFire device go through this method.
'is_clone': 'True',
'src_uuid': 'src_uuid'}
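+        # Include QoS settings in the clone's attributes when present.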
+ if qos:
+ attributes['qos'] = qos
+
params = {'volumeID': int(sf_vol['volumeID']),
'name': 'UUID-%s' % v_ref['id'],
'attributes': attributes,
qos[i.key] = int(i.value)
return qos
- def _set_qos_by_volume_type(self, ctxt, type_id):
+ def _set_qos_by_volume_type(self, type_id, ctxt):
qos = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
attributes = {'uuid': volume['id'],
'is_clone': 'False'}
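+        # Include any volume-type QoS settings in the volume's attributes.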
+ if qos:
+ attributes['qos'] = qos
params = {'name': 'UUID-%s' % volume['id'],
'accountID': None,
volume)
return model
+
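+    # NOTE: the volume manager is expected to call get_volume_stats
+    # with refresh=True periodically and pass the result to the
+    # scheduler; refresh=False just returns the cached snapshot.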
+ def get_volume_stats(self, refresh=False):
+ """Get volume status.
+
+        If 'refresh' is True, update the stats first.
+        The name is a bit misleading, as most of the data
+        reported here is cluster-wide rather than per-volume.
+ """
+ if refresh:
+ self._update_cluster_status()
+
+ return self.cluster_stats
+
+ def _update_cluster_status(self):
+ """Retrieve status info for the Cluster."""
+
+ LOG.debug(_("Updating cluster status info"))
+
+ params = {}
+
+        # NOTE(jdg): The SF api provides an UNBELIEVABLE amount
+        # of stats data; this is just one of the calls.
+ results = self._issue_api_request('GetClusterCapacity', params)
+        if 'result' not in results:
+            LOG.error(_('Failed to get updated stats'))
+            return
+
+ results = results['result']['clusterCapacity']
+ free_capacity =\
+ results['maxProvisionedSpace'] - results['usedSpace']
+
+ data = {}
+ data["volume_backend_name"] = self.__class__.__name__
+ data["vendor_name"] = 'SolidFire Inc'
+ data["driver_version"] = '1.2'
+ data["storage_protocol"] = 'iSCSI'
+
+ data['total_capacity_gb'] = results['maxProvisionedSpace']
+ data['free_capacity_gb'] = free_capacity
+ data['reserved_percentage'] = FLAGS.reserved_percentage
+ data['QoS_support'] = True
+ data['compression_percent'] =\
+ results['compressionPercent']
+        data['deduplication_percent'] =\
+            results['deDuplicationPercent']
+ data['thin_provision_percent'] =\
+ results['thinProvisioningPercent']
+ self.cluster_stats = data