from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hds import iscsi
+from cinder.volume import volume_types
+
LOG = logging.getLogger(__name__)
HNASCONF = """<?xml version="1.0" encoding="UTF-8" ?>
# The following information is passed to the tests when creating a volume
_VOLUME = {'name': 'testvol', 'volume_id': '1234567890', 'size': 128,
- 'volume_type': None, 'provider_location': None, 'id': 'abcdefg'}
+ 'volume_type': 'silver', 'volume_type_id': '1',
+ 'provider_location': None, 'id': 'abcdefg',
+ 'host': 'host1@hnas-iscsi-backend#silver'}
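+# Note: 'host' follows the Cinder 'host@backend#pool' convention; the
+# pool part ('silver') is what get_pool()/_get_service() resolve via
+# utils.extract_host(..., level='pool').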
class SimulatedHnasBackend(object):
"CTL: 1 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up"
return self.out
- def get_hdp_info(self, cmd, ip0, user, pw):
+ def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None):
self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: " \
"70 Normal fs1\n" \
"HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 Normal fs2"
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats["vendor_name"], "HDS")
self.assertEqual(stats["storage_protocol"], "iSCSI")
- self.assertTrue(stats["total_capacity_gb"] > 0)
+ self.assertEqual(len(stats['pools']), 2)
def test_delete_volume(self):
vol = self._create_volume()
self.assertNotEqual(num_conn_before, num_conn_after)
# cleanup
self.backend.deleteVolumebyProvider(vol['provider_location'])
+
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
+ return_value={'key': 'type', 'service_label': 'silver'})
+ def test_get_pool(self, m_ext_spec):
+ label = self.driver.get_pool(_VOLUME)
+ self.assertEqual('silver', label)
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hds import nfs
+from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
"""
# The following information is passed to the tests when creating a volume
+_SERVICE = ('Test_hdp', 'Test_path', 'Test_label')
_SHARE = '172.17.39.132:/cinder'
+_SHARE2 = '172.17.39.133:/cinder'
_EXPORT = '/cinder'
_VOLUME = {'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
- 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190', 'size': 128,
- 'volume_type': None, 'provider_location': None,
- 'id': 'bcc48c61-9691-4e5f-897c-793686093190'}
+ 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190',
+ 'size': 128,
+ 'volume_type': 'silver',
+ 'volume_type_id': 'test',
+ 'metadata': [{'key': 'type',
+ 'service_label': 'silver'}],
+ 'provider_location': None,
+ 'id': 'bcc48c61-9691-4e5f-897c-793686093190',
+ 'status': 'available',
+ 'host': 'host1@hnas-iscsi-backend#silver'}
_SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
- 'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc', 'size': 128,
- 'volume_type': None, 'provider_location': None,
+ 'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc',
+ 'size': 128,
+ 'volume_type': None,
+ 'provider_location': None,
'volume_size': 128,
'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
- 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190'}
+ 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191',
+ 'host': 'host1@hnas-iscsi-backend#silver'}
+
+GET_ID_VOL = {
+ 'bcc48c61-9691-4e5f-897c-793686093190': [_VOLUME],
+ 'bcc48c61-9691-4e5f-897c-793686093191': [_SNAPVOLUME]
+}
+
+
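+# Used as a side_effect for the mocked _id_to_vol below: returns the
+# fixture matching whichever volume id the driver looks up.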
+def id_to_vol(arg):
+ return GET_ID_VOL.get(arg)
class SimulatedHnasBackend(object):
@mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
def test_create_snapshot(self, m_get_volume_location, m_get_export_path,
m_get_provider_location, m_id_to_vol):
- vol = _VOLUME.copy()
svol = _SNAPVOLUME.copy()
+ m_id_to_vol.return_value = svol
- m_id_to_vol(vol['volume_id']).return_value = vol
- m_id_to_vol(svol['id']).return_value = svol
-
- m_get_provider_location(vol['volume_id']).return_value = _SHARE
- m_get_provider_location(svol['id']).return_value = _SHARE
-
- m_get_volume_location(svol['volume_id']).return_value = _SHARE
- m_get_export_path(svol['volume_id']).return_value = _EXPORT
+ m_get_provider_location.return_value = _SHARE
+ m_get_volume_location.return_value = _SHARE
+ m_get_export_path.return_value = _EXPORT
loc = self.driver.create_snapshot(svol)
- self.assertNotEqual(loc, None)
+ out = "{'provider_location': \'" + _SHARE + "'}"
+ self.assertEqual(str(loc), out)
- @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
+ @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
+ @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
@mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
@mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
def test_create_cloned_volume(self, m_get_volume_location,
- m_get_provider_location, m_id_to_vol):
+ m_get_provider_location, m_id_to_vol,
+ m_get_service):
vol = _VOLUME.copy()
svol = _SNAPVOLUME.copy()
- m_id_to_vol(vol['id']).return_value = vol
- m_id_to_vol(svol['id']).return_value = svol
+ m_get_service.return_value = _SERVICE
+ m_get_provider_location.return_value = _SHARE
+ m_get_volume_location.return_value = _SHARE
- m_get_provider_location(vol['id']).return_value = _SHARE
- m_get_provider_location(svol['id']).return_value = _SHARE
+ loc = self.driver.create_cloned_volume(vol, svol)
- m_get_volume_location(svol['id']).return_value = _SHARE
+ out = "{'provider_location': \'" + _SHARE + "'}"
+ self.assertEqual(str(loc), out)
- loc = self.driver.create_cloned_volume(vol, svol)
- self.assertNotEqual(loc, None)
+ @mock.patch.object(nfs.HDSNFSDriver, '_ensure_shares_mounted')
+ @mock.patch.object(nfs.HDSNFSDriver, '_do_create_volume')
+ @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
+ @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
+ @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
+ def test_create_volume(self, m_get_volume_location,
+ m_get_provider_location, m_id_to_vol,
+ m_do_create_volume, m_ensure_shares_mounted):
+
+ vol = _VOLUME.copy()
+
+ m_get_provider_location.return_value = _SHARE2
+ m_get_volume_location.return_value = _SHARE2
+
+ loc = self.driver.create_volume(vol)
+
+ out = "{'provider_location': \'" + _SHARE2 + "'}"
+ self.assertEqual(str(loc), out)
@mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
@mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
m_get_provider_location, m_id_to_vol):
svol = _SNAPVOLUME.copy()
- m_id_to_vol(svol['volume_id']).return_value = svol
- m_get_provider_location(svol['volume_id']).return_value = _SHARE
+ m_id_to_vol.return_value = svol
+ m_get_provider_location.return_value = _SHARE
m_volume_not_present.return_value = True
self.driver.delete_snapshot(svol)
self.assertEqual(svol['provider_location'], None)
- @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
+ @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
+ @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
@mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
@mock.patch.object(nfs.HDSNFSDriver, '_get_export_path')
@mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
def test_create_volume_from_snapshot(self, m_get_volume_location,
m_get_export_path,
- m_get_provider_location, m_id_to_vol):
+ m_get_provider_location, m_id_to_vol,
+ m_get_service):
vol = _VOLUME.copy()
svol = _SNAPVOLUME.copy()
- m_id_to_vol(svol['volume_id']).return_value = vol
- m_id_to_vol(svol['id']).return_value = svol
+ m_get_service.return_value = _SERVICE
+ m_get_provider_location.return_value = _SHARE
+ m_get_export_path.return_value = _EXPORT
+ m_get_volume_location.return_value = _SHARE
- m_get_provider_location(svol['id']).return_value = _SHARE
- m_get_export_path(svol['volume_id']).return_value = _EXPORT
- m_get_volume_location(svol['volume_id']).return_value = _SHARE
+ loc = self.driver.create_volume_from_snapshot(vol, svol)
+ out = "{'provider_location': \'" + _SHARE + "'}"
+ self.assertEqual(str(loc), out)
+
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
+ return_value={'key': 'type', 'service_label': 'silver'})
+ def test_get_pool(self, m_ext_spec):
+ vol = _VOLUME.copy()
- loc = self.driver.create_volume_from_snapshot(_VOLUME, svol)
- self.assertNotEqual(loc, None)
+ self.assertEqual('silver', self.driver.get_pool(vol))
{'out': out, 'err': err})
return newout
- def get_hdp_info(self, cmd, ip0, user, pw):
+ def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None):
"""Gets the list of filesystems and fsids.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
+ :param fslabel: filesystem label to query; when None, info for all
+ filesystems is returned
:returns: formatted string with filesystems and fsids
"""
- out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-a',
- check_exit_code=True)
+ if fslabel is None:
+ out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-a',
+ check_exit_code=True)
+ else:
+ out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-f', fslabel,
+ check_exit_code=True)
+
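+ # 'out' holds raw 'df' output; the loop below reduces it to lines in
+ # the form used by the simulated backend in the unit tests, e.g.:
+ # "HDP: 1024 272384 MB 33792 MB 12 % LUs: 70 Normal fs1"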
lines = out.split('\n')
single_evs = True
+ LOG.debug("Parsing output: %s", lines)
+
newout = ""
for line in lines:
if 'Not mounted' in line:
if 'not' not in line and 'EVS' in line:
single_evs = False
if 'GB' in line or 'TB' in line:
+ LOG.debug("Parsing output: %s", line)
inf = line.split()
if not single_evs:
from cinder.volume import driver
from cinder.volume.drivers.hds import hnas_backend
from cinder.volume import utils
+from cinder.volume import volume_types
-HDS_HNAS_ISCSI_VERSION = '2.2.0'
+HDS_HNAS_ISCSI_VERSION = '3.0.0'
LOG = logging.getLogger(__name__)
:param volume: dictionary volume reference
"""
- label = None
- if volume['volume_type']:
- label = volume['volume_type']['name']
-
- label = label or 'default'
- if label not in self.config['services'].keys():
- # default works if no match is found
- label = 'default'
- LOG.info(_LI("Using default: instead of %s"), label)
- LOG.info(_LI("Available services: %s"),
- self.config['services'].keys())
+ label = utils.extract_host(volume['host'], level='pool')
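+ # e.g. 'host1@hnas-iscsi-backend#silver' -> 'silver'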
+ LOG.info(_LI("Using service label: %s"), label)
if label in self.config['services'].keys():
svc = self.config['services'][label]
def _get_stats(self):
"""Get HDP stats from HNAS."""
- total_cap = 0
- total_used = 0
- out = self.bend.get_hdp_info(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'])
-
- for line in out.split('\n'):
- if 'HDP' in line:
- (hdp, size, _ign, used) = line.split()[1:5] # in MB
- LOG.debug("stats: looking for: %s", hdp)
- if int(hdp) >= units.Ki: # HNAS fsid
- hdp = line.split()[11]
- if hdp in self.config['hdp'].keys():
- total_cap += int(size)
- total_used += int(used)
-
- LOG.info(_LI("stats: total: %(cap)d used: %(used)d"),
- {'cap': total_cap, 'used': total_used})
-
hnas_stat = {}
- hnas_stat['total_capacity_gb'] = int(total_cap / units.Ki) # in GB
- hnas_stat['free_capacity_gb'] = \
- int((total_cap - total_used) / units.Ki)
be_name = self.configuration.safe_get('volume_backend_name')
hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver'
hnas_stat["vendor_name"] = 'HDS'
hnas_stat["driver_version"] = HDS_HNAS_ISCSI_VERSION
hnas_stat["storage_protocol"] = 'iSCSI'
- hnas_stat['QoS_support'] = False
hnas_stat['reserved_percentage'] = 0
+ for pool in self.pools:
+ out = self.bend.get_hdp_info(self.config['hnas_cmd'],
+ self.config['mgmt_ip0'],
+ self.config['username'],
+ self.config['password'],
+ pool['hdp'])
+
+ LOG.debug('Query for pool %s: %s', pool['pool_name'], out)
+
+ (hdp, size, _ign, used) = out.split()[1:5] # in MB
+ pool['total_capacity_gb'] = int(size) / units.Ki
+ pool['free_capacity_gb'] = (int(size) - int(used)) / units.Ki
+ pool['allocated_capacity_gb'] = int(used) / units.Ki
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ hnas_stat['pools'] = self.pools
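+ # Illustrative pool entry after this loop (capacity figures derived
+ # from the simulated 'fs1' backend output in the unit tests):
+ # {'pool_name': 'silver', 'service_label': 'silver', 'hdp': 'fs1',
+ # 'total_capacity_gb': 266, 'free_capacity_gb': 233,
+ # 'allocated_capacity_gb': 33, 'QoS_support': False,
+ # 'reserved_percentage': 0}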
+
LOG.info(_LI("stats: stats: %s"), hnas_stat)
return hnas_stat
(self.arid, self.hnas_name, self.lumax) = self._array_info_get()
self._check_hdp_list()
+ service_list = self.config['services'].keys()
+ for svc_name in service_list:
+ svc = self.config['services'][svc_name]
+ pool = {}
+ pool['pool_name'] = svc['volume_type']
+ pool['service_label'] = svc['volume_type']
+ pool['hdp'] = svc['hdp']
+
+ self.pools.append(pool)
+
+ LOG.info(_LI("Configured pools: %s"), self.pools)
+
iscsi_info = self._get_iscsi_info()
LOG.info(_LI("do_setup: %s"), iscsi_info)
for svc in self.config['services'].keys():
self.driver_stats = self._get_stats()
return self.driver_stats
+
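+ # get_pool resolves a volume to one of the configured service labels:
+ # e.g. a 'silver'-typed volume whose extra specs carry
+ # {'service_label': 'silver'} maps to the 'silver' pool, while
+ # anything unresolvable falls back to 'default' (see the unit tests).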
+ def get_pool(self, volume):
+ """Returns the pool (service label) to which a volume belongs.
+
+ :param volume: dictionary volume reference
+ """
+ if not volume['volume_type']:
+ return 'default'
+ metadata = {}
+ type_id = volume['volume_type_id']
+ if type_id is not None:
+ metadata = volume_types.get_volume_type_extra_specs(type_id)
+ label = metadata.get('service_label')
+ if not label or label not in self.config['services'].keys():
+ return 'default'
+ return label
from cinder.openstack.common import log as logging
from cinder.volume.drivers.hds import hnas_backend
from cinder.volume.drivers import nfs
+from cinder.volume import utils
+from cinder.volume import volume_types
-HDS_HNAS_NFS_VERSION = '2.2.0'
+HDS_HNAS_NFS_VERSION = '3.0.0'
LOG = logging.getLogger(__name__)
:param volume: dictionary volume reference
"""
- label = None
- if volume['volume_type']:
- label = volume['volume_type']['name']
- label = label or 'default'
- if label not in self.config['services'].keys():
- # default works if no match is found
- label = 'default'
+ LOG.debug("_get_service: volume: %s", volume)
+ label = utils.extract_host(volume['host'], level='pool')
+
if label in self.config['services'].keys():
svc = self.config['services'][label]
LOG.info(_LI("Get service: %(lbl)s->%(svc)s"),
"""
_stats = super(HDSNFSDriver, self).get_volume_stats(refresh)
- be_name = self.configuration.safe_get('volume_backend_name')
- _stats["volume_backend_name"] = be_name or 'HDSNFSDriver'
_stats["vendor_name"] = 'HDS'
_stats["driver_version"] = HDS_HNAS_NFS_VERSION
_stats["storage_protocol"] = 'NFS'
+ for pool in self.pools:
+ capacity, free, used = self._get_capacity_info(pool['hdp'])
+ pool['total_capacity_gb'] = capacity / float(units.Gi)
+ pool['free_capacity_gb'] = free / float(units.Gi)
+ pool['allocated_capacity_gb'] = used / float(units.Gi)
+ pool['QoS_support'] = False
+ pool['reserved_percentage'] = 0
+
+ _stats['pools'] = self.pools
+
+ LOG.info(_LI('Driver stats: %s'), _stats)
+
return _stats
def _get_nfs_info(self):
nfs_info = self._get_nfs_info()
+ LOG.debug("nfs_info: %s", nfs_info)
+
for share in self.shares:
if share in nfs_info.keys():
LOG.info(_LI("share: %(share)s -> %(info)s"),
else:
LOG.info(_LI("share: %s incorrect entry"), share)
+ LOG.debug("self.config['services'] = %s", self.config['services'])
+
+ service_list = self.config['services'].keys()
+ for svc_name in service_list:
+ svc = self.config['services'][svc_name]
+ pool = {}
+ pool['pool_name'] = svc['volume_type']
+ pool['service_label'] = svc['volume_type']
+ pool['hdp'] = svc['hdp']
+
+ self.pools.append(pool)
+
+ LOG.info(_LI("Configured pools: %s"), self.pools)
+
def _clone_volume(self, volume_name, clone_name, volume_id):
"""Clones mounted volume using the HNAS file_clone.
_fslabel, source_path, target_path)
return out
+
+ def get_pool(self, volume):
+ """Returns the pool (service label) to which a volume belongs.
+
+ :param volume: dictionary volume reference
+ """
+ if not volume['volume_type']:
+ return 'default'
+ metadata = {}
+ type_id = volume['volume_type_id']
+ if type_id is not None:
+ metadata = volume_types.get_volume_type_extra_specs(type_id)
+ label = metadata.get('service_label')
+ if not label or label not in self.config['services'].keys():
+ return 'default'
+ return label
+
+ def create_volume(self, volume):
+ """Creates a volume.
+
+ :param volume: volume reference
+ """
+ self._ensure_shares_mounted()
+
+ (_hdp, _path, _fslabel) = self._get_service(volume)
+
+ volume['provider_location'] = _hdp
+
+ LOG.info(_LI("Volume service: %(label)s. Casted to: %(loc)s"),
+ {'label': _fslabel, 'loc': volume['provider_location']})
+
+ self._do_create_volume(volume)
+
+ return {'provider_location': volume['provider_location']}
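+
+ # Only 'provider_location' is returned as the model update; with the
+ # unit-test fixtures this would be, e.g.,
+ # {'provider_location': '172.17.39.133:/cinder'}.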