review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Adds pool aware scheduling for HNAS drivers
authorErlon R. Cruz <erlon.cruz@fit-tecnologia.org.br>
Mon, 12 Jan 2015 18:59:10 +0000 (16:59 -0200)
committerErlon R. Cruz <erlon.cruz@fit-tecnologia.org.br>
Mon, 9 Mar 2015 20:16:47 +0000 (17:16 -0300)
This commit adds the pool-aware feature for the HNAS drivers. Now, each service
label in the HNAS XML config will be considered as a pool unit. In order to be
able to cast a volume to a service, the admin must create 'volume_type's, and
each volume type needs to have a 'service_label' matching the tag set in the
XML config; the user can then create a volume with the desired volume_type.

Implements:  blueprint hds-hnas-pool-aware-sched
Change-Id:  Iddff7ad606b06454458fc53bfc7b812f9815f0e1

cinder/tests/test_hds_iscsi.py
cinder/tests/test_hds_nfs.py
cinder/volume/drivers/hds/hnas_backend.py
cinder/volume/drivers/hds/iscsi.py
cinder/volume/drivers/hds/nfs.py

index 8acdb43febd4020e87e48ba2fe8f040e680f3a1a..231df11c1ae7bf5425fe1e9f56e7b82667e8011e 100644 (file)
@@ -29,6 +29,8 @@ from cinder.openstack.common import log as logging
 from cinder import test
 from cinder.volume import configuration as conf
 from cinder.volume.drivers.hds import iscsi
+from cinder.volume import volume_types
+
 LOG = logging.getLogger(__name__)
 
 HNASCONF = """<?xml version="1.0" encoding="UTF-8" ?>
@@ -80,7 +82,9 @@ HNAS_WRONG_CONF2 = """<?xml version="1.0" encoding="UTF-8" ?>
 
 # The following information is passed on to tests, when creating a volume
 _VOLUME = {'name': 'testvol', 'volume_id': '1234567890', 'size': 128,
-           'volume_type': None, 'provider_location': None, 'id': 'abcdefg'}
+           'volume_type': 'silver', 'volume_type_id': '1',
+           'provider_location': None, 'id': 'abcdefg',
+           'host': 'host1@hnas-iscsi-backend#silver'}
 
 
 class SimulatedHnasBackend(object):
@@ -269,7 +273,7 @@ class SimulatedHnasBackend(object):
             "CTL: 1 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up"
         return self.out
 
-    def get_hdp_info(self, cmd, ip0, user, pw):
+    def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None):
         self.out = "HDP: 1024  272384 MB    33792 MB  12 %  LUs:  " \
             "70  Normal  fs1\n" \
             "HDP: 1025  546816 MB    73728 MB  13 %  LUs:  194  Normal  fs2"
@@ -351,7 +355,7 @@ class HNASiSCSIDriverTest(test.TestCase):
         stats = self.driver.get_volume_stats(True)
         self.assertEqual(stats["vendor_name"], "HDS")
         self.assertEqual(stats["storage_protocol"], "iSCSI")
-        self.assertTrue(stats["total_capacity_gb"] > 0)
+        self.assertEqual(len(stats['pools']), 2)
 
     def test_delete_volume(self):
         vol = self._create_volume()
@@ -443,3 +447,9 @@ class HNASiSCSIDriverTest(test.TestCase):
         self.assertNotEqual(num_conn_before, num_conn_after)
         # cleanup
         self.backend.deleteVolumebyProvider(vol['provider_location'])
+
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
+                       return_value={'key': 'type', 'service_label': 'silver'})
+    def test_get_pool(self, m_ext_spec):
+        label = self.driver.get_pool(_VOLUME)
+        self.assertEqual('silver', label)
index e69ff0f2c723a1fc46bdfdfea0321c121c9ab6f1..d0d779252dfa29eb3a01baf9b10a92080b31373f 100644 (file)
@@ -1,7 +1,7 @@
 # Copyright (c) 2014 Hitachi Data Systems, Inc.
 # All Rights Reserved.
 #
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
 #
@@ -25,6 +25,7 @@ from cinder.openstack.common import log as logging
 from cinder import test
 from cinder.volume import configuration as conf
 from cinder.volume.drivers.hds import nfs
+from cinder.volume import volume_types
 
 LOG = logging.getLogger(__name__)
 
@@ -76,18 +77,39 @@ HNAS_WRONG_CONF2 = """<?xml version="1.0" encoding="UTF-8" ?>
 """
 
 # The following information is passed on to tests, when creating a volume
+_SERVICE = ('Test_hdp', 'Test_path', 'Test_label')
 _SHARE = '172.17.39.132:/cinder'
+_SHARE2 = '172.17.39.133:/cinder'
 _EXPORT = '/cinder'
 _VOLUME = {'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
-           'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190', 'size': 128,
-           'volume_type': None, 'provider_location': None,
-           'id': 'bcc48c61-9691-4e5f-897c-793686093190'}
+           'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190',
+           'size': 128,
+           'volume_type': 'silver',
+           'volume_type_id': 'test',
+           'metadata': [{'key': 'type',
+                         'service_label': 'silver'}],
+           'provider_location': None,
+           'id': 'bcc48c61-9691-4e5f-897c-793686093190',
+           'status': 'available',
+           'host': 'host1@hnas-iscsi-backend#silver'}
 _SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
-               'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc', 'size': 128,
-               'volume_type': None, 'provider_location': None,
+               'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc',
+               'size': 128,
+               'volume_type': None,
+               'provider_location': None,
                'volume_size': 128,
                'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
-               'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190'}
+               'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191',
+               'host': 'host1@hnas-iscsi-backend#silver'}
+
+GET_ID_VOL = {
+    ("bcc48c61-9691-4e5f-897c-793686093190"): [_VOLUME],
+    ("bcc48c61-9691-4e5f-897c-793686093191"): [_SNAPVOLUME]
+}
+
+
+def id_to_vol(arg):
+    return GET_ID_VOL.get(arg)
 
 
 class SimulatedHnasBackend(object):
@@ -190,39 +212,54 @@ class HDSNFSDriverTest(test.TestCase):
     @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
     def test_create_snapshot(self, m_get_volume_location, m_get_export_path,
                              m_get_provider_location, m_id_to_vol):
-        vol = _VOLUME.copy()
         svol = _SNAPVOLUME.copy()
+        m_id_to_vol.return_value = svol
 
-        m_id_to_vol(vol['volume_id']).return_value = vol
-        m_id_to_vol(svol['id']).return_value = svol
-
-        m_get_provider_location(vol['volume_id']).return_value = _SHARE
-        m_get_provider_location(svol['id']).return_value = _SHARE
-
-        m_get_volume_location(svol['volume_id']).return_value = _SHARE
-        m_get_export_path(svol['volume_id']).return_value = _EXPORT
+        m_get_provider_location.return_value = _SHARE
+        m_get_volume_location.return_value = _SHARE
+        m_get_export_path.return_value = _EXPORT
 
         loc = self.driver.create_snapshot(svol)
-        self.assertNotEqual(loc, None)
+        out = "{'provider_location': \'" + _SHARE + "'}"
+        self.assertEqual(str(loc), out)
 
-    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
+    @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
+    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
     @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
     @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
     def test_create_cloned_volume(self, m_get_volume_location,
-                                  m_get_provider_location, m_id_to_vol):
+                                  m_get_provider_location, m_id_to_vol,
+                                  m_get_service):
         vol = _VOLUME.copy()
         svol = _SNAPVOLUME.copy()
 
-        m_id_to_vol(vol['id']).return_value = vol
-        m_id_to_vol(svol['id']).return_value = svol
+        m_get_service.return_value = _SERVICE
+        m_get_provider_location.return_value = _SHARE
+        m_get_volume_location.return_value = _SHARE
 
-        m_get_provider_location(vol['id']).return_value = _SHARE
-        m_get_provider_location(svol['id']).return_value = _SHARE
+        loc = self.driver.create_cloned_volume(vol, svol)
 
-        m_get_volume_location(svol['id']).return_value = _SHARE
+        out = "{'provider_location': \'" + _SHARE + "'}"
+        self.assertEqual(str(loc), out)
 
-        loc = self.driver.create_cloned_volume(vol, svol)
-        self.assertNotEqual(loc, None)
+    @mock.patch.object(nfs.HDSNFSDriver, '_ensure_shares_mounted')
+    @mock.patch.object(nfs.HDSNFSDriver, '_do_create_volume')
+    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
+    @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
+    @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
+    def test_create_volume(self, m_get_volume_location,
+                           m_get_provider_location, m_id_to_vol,
+                           m_do_create_volume, m_ensure_shares_mounted):
+
+        vol = _VOLUME.copy()
+
+        m_get_provider_location.return_value = _SHARE2
+        m_get_volume_location.return_value = _SHARE2
+
+        loc = self.driver.create_volume(vol)
+
+        out = "{'provider_location': \'" + _SHARE2 + "'}"
+        self.assertEqual(str(loc), out)
 
     @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
     @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
@@ -231,30 +268,38 @@ class HDSNFSDriverTest(test.TestCase):
                              m_get_provider_location, m_id_to_vol):
         svol = _SNAPVOLUME.copy()
 
-        m_id_to_vol(svol['volume_id']).return_value = svol
-        m_get_provider_location(svol['volume_id']).return_value = _SHARE
+        m_id_to_vol.return_value = svol
+        m_get_provider_location.return_value = _SHARE
 
         m_volume_not_present.return_value = True
 
         self.driver.delete_snapshot(svol)
         self.assertEqual(svol['provider_location'], None)
 
-    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
+    @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
+    @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
     @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
     @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path')
     @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
     def test_create_volume_from_snapshot(self, m_get_volume_location,
                                          m_get_export_path,
-                                         m_get_provider_location, m_id_to_vol):
+                                         m_get_provider_location, m_id_to_vol,
+                                         m_get_service):
         vol = _VOLUME.copy()
         svol = _SNAPVOLUME.copy()
 
-        m_id_to_vol(svol['volume_id']).return_value = vol
-        m_id_to_vol(svol['id']).return_value = svol
+        m_get_service.return_value = _SERVICE
+        m_get_provider_location.return_value = _SHARE
+        m_get_export_path.return_value = _EXPORT
+        m_get_volume_location.return_value = _SHARE
 
-        m_get_provider_location(svol['id']).return_value = _SHARE
-        m_get_export_path(svol['volume_id']).return_value = _EXPORT
-        m_get_volume_location(svol['volume_id']).return_value = _SHARE
+        loc = self.driver.create_volume_from_snapshot(vol, svol)
+        out = "{'provider_location': \'" + _SHARE + "'}"
+        self.assertEqual(str(loc), out)
+
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
+                       return_value={'key': 'type', 'service_label': 'silver'})
+    def test_get_pool(self, m_ext_spec):
+        vol = _VOLUME.copy()
 
-        loc = self.driver.create_volume_from_snapshot(_VOLUME, svol)
-        self.assertNotEqual(loc, None)
+        self.assertEqual(self.driver.get_pool(vol), 'silver')
index c0751ada46f986b3aed760ae4ddd46c17f8678f4..55423aa3240bf96f2ed8f8dd18c9efe2acc18262 100644 (file)
@@ -158,20 +158,28 @@ class HnasBackend(object):
                   {'out': out, 'err': err})
         return newout
 
-    def get_hdp_info(self, cmd, ip0, user, pw):
+    def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None):
         """Gets the list of filesystems and fsids.
 
         :param ip0: string IP address of controller
         :param user: string user authentication for array
         :param pw: string password authentication for array
+        :param fslabel: filesystem label to retrieve info for
         :returns: formatted string with filesystems and fsids
         """
 
-        out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-a',
-                                check_exit_code=True)
+        if fslabel is None:
+            out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-a',
+                                    check_exit_code=True)
+        else:
+            out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-f', fslabel,
+                                    check_exit_code=True)
+
         lines = out.split('\n')
         single_evs = True
 
+        LOG.debug("Parsing output: %s", lines)
+
         newout = ""
         for line in lines:
             if 'Not mounted' in line:
@@ -179,6 +187,7 @@ class HnasBackend(object):
             if 'not' not in line and 'EVS' in line:
                 single_evs = False
             if 'GB' in line or 'TB' in line:
+                LOG.debug("Parsing output: %s", line)
                 inf = line.split()
 
                 if not single_evs:
index 80d52828950b7491d364e0709b088b8b4335be50..e44e491551ca8b43082a58f17ce080fcb7244621 100644 (file)
@@ -30,9 +30,10 @@ from cinder.openstack.common import log as logging
 from cinder.volume import driver
 from cinder.volume.drivers.hds import hnas_backend
 from cinder.volume import utils
+from cinder.volume import volume_types
 
 
-HDS_HNAS_ISCSI_VERSION = '2.2.0'
+HDS_HNAS_ISCSI_VERSION = '3.0.0'
 
 LOG = logging.getLogger(__name__)
 
@@ -215,17 +216,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
            :param volume: dictionary volume reference
         """
 
-        label = None
-        if volume['volume_type']:
-            label = volume['volume_type']['name']
-
-        label = label or 'default'
-        if label not in self.config['services'].keys():
-            # default works if no match is found
-            label = 'default'
-            LOG.info(_LI("Using default: instead of %s"), label)
-            LOG.info(_LI("Available services: %s"),
-                     self.config['services'].keys())
+        label = utils.extract_host(volume['host'], level='pool')
+        LOG.info(_LI("Using service label: %s"), label)
 
         if label in self.config['services'].keys():
             svc = self.config['services'][label]
@@ -296,38 +288,32 @@ class HDSISCSIDriver(driver.ISCSIDriver):
     def _get_stats(self):
         """Get HDP stats from HNAS."""
 
-        total_cap = 0
-        total_used = 0
-        out = self.bend.get_hdp_info(self.config['hnas_cmd'],
-                                     self.config['mgmt_ip0'],
-                                     self.config['username'],
-                                     self.config['password'])
-
-        for line in out.split('\n'):
-            if 'HDP' in line:
-                (hdp, size, _ign, used) = line.split()[1:5]  # in MB
-                LOG.debug("stats: looking for: %s", hdp)
-                if int(hdp) >= units.Ki:        # HNAS fsid
-                    hdp = line.split()[11]
-                if hdp in self.config['hdp'].keys():
-                    total_cap += int(size)
-                    total_used += int(used)
-
-        LOG.info(_LI("stats: total: %(cap)d used: %(used)d"),
-                 {'cap': total_cap, 'used': total_used})
-
         hnas_stat = {}
-        hnas_stat['total_capacity_gb'] = int(total_cap / units.Ki)  # in GB
-        hnas_stat['free_capacity_gb'] = \
-            int((total_cap - total_used) / units.Ki)
         be_name = self.configuration.safe_get('volume_backend_name')
         hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver'
         hnas_stat["vendor_name"] = 'HDS'
         hnas_stat["driver_version"] = HDS_HNAS_ISCSI_VERSION
         hnas_stat["storage_protocol"] = 'iSCSI'
-        hnas_stat['QoS_support'] = False
         hnas_stat['reserved_percentage'] = 0
 
+        for pool in self.pools:
+            out = self.bend.get_hdp_info(self.config['hnas_cmd'],
+                                         self.config['mgmt_ip0'],
+                                         self.config['username'],
+                                         self.config['password'],
+                                         pool['hdp'])
+
+            LOG.debug('Query for pool %s: %s', pool['pool_name'], out)
+
+            (hdp, size, _ign, used) = out.split()[1:5]  # in MB
+            pool['total_capacity_gb'] = int(size) / units.Ki
+            pool['free_capacity_gb'] = (int(size) - int(used)) / units.Ki
+            pool['allocated_capacity_gb'] = int(used) / units.Ki
+            pool['QoS_support'] = 'False'
+            pool['reserved_percentage'] = 0
+
+        hnas_stat['pools'] = self.pools
+
         LOG.info(_LI("stats: stats: %s"), hnas_stat)
         return hnas_stat
 
@@ -401,6 +387,18 @@ class HDSISCSIDriver(driver.ISCSIDriver):
         (self.arid, self.hnas_name, self.lumax) = self._array_info_get()
         self._check_hdp_list()
 
+        service_list = self.config['services'].keys()
+        for svc in service_list:
+            svc = self.config['services'][svc]
+            pool = {}
+            pool['pool_name'] = svc['volume_type']
+            pool['service_label'] = svc['volume_type']
+            pool['hdp'] = svc['hdp']
+
+            self.pools.append(pool)
+
+        LOG.info(_LI("Configured pools: %s"), self.pools)
+
         iscsi_info = self._get_iscsi_info()
         LOG.info(_LI("do_setup: %s"), iscsi_info)
         for svc in self.config['services'].keys():
@@ -716,3 +714,22 @@ class HDSISCSIDriver(driver.ISCSIDriver):
             self.driver_stats = self._get_stats()
 
         return self.driver_stats
+
+    def get_pool(self, volume):
+
+        if not volume['volume_type']:
+            return 'default'
+        else:
+            metadata = {}
+            type_id = volume['volume_type_id']
+            if type_id is not None:
+                metadata = volume_types.get_volume_type_extra_specs(type_id)
+            if not metadata.get('service_label'):
+                return 'default'
+            else:
+                if metadata['service_label'] not in \
+                        self.config['services'].keys():
+                    return 'default'
+                else:
+                    pass
+                return metadata['service_label']
index 55e7aeb88cd25f10e597f7fb692c95654d646320..e3149f7e14b3357e4a44efe025abdff732e7a1a7 100644 (file)
@@ -32,9 +32,11 @@ from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.hds import hnas_backend
 from cinder.volume.drivers import nfs
+from cinder.volume import utils
+from cinder.volume import volume_types
 
 
-HDS_HNAS_NFS_VERSION = '2.2.0'
+HDS_HNAS_NFS_VERSION = '3.0.0'
 
 LOG = logging.getLogger(__name__)
 
@@ -194,13 +196,9 @@ class HDSNFSDriver(nfs.NfsDriver):
         :param volume: dictionary volume reference
         """
 
-        label = None
-        if volume['volume_type']:
-            label = volume['volume_type']['name']
-        label = label or 'default'
-        if label not in self.config['services'].keys():
-            # default works if no match is found
-            label = 'default'
+        LOG.debug("_get_service: volume: %s", volume)
+        label = utils.extract_host(volume['host'], level='pool')
+
         if label in self.config['services'].keys():
             svc = self.config['services'][label]
             LOG.info(_LI("Get service: %(lbl)s->%(svc)s"),
@@ -414,12 +412,22 @@ class HDSNFSDriver(nfs.NfsDriver):
         """
 
         _stats = super(HDSNFSDriver, self).get_volume_stats(refresh)
-        be_name = self.configuration.safe_get('volume_backend_name')
-        _stats["volume_backend_name"] = be_name or 'HDSNFSDriver'
         _stats["vendor_name"] = 'HDS'
         _stats["driver_version"] = HDS_HNAS_NFS_VERSION
         _stats["storage_protocol"] = 'NFS'
 
+        for pool in self.pools:
+            capacity, free, used = self._get_capacity_info(pool['hdp'])
+            pool['total_capacity_gb'] = capacity / float(units.Gi)
+            pool['free_capacity_gb'] = free / float(units.Gi)
+            pool['allocated_capacity_gb'] = used / float(units.Gi)
+            pool['QoS_support'] = 'False'
+            pool['reserved_percentage'] = 0
+
+        _stats['pools'] = self.pools
+
+        LOG.info(_LI('Driver stats: %s'), _stats)
+
         return _stats
 
     def _get_nfs_info(self):
@@ -460,6 +468,8 @@ class HDSNFSDriver(nfs.NfsDriver):
 
         nfs_info = self._get_nfs_info()
 
+        LOG.debug("nfs_info: %s", nfs_info)
+
         for share in self.shares:
             if share in nfs_info.keys():
                 LOG.info(_LI("share: %(share)s -> %(info)s"),
@@ -488,6 +498,20 @@ class HDSNFSDriver(nfs.NfsDriver):
             else:
                 LOG.info(_LI("share: %s incorrect entry"), share)
 
+        LOG.debug("self.config['services'] = %s", self.config['services'])
+
+        service_list = self.config['services'].keys()
+        for svc in service_list:
+            svc = self.config['services'][svc]
+            pool = {}
+            pool['pool_name'] = svc['volume_type']
+            pool['service_label'] = svc['volume_type']
+            pool['hdp'] = svc['hdp']
+
+            self.pools.append(pool)
+
+        LOG.info(_LI("Configured pools: %s"), self.pools)
+
     def _clone_volume(self, volume_name, clone_name, volume_id):
         """Clones mounted volume using the HNAS file_clone.
 
@@ -515,3 +539,38 @@ class HDSNFSDriver(nfs.NfsDriver):
                                    _fslabel, source_path, target_path)
 
         return out
+
+    def get_pool(self, volume):
+        if not volume['volume_type']:
+            return 'default'
+        else:
+            metadata = {}
+            type_id = volume['volume_type_id']
+            if type_id is not None:
+                metadata = volume_types.get_volume_type_extra_specs(type_id)
+            if not metadata.get('service_label'):
+                return 'default'
+            else:
+                if metadata['service_label'] not in \
+                        self.config['services'].keys():
+                    return 'default'
+                else:
+                    return metadata['service_label']
+
+    def create_volume(self, volume):
+        """Creates a volume.
+
+        :param volume: volume reference
+        """
+        self._ensure_shares_mounted()
+
+        (_hdp, _path, _fslabel) = self._get_service(volume)
+
+        volume['provider_location'] = _hdp
+
+        LOG.info(_LI("Volume service: %(label)s. Casted to: %(loc)s"),
+                 {'label': _fslabel, 'loc': volume['provider_location']})
+
+        self._do_create_volume(volume)
+
+        return {'provider_location': volume['provider_location']}