Filter & goodness functions in NetApp drivers
author     Clinton Knight <cknight@netapp.com>
           Sat, 23 Jan 2016 00:02:00 +0000 (19:02 -0500)
committer  Clinton Knight <cknight@netapp.com>
           Fri, 12 Feb 2016 16:05:17 +0000 (16:05 +0000)
This commit adds a performance module that uses the Data ONTAP
performance APIs (the perf-object-* ZAPI calls) to measure storage
controller node utilization for each pool and to report those metrics
to the Cinder scheduler via the standard filter & goodness functions.
Both Clustered and 7-mode Data ONTAP driver flavors are supported
across the NFS and iSCSI/FC protocols.

Implements: blueprint netapp-data-ontap-goodness-functions
Change-Id: I1a550edb5f6e94854d1adc7f750a904deed0002c

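Reviewer note: the per-pool utilization value can be thought of as the ratio of the change in avg_processor_busy to the change in cpu_elapsed_time between two samples of the 'system' performance object, the same counters that appear in the COUNTERS_T1/COUNTERS_T2 fakes added by this change. The snippet below is only an illustrative sketch of that arithmetic, not the code in perf_base.py, and the helper name is made up.

def processor_utilization_percent(sample_t1, sample_t2):
    # Illustrative only: busy-time delta divided by elapsed-time delta.
    busy_delta = (int(sample_t2['avg_processor_busy']) -
                  int(sample_t1['avg_processor_busy']))
    elapsed_delta = (int(sample_t2['cpu_elapsed_time']) -
                     int(sample_t1['cpu_elapsed_time']))
    if elapsed_delta <= 0:
        return 0.0
    return min(100.0, 100.0 * busy_delta / elapsed_delta)

# Counter values taken from the COUNTERS_T1 / COUNTERS_T2 fakes in this change:
t1 = {'avg_processor_busy': '29078861388', 'cpu_elapsed_time': '1063283283681'}
t2 = {'avg_processor_busy': '29081228905', 'cpu_elapsed_time': '1063340792148'}
print(round(processor_utilization_percent(t1, t2), 1))  # prints 4.1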
34 files changed:
cinder/tests/unit/test_netapp.py
cinder/tests/unit/test_netapp_nfs.py
cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py
cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py
cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py [new file with mode: 0644]
cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py [new file with mode: 0644]
cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_7mode.py [new file with mode: 0644]
cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py [new file with mode: 0644]
cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py [new file with mode: 0644]
cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
cinder/volume/drivers/netapp/dataontap/block_7mode.py
cinder/volume/drivers/netapp/dataontap/block_base.py
cinder/volume/drivers/netapp/dataontap/block_cmode.py
cinder/volume/drivers/netapp/dataontap/client/api.py
cinder/volume/drivers/netapp/dataontap/client/client_7mode.py
cinder/volume/drivers/netapp/dataontap/client/client_base.py
cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
cinder/volume/drivers/netapp/dataontap/fc_7mode.py
cinder/volume/drivers/netapp/dataontap/fc_cmode.py
cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py
cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py
cinder/volume/drivers/netapp/dataontap/nfs_7mode.py
cinder/volume/drivers/netapp/dataontap/nfs_base.py
cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
cinder/volume/drivers/netapp/dataontap/performance/__init__.py [new file with mode: 0644]
cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py [new file with mode: 0644]
cinder/volume/drivers/netapp/dataontap/performance/perf_base.py [new file with mode: 0644]
cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py [new file with mode: 0644]

diff --git a/cinder/tests/unit/test_netapp.py b/cinder/tests/unit/test_netapp.py
index 78552beed8d5ec397f9f3ab870dd88992a84a6ff..bea686a0d4958124e798bf28adf651896dc2536f 100644 (file)
@@ -31,6 +31,8 @@ from cinder.volume.drivers.netapp.dataontap import block_cmode
 from cinder.volume.drivers.netapp.dataontap.client import client_7mode
 from cinder.volume.drivers.netapp.dataontap.client import client_base
 from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
+from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
 from cinder.volume.drivers.netapp.dataontap import ssc_cmode
 from cinder.volume.drivers.netapp import options
 from cinder.volume.drivers.netapp import utils
@@ -568,6 +570,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
             ssc_cmode, 'refresh_cluster_ssc',
             lambda a, b, c, synchronous: None)
         self.mock_object(utils, 'OpenStackInfo')
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
 
         configuration = self._set_config(create_configuration())
         driver = common.NetAppDriver(configuration=configuration)
@@ -601,6 +604,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
         configuration = self._set_config(create_configuration())
         driver = common.NetAppDriver(configuration=configuration)
         mock_client = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         mock_client.assert_called_with(**FAKE_CONNECTION_HTTP)
 
@@ -612,6 +616,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
         configuration.netapp_transport_type = 'http'
         driver = common.NetAppDriver(configuration=configuration)
         mock_client = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         mock_client.assert_called_with(**FAKE_CONNECTION_HTTP)
 
@@ -624,6 +629,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
         driver = common.NetAppDriver(configuration=configuration)
         driver.library._get_root_volume_name = mock.Mock()
         mock_client = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         FAKE_CONNECTION_HTTPS = dict(FAKE_CONNECTION_HTTP,
                                      transport_type='https')
@@ -637,6 +643,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
         configuration.netapp_server_port = 81
         driver = common.NetAppDriver(configuration=configuration)
         mock_client = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         FAKE_CONNECTION_HTTP_PORT = dict(FAKE_CONNECTION_HTTP, port=81)
         mock_client.assert_called_with(**FAKE_CONNECTION_HTTP_PORT)
@@ -651,6 +658,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
         driver = common.NetAppDriver(configuration=configuration)
         driver.library._get_root_volume_name = mock.Mock()
         mock_client = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         FAKE_CONNECTION_HTTPS_PORT = dict(FAKE_CONNECTION_HTTP, port=446,
                                           transport_type='https')
@@ -1265,6 +1273,7 @@ class NetAppDirect7modeISCSIDriverTestCase_NV(test.TestCase):
                        FakeDirect7modeHTTPConnection)
         self.mock_object(driver.library, '_get_root_volume_name', mock.Mock(
             return_value='root'))
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
         driver.do_setup(context='')
         driver.root_volume_name = 'root'
         self.driver = driver
@@ -1325,6 +1334,7 @@ class NetAppDirect7modeISCSIDriverTestCase_WV(
                        FakeDirect7modeHTTPConnection)
         self.mock_object(driver.library, '_get_root_volume_name',
                          mock.Mock(return_value='root'))
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
         driver.do_setup(context='')
         self.driver = driver
         self.driver.root_volume_name = 'root'
diff --git a/cinder/tests/unit/test_netapp_nfs.py b/cinder/tests/unit/test_netapp_nfs.py
index 19e0136072c3b4bddcd24766333d78f6e362d54f..d33605feb721cb5a61250455f4e126127f039100 100644 (file)
@@ -39,6 +39,8 @@ from cinder.volume.drivers.netapp.dataontap.client import client_7mode
 from cinder.volume.drivers.netapp.dataontap.client import client_base
 from cinder.volume.drivers.netapp.dataontap.client import client_cmode
 from cinder.volume.drivers.netapp.dataontap import nfs_base
+from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
+from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
 from cinder.volume.drivers.netapp.dataontap import ssc_cmode
 from cinder.volume.drivers.netapp import utils
 
@@ -215,6 +217,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
     @mock.patch.object(client_cmode.Client, '__init__', return_value=None)
     def test_do_setup(self, mock_client_init, mock_super_do_setup):
         context = mock.Mock()
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         self._driver.do_setup(context)
         mock_client_init.assert_called_once_with(vserver=FAKE_VSERVER,
                                                  **CONNECTION_INFO)
@@ -909,6 +912,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
         configuration = self._set_config(create_configuration())
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP)
 
@@ -920,6 +924,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
         configuration.netapp_transport_type = 'http'
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP)
 
@@ -931,6 +936,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
         configuration.netapp_transport_type = 'https'
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTPS)
 
@@ -942,6 +948,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
         configuration.netapp_server_port = 81
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_CONNECTION_INFO_HTTP, port=81)
         mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP)
@@ -955,6 +962,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
         configuration.netapp_server_port = 446
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_cmode, 'Client')
+        self.mock_object(perf_cmode, 'PerformanceCmodeLibrary')
         driver.do_setup(context='')
         FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_CONNECTION_INFO_HTTPS, port=446)
         mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS)
@@ -1506,6 +1514,7 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
     @mock.patch.object(client_7mode.Client, '__init__', return_value=None)
     def test_do_setup(self, mock_client_init, mock_super_do_setup):
         context = mock.Mock()
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
         self._driver.do_setup(context)
         mock_client_init.assert_called_once_with(**SEVEN_MODE_CONNECTION_INFO)
         mock_super_do_setup.assert_called_once_with(context)
@@ -1517,6 +1526,7 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
         configuration = self._set_config(create_configuration())
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_7mode, 'Client')
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
         driver.do_setup(context='')
         mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP)
 
@@ -1528,6 +1538,7 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
         configuration.netapp_transport_type = 'http'
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_7mode, 'Client')
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
         driver.do_setup(context='')
         mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP)
 
@@ -1539,6 +1550,7 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
         configuration.netapp_transport_type = 'https'
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_7mode, 'Client')
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
         driver.do_setup(context='')
         mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTPS)
 
@@ -1550,6 +1562,7 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
         configuration.netapp_server_port = 81
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_7mode, 'Client')
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
         driver.do_setup(context='')
         FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_7MODE_CONNECTION_INFO_HTTP,
                                         port=81)
@@ -1564,6 +1577,7 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
         configuration.netapp_server_port = 446
         driver = common.NetAppDriver(configuration=configuration)
         mock_invoke = self.mock_object(client_7mode, 'Client')
+        self.mock_object(perf_7mode, 'Performance7modeLibrary')
         driver.do_setup(context='')
         FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTPS,
                                          port=446)
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
index c651cd10dff860de2f58a48e9268cfdb99e3f92f..b21858fd76447b8f8c11fde2b983067aebaef331 100644 (file)
@@ -199,3 +199,480 @@ VOLUME_LIST_INFO_RESPONSE = etree.XML("""
     </volumes>
   </results>
 """)
+
+NO_RECORDS_RESPONSE = etree.XML("""
+  <results status="passed">
+    <num-records>0</num-records>
+  </results>
+""")
+
+NODE_NAME = 'fake_node1'
+NODE_NAMES = ('fake_node1', 'fake_node2')
+VOLUME_AGGREGATE_NAME = 'fake_aggr1'
+VOLUME_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2')
+
+AGGR_GET_ITER_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes-list>
+      <aggr-attributes>
+        <aggr-64bit-upgrade-attributes>
+          <aggr-status-attributes>
+            <is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress>
+          </aggr-status-attributes>
+        </aggr-64bit-upgrade-attributes>
+        <aggr-fs-attributes>
+          <block-type>64_bit</block-type>
+          <fsid>1758646411</fsid>
+          <type>aggr</type>
+        </aggr-fs-attributes>
+        <aggr-inode-attributes>
+          <files-private-used>512</files-private-used>
+          <files-total>30384</files-total>
+          <files-used>96</files-used>
+          <inodefile-private-capacity>30384</inodefile-private-capacity>
+          <inodefile-public-capacity>30384</inodefile-public-capacity>
+          <maxfiles-available>30384</maxfiles-available>
+          <maxfiles-possible>243191</maxfiles-possible>
+          <maxfiles-used>96</maxfiles-used>
+          <percent-inode-used-capacity>0</percent-inode-used-capacity>
+        </aggr-inode-attributes>
+        <aggr-ownership-attributes>
+          <home-id>4082368507</home-id>
+          <home-name>cluster3-01</home-name>
+          <owner-id>4082368507</owner-id>
+          <owner-name>cluster3-01</owner-name>
+        </aggr-ownership-attributes>
+        <aggr-performance-attributes>
+          <free-space-realloc>off</free-space-realloc>
+          <max-write-alloc-blocks>0</max-write-alloc-blocks>
+        </aggr-performance-attributes>
+        <aggr-raid-attributes>
+          <checksum-status>active</checksum-status>
+          <checksum-style>block</checksum-style>
+          <disk-count>3</disk-count>
+          <ha-policy>cfo</ha-policy>
+          <has-local-root>true</has-local-root>
+          <has-partner-root>false</has-partner-root>
+          <is-checksum-enabled>true</is-checksum-enabled>
+          <is-hybrid>false</is-hybrid>
+          <is-hybrid-enabled>false</is-hybrid-enabled>
+          <is-inconsistent>false</is-inconsistent>
+          <mirror-status>unmirrored</mirror-status>
+          <mount-state>online</mount-state>
+          <plex-count>1</plex-count>
+          <plexes>
+            <plex-attributes>
+              <is-online>true</is-online>
+              <is-resyncing>false</is-resyncing>
+              <plex-name>/%(aggr1)s/plex0</plex-name>
+              <plex-status>normal,active</plex-status>
+              <raidgroups>
+                <raidgroup-attributes>
+                  <checksum-style>block</checksum-style>
+                  <is-cache-tier>false</is-cache-tier>
+                  <is-recomputing-parity>false</is-recomputing-parity>
+                  <is-reconstructing>false</is-reconstructing>
+                  <raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
+                  <recomputing-parity-percentage>0</recomputing-parity-percentage>
+                  <reconstruction-percentage>0</reconstruction-percentage>
+                </raidgroup-attributes>
+              </raidgroups>
+              <resyncing-percentage>0</resyncing-percentage>
+            </plex-attributes>
+          </plexes>
+          <raid-lost-write-state>on</raid-lost-write-state>
+          <raid-size>16</raid-size>
+          <raid-status>raid_dp, normal</raid-status>
+          <raid-type>raid_dp</raid-type>
+          <state>online</state>
+        </aggr-raid-attributes>
+        <aggr-snaplock-attributes>
+          <is-snaplock>false</is-snaplock>
+        </aggr-snaplock-attributes>
+        <aggr-snapshot-attributes>
+          <files-total>0</files-total>
+          <files-used>0</files-used>
+          <is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled>
+          <is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled>
+          <maxfiles-available>0</maxfiles-available>
+          <maxfiles-possible>0</maxfiles-possible>
+          <maxfiles-used>0</maxfiles-used>
+          <percent-inode-used-capacity>0</percent-inode-used-capacity>
+          <percent-used-capacity>0</percent-used-capacity>
+          <size-available>0</size-available>
+          <size-total>0</size-total>
+          <size-used>0</size-used>
+          <snapshot-reserve-percent>0</snapshot-reserve-percent>
+        </aggr-snapshot-attributes>
+        <aggr-space-attributes>
+          <aggregate-metadata>245760</aggregate-metadata>
+          <hybrid-cache-size-total>0</hybrid-cache-size-total>
+          <percent-used-capacity>95</percent-used-capacity>
+          <size-available>45670400</size-available>
+          <size-total>943718400</size-total>
+          <size-used>898048000</size-used>
+          <total-reserved-space>0</total-reserved-space>
+          <used-including-snapshot-reserve>898048000</used-including-snapshot-reserve>
+          <volume-footprints>897802240</volume-footprints>
+        </aggr-space-attributes>
+        <aggr-volume-count-attributes>
+          <flexvol-count>1</flexvol-count>
+          <flexvol-count-collective>0</flexvol-count-collective>
+          <flexvol-count-striped>0</flexvol-count-striped>
+        </aggr-volume-count-attributes>
+        <aggregate-name>%(aggr1)s</aggregate-name>
+        <aggregate-uuid>15863632-ea49-49a8-9c88-2bd2d57c6d7a</aggregate-uuid>
+        <nodes>
+          <node-name>cluster3-01</node-name>
+        </nodes>
+        <striping-type>unknown</striping-type>
+      </aggr-attributes>
+      <aggr-attributes>
+        <aggr-64bit-upgrade-attributes>
+          <aggr-status-attributes>
+            <is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress>
+          </aggr-status-attributes>
+        </aggr-64bit-upgrade-attributes>
+        <aggr-fs-attributes>
+          <block-type>64_bit</block-type>
+          <fsid>706602229</fsid>
+          <type>aggr</type>
+        </aggr-fs-attributes>
+        <aggr-inode-attributes>
+          <files-private-used>528</files-private-used>
+          <files-total>31142</files-total>
+          <files-used>96</files-used>
+          <inodefile-private-capacity>31142</inodefile-private-capacity>
+          <inodefile-public-capacity>31142</inodefile-public-capacity>
+          <maxfiles-available>31142</maxfiles-available>
+          <maxfiles-possible>1945584</maxfiles-possible>
+          <maxfiles-used>96</maxfiles-used>
+          <percent-inode-used-capacity>0</percent-inode-used-capacity>
+        </aggr-inode-attributes>
+        <aggr-ownership-attributes>
+          <home-id>4082368507</home-id>
+          <home-name>cluster3-01</home-name>
+          <owner-id>4082368507</owner-id>
+          <owner-name>cluster3-01</owner-name>
+        </aggr-ownership-attributes>
+        <aggr-performance-attributes>
+          <free-space-realloc>off</free-space-realloc>
+          <max-write-alloc-blocks>0</max-write-alloc-blocks>
+        </aggr-performance-attributes>
+        <aggr-raid-attributes>
+          <checksum-status>active</checksum-status>
+          <checksum-style>block</checksum-style>
+          <disk-count>10</disk-count>
+          <ha-policy>sfo</ha-policy>
+          <has-local-root>false</has-local-root>
+          <has-partner-root>false</has-partner-root>
+          <is-checksum-enabled>true</is-checksum-enabled>
+          <is-hybrid>false</is-hybrid>
+          <is-hybrid-enabled>false</is-hybrid-enabled>
+          <is-inconsistent>false</is-inconsistent>
+          <mirror-status>unmirrored</mirror-status>
+          <mount-state>online</mount-state>
+          <plex-count>1</plex-count>
+          <plexes>
+            <plex-attributes>
+              <is-online>true</is-online>
+              <is-resyncing>false</is-resyncing>
+              <plex-name>/%(aggr2)s/plex0</plex-name>
+              <plex-status>normal,active</plex-status>
+              <raidgroups>
+                <raidgroup-attributes>
+                  <checksum-style>block</checksum-style>
+                  <is-cache-tier>false</is-cache-tier>
+                  <is-recomputing-parity>false</is-recomputing-parity>
+                  <is-reconstructing>false</is-reconstructing>
+                  <raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name>
+                  <recomputing-parity-percentage>0</recomputing-parity-percentage>
+                  <reconstruction-percentage>0</reconstruction-percentage>
+                </raidgroup-attributes>
+                <raidgroup-attributes>
+                  <checksum-style>block</checksum-style>
+                  <is-cache-tier>false</is-cache-tier>
+                  <is-recomputing-parity>false</is-recomputing-parity>
+                  <is-reconstructing>false</is-reconstructing>
+                  <raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name>
+                  <recomputing-parity-percentage>0</recomputing-parity-percentage>
+                  <reconstruction-percentage>0</reconstruction-percentage>
+                </raidgroup-attributes>
+              </raidgroups>
+              <resyncing-percentage>0</resyncing-percentage>
+            </plex-attributes>
+          </plexes>
+          <raid-lost-write-state>on</raid-lost-write-state>
+          <raid-size>8</raid-size>
+          <raid-status>raid4, normal</raid-status>
+          <raid-type>raid4</raid-type>
+          <state>online</state>
+        </aggr-raid-attributes>
+        <aggr-snaplock-attributes>
+          <is-snaplock>false</is-snaplock>
+        </aggr-snaplock-attributes>
+        <aggr-snapshot-attributes>
+          <files-total>0</files-total>
+          <files-used>0</files-used>
+          <is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled>
+          <is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled>
+          <maxfiles-available>0</maxfiles-available>
+          <maxfiles-possible>0</maxfiles-possible>
+          <maxfiles-used>0</maxfiles-used>
+          <percent-inode-used-capacity>0</percent-inode-used-capacity>
+          <percent-used-capacity>0</percent-used-capacity>
+          <size-available>0</size-available>
+          <size-total>0</size-total>
+          <size-used>0</size-used>
+          <snapshot-reserve-percent>0</snapshot-reserve-percent>
+        </aggr-snapshot-attributes>
+        <aggr-space-attributes>
+          <aggregate-metadata>425984</aggregate-metadata>
+          <hybrid-cache-size-total>0</hybrid-cache-size-total>
+          <percent-used-capacity>15</percent-used-capacity>
+          <size-available>6448431104</size-available>
+          <size-total>7549747200</size-total>
+          <size-used>1101316096</size-used>
+          <total-reserved-space>0</total-reserved-space>
+          <used-including-snapshot-reserve>1101316096</used-including-snapshot-reserve>
+          <volume-footprints>1100890112</volume-footprints>
+        </aggr-space-attributes>
+        <aggr-volume-count-attributes>
+          <flexvol-count>2</flexvol-count>
+          <flexvol-count-collective>0</flexvol-count-collective>
+          <flexvol-count-striped>0</flexvol-count-striped>
+        </aggr-volume-count-attributes>
+        <aggregate-name>%(aggr2)s</aggregate-name>
+        <aggregate-uuid>2a741934-1aaf-42dd-93ca-aaf231be108a</aggregate-uuid>
+        <nodes>
+          <node-name>cluster3-01</node-name>
+        </nodes>
+        <striping-type>not_striped</striping-type>
+      </aggr-attributes>
+    </attributes-list>
+    <num-records>2</num-records>
+  </results>
+""" % {
+    'aggr1': VOLUME_AGGREGATE_NAMES[0],
+    'aggr2': VOLUME_AGGREGATE_NAMES[1],
+})
+
+AGGR_GET_SPACE_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes-list>
+      <aggr-attributes>
+        <aggr-raid-attributes>
+          <plexes>
+            <plex-attributes>
+              <plex-name>/%(aggr1)s/plex0</plex-name>
+              <raidgroups>
+                <raidgroup-attributes>
+                  <raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
+                </raidgroup-attributes>
+              </raidgroups>
+            </plex-attributes>
+          </plexes>
+        </aggr-raid-attributes>
+        <aggr-space-attributes>
+          <size-available>45670400</size-available>
+          <size-total>943718400</size-total>
+          <size-used>898048000</size-used>
+        </aggr-space-attributes>
+        <aggregate-name>%(aggr1)s</aggregate-name>
+      </aggr-attributes>
+      <aggr-attributes>
+        <aggr-raid-attributes>
+          <plexes>
+            <plex-attributes>
+              <plex-name>/%(aggr2)s/plex0</plex-name>
+              <raidgroups>
+                <raidgroup-attributes>
+                  <raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name>
+                </raidgroup-attributes>
+                <raidgroup-attributes>
+                  <raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name>
+                </raidgroup-attributes>
+              </raidgroups>
+            </plex-attributes>
+          </plexes>
+        </aggr-raid-attributes>
+        <aggr-space-attributes>
+          <size-available>4267659264</size-available>
+          <size-total>7549747200</size-total>
+          <size-used>3282087936</size-used>
+        </aggr-space-attributes>
+        <aggregate-name>%(aggr2)s</aggregate-name>
+      </aggr-attributes>
+    </attributes-list>
+    <num-records>2</num-records>
+  </results>
+""" % {
+    'aggr1': VOLUME_AGGREGATE_NAMES[0],
+    'aggr2': VOLUME_AGGREGATE_NAMES[1],
+})
+
+AGGR_GET_NODE_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes-list>
+      <aggr-attributes>
+        <aggr-ownership-attributes>
+          <home-name>%(node)s</home-name>
+        </aggr-ownership-attributes>
+        <aggregate-name>%(aggr)s</aggregate-name>
+      </aggr-attributes>
+    </attributes-list>
+    <num-records>1</num-records>
+  </results>
+""" % {
+    'aggr': VOLUME_AGGREGATE_NAME,
+    'node': NODE_NAME,
+})
+
+PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [
+    'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD',
+    'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1',
+    'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM',
+    'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE',
+    'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO',
+    'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM',
+    'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT',
+    'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH',
+]
+
+PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE = etree.XML("""
+  <results status="passed">
+    <counters>
+      <counter-info>
+        <desc>No. of times 8.3 names are accessed per second.</desc>
+        <name>access_8_3_names</name>
+        <privilege-level>diag</privilege-level>
+        <properties>rate</properties>
+        <unit>per_sec</unit>
+      </counter-info>
+      <counter-info>
+        <desc>Array of counts of different types of CPs</desc>
+        <labels>
+          <label-info>wafl_timer generated CP</label-info>
+          <label-info>snapshot generated CP</label-info>
+          <label-info>wafl_avail_bufs generated CP</label-info>
+          <label-info>dirty_blk_cnt generated CP</label-info>
+          <label-info>full NV-log generated CP,back-to-back CP</label-info>
+          <label-info>flush generated CP,sync generated CP</label-info>
+          <label-info>deferred back-to-back CP</label-info>
+          <label-info>low mbufs generated CP</label-info>
+          <label-info>low datavecs generated CP</label-info>
+          <label-info>nvlog replay takeover time limit CP</label-info>
+        </labels>
+        <name>cp_count</name>
+        <privilege-level>diag</privilege-level>
+        <properties>delta</properties>
+        <type>array</type>
+        <unit>none</unit>
+      </counter-info>
+      <counter-info>
+        <base-counter>total_cp_msecs</base-counter>
+        <desc>Array of percentage time spent in different phases of CP</desc>
+        <labels>
+          <label-info>%(labels)s</label-info>
+        </labels>
+        <name>cp_phase_times</name>
+        <privilege-level>diag</privilege-level>
+        <properties>percent</properties>
+        <type>array</type>
+        <unit>percent</unit>
+      </counter-info>
+    </counters>
+  </results>
+""" % {'labels': ','.join(PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS)})
+
+PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML("""
+  <results status="passed">
+    <instances>
+      <instance-data>
+        <counters>
+          <counter-data>
+            <name>avg_processor_busy</name>
+            <value>5674745133134</value>
+          </counter-data>
+        </counters>
+        <name>system</name>
+        <uuid>%(node1)s:kernel:system</uuid>
+      </instance-data>
+      <instance-data>
+        <counters>
+          <counter-data>
+            <name>avg_processor_busy</name>
+            <value>4077649009234</value>
+          </counter-data>
+        </counters>
+        <name>system</name>
+        <uuid>%(node2)s:kernel:system</uuid>
+      </instance-data>
+    </instances>
+    <timestamp>1453412013</timestamp>
+  </results>
+""" % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]})
+
+PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE = etree.XML("""
+  <results status="passed">
+    <timestamp>1454146292</timestamp>
+    <instances>
+      <instance-data>
+        <name>system</name>
+        <counters>
+          <counter-data>
+            <name>avg_processor_busy</name>
+            <value>13215732322</value>
+          </counter-data>
+        </counters>
+      </instance-data>
+    </instances>
+  </results>""")
+
+PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML("""
+  <results status="passed">
+    <attributes-list>
+      <instance-info>
+        <name>system</name>
+        <uuid>%(node)s:kernel:system</uuid>
+      </instance-info>
+    </attributes-list>
+    <num-records>1</num-records>
+  </results>
+""" % {'node': NODE_NAME})
+
+PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE = etree.XML("""
+  <results status="passed">
+    <instances>
+      <instance-info>
+        <name>processor0</name>
+      </instance-info>
+      <instance-info>
+        <name>processor1</name>
+      </instance-info>
+    </instances>
+  </results>""")
+
+SYSTEM_GET_INFO_RESPONSE = etree.XML("""
+  <results status="passed">
+    <system-info>
+      <system-name>%(node)s</system-name>
+      <system-id>4082368508</system-id>
+      <system-model>SIMBOX</system-model>
+      <system-machine-type>SIMBOX</system-machine-type>
+      <vendor-id>NetApp</vendor-id>
+      <system-serial-number>4082368508</system-serial-number>
+      <board-speed>2593</board-speed>
+      <board-type>NetApp VSim</board-type>
+      <cpu-serial-number>999999</cpu-serial-number>
+      <number-of-processors>2</number-of-processors>
+      <memory-size>1599</memory-size>
+      <cpu-processor-id>0x40661</cpu-processor-id>
+      <cpu-microcode-version>15</cpu-microcode-version>
+      <maximum-aggregate-size>2199023255552</maximum-aggregate-size>
+      <maximum-flexible-volume-size>17592186044416</maximum-flexible-volume-size>
+      <maximum-flexible-volume-count>500</maximum-flexible-volume-count>
+      <supports-raid-array>true</supports-raid-array>
+    </system-info>
+  </results>
+""" % {'node': NODE_NAME})
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py
index 6f67ce458dcae6f6d22b4a59fd56c3e546c0cf10..b111033d50faabd5327ca34848c9f7193637596f 100644 (file)
@@ -21,6 +21,8 @@ import mock
 import six
 
 from cinder import test
+from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
+    fakes as fake_client)
 from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
 from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
 from cinder.volume.drivers.netapp.dataontap.client import client_7mode
@@ -667,3 +669,63 @@ class NetApp7modeClientTestCase(test.TestCase):
 
         self.assertEqual(expected_total_bytes, total_bytes)
         self.assertEqual(expected_available_bytes, available_bytes)
+
+    def test_get_performance_instance_names(self):
+
+        mock_send_request = self.mock_object(self.client, 'send_request')
+        mock_send_request.return_value = netapp_api.NaElement(
+            fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE)
+
+        result = self.client.get_performance_instance_names('processor')
+
+        expected = ['processor0', 'processor1']
+        self.assertEqual(expected, result)
+
+        perf_object_instance_list_info_args = {'objectname': 'processor'}
+        mock_send_request.assert_called_once_with(
+            'perf-object-instance-list-info',
+            perf_object_instance_list_info_args, enable_tunneling=False)
+
+    def test_get_performance_counters(self):
+
+        mock_send_request = self.mock_object(self.client, 'send_request')
+        mock_send_request.return_value = netapp_api.NaElement(
+            fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE)
+
+        instance_names = ['system']
+        counter_names = ['avg_processor_busy']
+        result = self.client.get_performance_counters('system',
+                                                      instance_names,
+                                                      counter_names)
+
+        expected = [
+            {
+                'avg_processor_busy': '13215732322',
+                'instance-name': 'system',
+                'timestamp': '1454146292',
+            }
+        ]
+        self.assertEqual(expected, result)
+
+        perf_object_get_instances_args = {
+            'objectname': 'system',
+            'instances': [
+                {'instance': instance} for instance in instance_names
+            ],
+            'counters': [
+                {'counter': counter} for counter in counter_names
+            ],
+        }
+        mock_send_request.assert_called_once_with(
+            'perf-object-get-instances', perf_object_get_instances_args,
+            enable_tunneling=False)
+
+    def test_get_system_name(self):
+
+        mock_send_request = self.mock_object(self.client, 'send_request')
+        mock_send_request.return_value = netapp_api.NaElement(
+            fake_client.SYSTEM_GET_INFO_RESPONSE)
+
+        result = self.client.get_system_name()
+
+        self.assertEqual(fake_client.NODE_NAME, result)
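The assertions above fully specify the new 7-mode ZAPI calls. As a rough sketch only (not the client code added by this commit), a get_performance_counters helper that satisfies the test could look like the following, using the NaElement accessors (get_child_by_name, get_children, get_child_content) that the driver's api module provides:

def get_performance_counters(self, object_name, instance_names, counter_names):
    # Sketch: fetch the named counters for the named instances of a
    # performance object via perf-object-get-instances.
    api_args = {
        'objectname': object_name,
        'instances': [{'instance': name} for name in instance_names],
        'counters': [{'counter': counter} for counter in counter_names],
    }
    result = self.send_request('perf-object-get-instances', api_args,
                               enable_tunneling=False)

    counter_data = []
    timestamp = result.get_child_content('timestamp')
    for instance in result.get_child_by_name('instances').get_children():
        instance_counters = {
            'instance-name': instance.get_child_content('name'),
            'timestamp': timestamp,
        }
        for counter in instance.get_child_by_name('counters').get_children():
            name = counter.get_child_content('name')
            instance_counters[name] = counter.get_child_content('value')
        counter_data.append(instance_counters)
    return counter_data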
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py
index 26763026ed0dfd83d8ed650b187731828c678021..7c08df3f4d45a6ea373f47d637c9b1b4d229f5d8 100644 (file)
@@ -19,7 +19,10 @@ from lxml import etree
 import mock
 import six
 
+from cinder import exception
 from cinder import test
+from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
+    fakes as fake_client)
 import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
 from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
 from cinder.volume.drivers.netapp.dataontap.client import client_base
@@ -45,6 +48,7 @@ class NetAppBaseClientTestCase(test.TestCase):
         self.fake_lun = six.text_type(uuid.uuid4())
         self.fake_size = '1024'
         self.fake_metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
+        self.mock_send_request = self.mock_object(self.client, 'send_request')
 
     def tearDown(self):
         super(NetAppBaseClientTestCase, self).tearDown()
@@ -471,3 +475,33 @@ class NetAppBaseClientTestCase(test.TestCase):
         initiators = fake.FC_FORMATTED_INITIATORS
         mock_has_luns_mapped_to_initiator.return_value = False
         self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators))
+
+    def test_get_performance_counter_info(self):
+
+        self.mock_send_request.return_value = netapp_api.NaElement(
+            fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE)
+
+        result = self.client.get_performance_counter_info('wafl',
+                                                          'cp_phase_times')
+
+        expected = {
+            'name': 'cp_phase_times',
+            'base-counter': 'total_cp_msecs',
+            'labels': fake_client.PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS,
+        }
+        self.assertEqual(expected, result)
+
+        perf_object_counter_list_info_args = {'objectname': 'wafl'}
+        self.mock_send_request.assert_called_once_with(
+            'perf-object-counter-list-info',
+            perf_object_counter_list_info_args, enable_tunneling=False)
+
+    def test_get_performance_counter_info_not_found(self):
+
+        self.mock_send_request.return_value = netapp_api.NaElement(
+            fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE)
+
+        self.assertRaises(exception.NotFound,
+                          self.client.get_performance_counter_info,
+                          'wafl',
+                          'invalid')
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
index 0e5298f15b5a929283bf93ca4d1a1204602c02c0..7dd075fcdf14ff373dc65c3da30c50ae73038eff 100644 (file)
@@ -58,6 +58,9 @@ class NetAppCmodeClientTestCase(test.TestCase):
     def tearDown(self):
         super(NetAppCmodeClientTestCase, self).tearDown()
 
+    def _mock_api_error(self, code='fake'):
+        return mock.Mock(side_effect=netapp_api.NaApiError(code=code))
+
     def test_has_records(self):
 
         result = self.client._has_records(netapp_api.NaElement(
@@ -952,3 +955,207 @@ class NetAppCmodeClientTestCase(test.TestCase):
 
         self.assertEqual(expected_total_size, total_size)
         self.assertEqual(expected_available_size, available_size)
+
+    def test_get_aggregates(self):
+
+        api_response = netapp_api.NaElement(
+            fake_client.AGGR_GET_ITER_RESPONSE)
+        self.mock_object(self.client,
+                         'send_request',
+                         mock.Mock(return_value=api_response))
+
+        result = self.client._get_aggregates()
+
+        self.client.send_request.assert_has_calls([
+            mock.call('aggr-get-iter', {}, enable_tunneling=False)])
+        self.assertListEqual(
+            [aggr.to_string() for aggr in api_response.get_child_by_name(
+                'attributes-list').get_children()],
+            [aggr.to_string() for aggr in result])
+
+    def test_get_aggregates_with_filters(self):
+
+        api_response = netapp_api.NaElement(
+            fake_client.AGGR_GET_SPACE_RESPONSE)
+        self.mock_object(self.client,
+                         'send_request',
+                         mock.Mock(return_value=api_response))
+
+        desired_attributes = {
+            'aggr-attributes': {
+                'aggregate-name': None,
+                'aggr-space-attributes': {
+                    'size-total': None,
+                    'size-available': None,
+                }
+            }
+        }
+
+        result = self.client._get_aggregates(
+            aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES,
+            desired_attributes=desired_attributes)
+
+        aggr_get_iter_args = {
+            'query': {
+                'aggr-attributes': {
+                    'aggregate-name': '|'.join(
+                        fake_client.VOLUME_AGGREGATE_NAMES),
+                }
+            },
+            'desired-attributes': desired_attributes
+        }
+
+        self.client.send_request.assert_has_calls([
+            mock.call('aggr-get-iter', aggr_get_iter_args,
+                      enable_tunneling=False)])
+        self.assertListEqual(
+            [aggr.to_string() for aggr in api_response.get_child_by_name(
+                'attributes-list').get_children()],
+            [aggr.to_string() for aggr in result])
+
+    def test_get_aggregates_not_found(self):
+
+        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
+        self.mock_object(self.client,
+                         'send_request',
+                         mock.Mock(return_value=api_response))
+
+        result = self.client._get_aggregates()
+
+        self.client.send_request.assert_has_calls([
+            mock.call('aggr-get-iter', {}, enable_tunneling=False)])
+        self.assertListEqual([], result)
+
+    def test_get_node_for_aggregate(self):
+
+        api_response = netapp_api.NaElement(
+            fake_client.AGGR_GET_NODE_RESPONSE).get_child_by_name(
+            'attributes-list').get_children()
+        self.mock_object(self.client,
+                         '_get_aggregates',
+                         mock.Mock(return_value=api_response))
+
+        result = self.client.get_node_for_aggregate(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+        desired_attributes = {
+            'aggr-attributes': {
+                'aggregate-name': None,
+                'aggr-ownership-attributes': {
+                    'home-name': None,
+                },
+            },
+        }
+
+        self.client._get_aggregates.assert_has_calls([
+            mock.call(
+                aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
+                desired_attributes=desired_attributes)])
+
+        self.assertEqual(fake_client.NODE_NAME, result)
+
+    def test_get_node_for_aggregate_none_requested(self):
+
+        result = self.client.get_node_for_aggregate(None)
+
+        self.assertIsNone(result)
+
+    def test_get_node_for_aggregate_api_not_found(self):
+
+        self.mock_object(self.client,
+                         'send_request',
+                         mock.Mock(side_effect=self._mock_api_error(
+                             netapp_api.EAPINOTFOUND)))
+
+        result = self.client.get_node_for_aggregate(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+        self.assertIsNone(result)
+
+    def test_get_node_for_aggregate_api_error(self):
+
+        self.mock_object(self.client, 'send_request', self._mock_api_error())
+
+        self.assertRaises(netapp_api.NaApiError,
+                          self.client.get_node_for_aggregate,
+                          fake_client.VOLUME_AGGREGATE_NAME)
+
+    def test_get_node_for_aggregate_not_found(self):
+
+        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
+        self.mock_object(self.client,
+                         'send_request',
+                         mock.Mock(return_value=api_response))
+
+        result = self.client.get_node_for_aggregate(
+            fake_client.VOLUME_AGGREGATE_NAME)
+
+        self.assertIsNone(result)
+
+    def test_get_performance_instance_uuids(self):
+
+        self.mock_send_request.return_value = netapp_api.NaElement(
+            fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE)
+
+        result = self.client.get_performance_instance_uuids(
+            'system', fake_client.NODE_NAME)
+
+        expected = [fake_client.NODE_NAME + ':kernel:system']
+        self.assertEqual(expected, result)
+
+        perf_object_instance_list_info_iter_args = {
+            'objectname': 'system',
+            'query': {
+                'instance-info': {
+                    'uuid': fake_client.NODE_NAME + ':*',
+                }
+            }
+        }
+        self.mock_send_request.assert_called_once_with(
+            'perf-object-instance-list-info-iter',
+            perf_object_instance_list_info_iter_args, enable_tunneling=False)
+
+    def test_get_performance_counters(self):
+
+        self.mock_send_request.return_value = netapp_api.NaElement(
+            fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE)
+
+        instance_uuids = [
+            fake_client.NODE_NAMES[0] + ':kernel:system',
+            fake_client.NODE_NAMES[1] + ':kernel:system',
+        ]
+        counter_names = ['avg_processor_busy']
+        result = self.client.get_performance_counters('system',
+                                                      instance_uuids,
+                                                      counter_names)
+
+        expected = [
+            {
+                'avg_processor_busy': '5674745133134',
+                'instance-name': 'system',
+                'instance-uuid': instance_uuids[0],
+                'node-name': fake_client.NODE_NAMES[0],
+                'timestamp': '1453412013',
+            }, {
+                'avg_processor_busy': '4077649009234',
+                'instance-name': 'system',
+                'instance-uuid': instance_uuids[1],
+                'node-name': fake_client.NODE_NAMES[1],
+                'timestamp': '1453412013'
+            },
+        ]
+        self.assertEqual(expected, result)
+
+        perf_object_get_instances_args = {
+            'objectname': 'system',
+            'instance-uuids': [
+                {'instance-uuid': instance_uuid}
+                for instance_uuid in instance_uuids
+            ],
+            'counters': [
+                {'counter': counter} for counter in counter_names
+            ],
+        }
+        self.mock_send_request.assert_called_once_with(
+            'perf-object-get-instances', perf_object_get_instances_args,
+            enable_tunneling=False)
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py
index 20d18de9daf85aa46c54be7c107c8694026e1e55..64d970d659f20fed73e6be5f9642ff77eab69b2e 100644 (file)
@@ -238,6 +238,9 @@ FAKE_CMODE_POOLS = [
         'thick_provisioning_support': False,
         'provisioned_capacity_gb': 0.93,
         'max_over_subscription_ratio': 20.0,
+        'utilization': 30.0,
+        'filter_function': 'filter',
+        'goodness_function': 'goodness',
     }
 ]
 
@@ -340,6 +343,9 @@ FAKE_7MODE_POOLS = [
         'thin_provisioning_support': False,
         'thick_provisioning_support': True,
         'provisioned_capacity_gb': 0.0,
+        'utilization': 30.0,
+        'filter_function': 'filter',
+        'goodness_function': 'goodness',
     }
 ]
 
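Note that the fake pool dictionaries above now carry three scheduler-facing keys: utilization (from the new performance module) plus the filter_function and goodness_function strings. Purely as an illustration of how such expressions consume the reported value (the expression strings and the use of eval here are hypothetical; Cinder evaluates the real functions with its own safe expression parser):

pool = {
    'pool_name': 'fake_pool',
    'utilization': 30.0,
    # Hypothetical operator-supplied expressions referencing the new stat:
    'filter_function': 'utilization < 70',
    'goodness_function': '100 - utilization',
}

passes_filter = eval(pool['filter_function'], {}, dict(pool))  # True
goodness = eval(pool['goodness_function'], {}, dict(pool))     # 70.0
print(passes_filter, goodness)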
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py
new file mode 100644 (file)
index 0000000..a8d2476
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py
@@ -0,0 +1,546 @@
+# Copyright (c) 2016 Clinton Knight
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+NODE = 'cluster1-01'
+
+COUNTERS_T1 = [
+    {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'avg_processor_busy': '29078861388',
+        'instance-name': 'system',
+        'timestamp': '1453573776',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'cpu_elapsed_time': '1063283283681',
+        'instance-name': 'system',
+        'timestamp': '1453573776',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'cpu_elapsed_time1': '1063283283681',
+        'instance-name': 'system',
+        'timestamp': '1453573776',
+    }, {
+        'cp_phase_times:p2a_snap': '714',
+        'cp_phase_times:p4_finish': '14897',
+        'cp_phase_times:setup': '581',
+        'cp_phase_times:p2a_dlog1': '6019',
+        'cp_phase_times:p2a_dlog2': '2328',
+        'cp_phase_times:p2v_cont': '2479',
+        'cp_phase_times:p2v_volinfo': '1138',
+        'cp_phase_times:p2v_bm': '3484',
+        'cp_phase_times:p2v_fsinfo': '2031',
+        'cp_phase_times:p2a_inofile': '356',
+        'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,'
+                          '427,1058,354,3484,5135,1460,1138,2479,356,1373'
+                          ',6019,9,2328,2257,229,493,1275,0,6059,714,530215,'
+                          '21603833,0,0,3286,11075940,22001,14897,36',
+        'cp_phase_times:p2v_dlog2': '377',
+        'instance-name': 'wafl',
+        'cp_phase_times:p3_wait': '0',
+        'cp_phase_times:p2a_bm': '6059',
+        'cp_phase_times:p1_quota': '498',
+        'cp_phase_times:p2v_inofile': '839',
+        'cp_phase_times:p2a_refcount': '493',
+        'cp_phase_times:p2a_fsinfo': '2257',
+        'cp_phase_times:p2a_hyabc': '0',
+        'cp_phase_times:p2a_volinfo': '530215',
+        'cp_phase_times:pre_p0': '5007',
+        'cp_phase_times:p2a_hya': '9',
+        'cp_phase_times:p0_snap_del': '1840',
+        'cp_phase_times:p2a_ino': '1373',
+        'cp_phase_times:p2v_df_scores_sub': '354',
+        'cp_phase_times:p2v_ino_pub': '799',
+        'cp_phase_times:p2a_ipu_bitmap_grow': '229',
+        'cp_phase_times:p2v_refcount': '427',
+        'timestamp': '1453573776',
+        'cp_phase_times:p2v_dlog1': '0',
+        'cp_phase_times:p2_finish': '0',
+        'cp_phase_times:p1_clean': '9832',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:wafl',
+        'cp_phase_times:p3a_volinfo': '11075940',
+        'cp_phase_times:p2a_topaa': '1275',
+        'cp_phase_times:p2_flush': '21603833',
+        'cp_phase_times:p2v_df_scores': '1460',
+        'cp_phase_times:ipu_disk_add': '0',
+        'cp_phase_times:p2v_snap': '5135',
+        'cp_phase_times:p5_finish': '36',
+        'cp_phase_times:p2v_ino_pri': '1336',
+        'cp_phase_times:p3v_volinfo': '3286',
+        'cp_phase_times:p2v_topaa': '1058',
+        'cp_phase_times:p3_finish': '22001',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:wafl',
+        'total_cp_msecs': '33309624',
+        'instance-name': 'wafl',
+        'timestamp': '1453573776',
+    }, {
+        'domain_busy:kahuna': '2712467226',
+        'timestamp': '1453573777',
+        'domain_busy:cifs': '434036',
+        'domain_busy:raid_exempt': '28',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor0',
+        'domain_busy:target': '6460782',
+        'domain_busy:nwk_exempt': '20',
+        'domain_busy:raid': '722094140',
+        'domain_busy:storage': '2253156562',
+        'instance-name': 'processor0',
+        'domain_busy:cluster': '34',
+        'domain_busy:wafl_xcleaner': '51275254',
+        'domain_busy:wafl_exempt': '1243553699',
+        'domain_busy:protocol': '54',
+        'domain_busy': '1028851855595,2712467226,2253156562,5688808118,'
+                       '722094140,28,6460782,59,434036,1243553699,51275254,'
+                       '61237441,34,54,11,20,5254181873,13656398235,452215',
+        'domain_busy:nwk_legacy': '5254181873',
+        'domain_busy:dnscache': '59',
+        'domain_busy:exempt': '5688808118',
+        'domain_busy:hostos': '13656398235',
+        'domain_busy:sm_exempt': '61237441',
+        'domain_busy:nwk_exclusive': '11',
+        'domain_busy:idle': '1028851855595',
+        'domain_busy:ssan_exempt': '452215',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor0',
+        'processor_elapsed_time': '1063283843318',
+        'instance-name': 'processor0',
+        'timestamp': '1453573777',
+    }, {
+        'domain_busy:kahuna': '1978024846',
+        'timestamp': '1453573777',
+        'domain_busy:cifs': '318584',
+        'domain_busy:raid_exempt': '0',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor1',
+        'domain_busy:target': '3330956',
+        'domain_busy:nwk_exempt': '0',
+        'domain_busy:raid': '722235930',
+        'domain_busy:storage': '1498890708',
+        'instance-name': 'processor1',
+        'domain_busy:cluster': '0',
+        'domain_busy:wafl_xcleaner': '50122685',
+        'domain_busy:wafl_exempt': '1265921369',
+        'domain_busy:protocol': '0',
+        'domain_busy': '1039557880852,1978024846,1498890708,3734060289,'
+                       '722235930,0,3330956,0,318584,1265921369,50122685,'
+                       '36417362,0,0,0,0,2815252976,10274810484,393451',
+        'domain_busy:nwk_legacy': '2815252976',
+        'domain_busy:dnscache': '0',
+        'domain_busy:exempt': '3734060289',
+        'domain_busy:hostos': '10274810484',
+        'domain_busy:sm_exempt': '36417362',
+        'domain_busy:nwk_exclusive': '0',
+        'domain_busy:idle': '1039557880852',
+        'domain_busy:ssan_exempt': '393451',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor1',
+        'processor_elapsed_time': '1063283843321',
+        'instance-name': 'processor1',
+        'timestamp': '1453573777',
+    }
+]
+
+COUNTERS_T2 = [
+    {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'avg_processor_busy': '29081228905',
+        'instance-name': 'system',
+        'timestamp': '1453573834',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'cpu_elapsed_time': '1063340792148',
+        'instance-name': 'system',
+        'timestamp': '1453573834',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'cpu_elapsed_time1': '1063340792148',
+        'instance-name': 'system',
+        'timestamp': '1453573834',
+    }, {
+        'cp_phase_times:p2a_snap': '714',
+        'cp_phase_times:p4_finish': '14897',
+        'cp_phase_times:setup': '581',
+        'cp_phase_times:p2a_dlog1': '6019',
+        'cp_phase_times:p2a_dlog2': '2328',
+        'cp_phase_times:p2v_cont': '2479',
+        'cp_phase_times:p2v_volinfo': '1138',
+        'cp_phase_times:p2v_bm': '3484',
+        'cp_phase_times:p2v_fsinfo': '2031',
+        'cp_phase_times:p2a_inofile': '356',
+        'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,'
+                          '427,1058,354,3484,5135,1460,1138,2479,356,1373,'
+                          '6019,9,2328,2257,229,493,1275,0,6059,714,530215,'
+                          '21604863,0,0,3286,11076392,22001,14897,36',
+        'cp_phase_times:p2v_dlog2': '377',
+        'instance-name': 'wafl',
+        'cp_phase_times:p3_wait': '0',
+        'cp_phase_times:p2a_bm': '6059',
+        'cp_phase_times:p1_quota': '498',
+        'cp_phase_times:p2v_inofile': '839',
+        'cp_phase_times:p2a_refcount': '493',
+        'cp_phase_times:p2a_fsinfo': '2257',
+        'cp_phase_times:p2a_hyabc': '0',
+        'cp_phase_times:p2a_volinfo': '530215',
+        'cp_phase_times:pre_p0': '5007',
+        'cp_phase_times:p2a_hya': '9',
+        'cp_phase_times:p0_snap_del': '1840',
+        'cp_phase_times:p2a_ino': '1373',
+        'cp_phase_times:p2v_df_scores_sub': '354',
+        'cp_phase_times:p2v_ino_pub': '799',
+        'cp_phase_times:p2a_ipu_bitmap_grow': '229',
+        'cp_phase_times:p2v_refcount': '427',
+        'timestamp': '1453573834',
+        'cp_phase_times:p2v_dlog1': '0',
+        'cp_phase_times:p2_finish': '0',
+        'cp_phase_times:p1_clean': '9832',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:wafl',
+        'cp_phase_times:p3a_volinfo': '11076392',
+        'cp_phase_times:p2a_topaa': '1275',
+        'cp_phase_times:p2_flush': '21604863',
+        'cp_phase_times:p2v_df_scores': '1460',
+        'cp_phase_times:ipu_disk_add': '0',
+        'cp_phase_times:p2v_snap': '5135',
+        'cp_phase_times:p5_finish': '36',
+        'cp_phase_times:p2v_ino_pri': '1336',
+        'cp_phase_times:p3v_volinfo': '3286',
+        'cp_phase_times:p2v_topaa': '1058',
+        'cp_phase_times:p3_finish': '22001',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:wafl',
+        'total_cp_msecs': '33311106',
+        'instance-name': 'wafl',
+        'timestamp': '1453573834',
+    }, {
+        'domain_busy:kahuna': '2712629374',
+        'timestamp': '1453573834',
+        'domain_busy:cifs': '434036',
+        'domain_busy:raid_exempt': '28',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor0',
+        'domain_busy:target': '6461082',
+        'domain_busy:nwk_exempt': '20',
+        'domain_busy:raid': '722136824',
+        'domain_busy:storage': '2253260824',
+        'instance-name': 'processor0',
+        'domain_busy:cluster': '34',
+        'domain_busy:wafl_xcleaner': '51277506',
+        'domain_busy:wafl_exempt': '1243637154',
+        'domain_busy:protocol': '54',
+        'domain_busy': '1028906640232,2712629374,2253260824,5689093500,'
+                       '722136824,28,6461082,59,434036,1243637154,51277506,'
+                       '61240335,34,54,11,20,5254491236,13657992139,452215',
+        'domain_busy:nwk_legacy': '5254491236',
+        'domain_busy:dnscache': '59',
+        'domain_busy:exempt': '5689093500',
+        'domain_busy:hostos': '13657992139',
+        'domain_busy:sm_exempt': '61240335',
+        'domain_busy:nwk_exclusive': '11',
+        'domain_busy:idle': '1028906640232',
+        'domain_busy:ssan_exempt': '452215',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor0',
+        'processor_elapsed_time': '1063341351916',
+        'instance-name': 'processor0',
+        'timestamp': '1453573834',
+    }, {
+        'domain_busy:kahuna': '1978217049',
+        'timestamp': '1453573834',
+        'domain_busy:cifs': '318584',
+        'domain_busy:raid_exempt': '0',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor1',
+        'domain_busy:target': '3331147',
+        'domain_busy:nwk_exempt': '0',
+        'domain_busy:raid': '722276805',
+        'domain_busy:storage': '1498984059',
+        'instance-name': 'processor1',
+        'domain_busy:cluster': '0',
+        'domain_busy:wafl_xcleaner': '50126176',
+        'domain_busy:wafl_exempt': '1266039846',
+        'domain_busy:protocol': '0',
+        'domain_busy': '1039613222253,1978217049,1498984059,3734279672,'
+                       '722276805,0,3331147,0,318584,1266039846,50126176,'
+                       '36419297,0,0,0,0,2815435865,10276068104,393451',
+        'domain_busy:nwk_legacy': '2815435865',
+        'domain_busy:dnscache': '0',
+        'domain_busy:exempt': '3734279672',
+        'domain_busy:hostos': '10276068104',
+        'domain_busy:sm_exempt': '36419297',
+        'domain_busy:nwk_exclusive': '0',
+        'domain_busy:idle': '1039613222253',
+        'domain_busy:ssan_exempt': '393451',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor1',
+        'processor_elapsed_time': '1063341351919',
+        'instance-name': 'processor1',
+        'timestamp': '1453573834',
+    },
+]
+
+SYSTEM_INSTANCE_UUIDS = ['cluster1-01:kernel:system']
+SYSTEM_INSTANCE_NAMES = ['system']
+
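+# Fake counters for the 'system' performance object: avg_processor_busy plus
+# the cpu_elapsed_time/cpu_elapsed_time1 base counters, one dict per counter
+# as returned by get_performance_counters().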
+SYSTEM_COUNTERS = [
+    {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'avg_processor_busy': '27877641199',
+        'instance-name': 'system',
+        'timestamp': '1453524928',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'cpu_elapsed_time': '1014438541279',
+        'instance-name': 'system',
+        'timestamp': '1453524928',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:system',
+        'cpu_elapsed_time1': '1014438541279',
+        'instance-name': 'system',
+        'timestamp': '1453524928',
+    },
+]
+
+
+WAFL_INSTANCE_UUIDS = ['cluster1-01:kernel:wafl']
+WAFL_INSTANCE_NAMES = ['wafl']
+
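+# Raw counters for the 'wafl' object.  cp_phase_times is still the flat
+# comma-separated array form returned by the API; see EXPANDED_WAFL_COUNTERS
+# below for the per-phase expansion.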
+WAFL_COUNTERS = [
+    {
+        'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,'
+                          '418,1048,344,3344,4867,1397,1101,2380,356,1318,'
+                          '5954,9,2236,2190,228,476,1221,0,5838,696,515588,'
+                          '20542954,0,0,3122,10567367,20696,13982,36',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:wafl',
+        'instance-name': 'wafl',
+        'timestamp': '1453523339',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:wafl',
+        'total_cp_msecs': '31721222',
+        'instance-name': 'wafl',
+        'timestamp': '1453523339',
+    },
+]
+
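+# Counter metadata for cp_phase_times, as returned by
+# get_performance_counter_info(); the labels give the meaning of each
+# position in the comma-separated cp_phase_times array.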
+WAFL_CP_PHASE_TIMES_COUNTER_INFO = {
+    'labels': [
+        'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA',
+        'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI',
+        'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT',
+        'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP',
+        'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE',
+        'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO',
+        'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA',
+        'P2A_HYABC', 'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH',
+        'P2_FINISH', 'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO',
+        'P3_FINISH', 'P4_FINISH', 'P5_FINISH',
+    ],
+    'name': 'cp_phase_times',
+}
+
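+# EXPANDED_WAFL_COUNTERS is WAFL_COUNTERS after label expansion.  Roughly (a
+# sketch, not the library's exact code), each labelled value becomes its own
+# key with the label lowercased:
+#
+#     values = counter['cp_phase_times'].split(',')
+#     for label, value in zip(COUNTER_INFO['labels'], values):
+#         counter['cp_phase_times:' + label.lower()] = value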
+EXPANDED_WAFL_COUNTERS = [
+    {
+        'cp_phase_times:p2a_snap': '696',
+        'cp_phase_times:p4_finish': '13982',
+        'cp_phase_times:setup': '563',
+        'cp_phase_times:p2a_dlog1': '5954',
+        'cp_phase_times:p2a_dlog2': '2236',
+        'cp_phase_times:p2v_cont': '2380',
+        'cp_phase_times:p2v_volinfo': '1101',
+        'cp_phase_times:p2v_bm': '3344',
+        'cp_phase_times:p2v_fsinfo': '1937',
+        'cp_phase_times:p2a_inofile': '356',
+        'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,'
+                          '418,1048,344,3344,4867,1397,1101,2380,356,1318,'
+                          '5954,9,2236,2190,228,476,1221,0,5838,696,515588,'
+                          '20542954,0,0,3122,10567367,20696,13982,36',
+        'cp_phase_times:p2v_dlog2': '359',
+        'instance-name': 'wafl',
+        'cp_phase_times:p3_wait': '0',
+        'cp_phase_times:p2a_bm': '5838',
+        'cp_phase_times:p1_quota': '469',
+        'cp_phase_times:p2v_inofile': '821',
+        'cp_phase_times:p2a_refcount': '476',
+        'cp_phase_times:p2a_fsinfo': '2190',
+        'cp_phase_times:p2a_hyabc': '0',
+        'cp_phase_times:p2a_volinfo': '515588',
+        'cp_phase_times:pre_p0': '4844',
+        'cp_phase_times:p2a_hya': '9',
+        'cp_phase_times:p0_snap_del': '1731',
+        'cp_phase_times:p2a_ino': '1318',
+        'cp_phase_times:p2v_df_scores_sub': '344',
+        'cp_phase_times:p2v_ino_pub': '763',
+        'cp_phase_times:p2a_ipu_bitmap_grow': '228',
+        'cp_phase_times:p2v_refcount': '418',
+        'timestamp': '1453523339',
+        'cp_phase_times:p2v_dlog1': '0',
+        'cp_phase_times:p2_finish': '0',
+        'cp_phase_times:p1_clean': '9676',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:wafl',
+        'cp_phase_times:p3a_volinfo': '10567367',
+        'cp_phase_times:p2a_topaa': '1221',
+        'cp_phase_times:p2_flush': '20542954',
+        'cp_phase_times:p2v_df_scores': '1397',
+        'cp_phase_times:ipu_disk_add': '0',
+        'cp_phase_times:p2v_snap': '4867',
+        'cp_phase_times:p5_finish': '36',
+        'cp_phase_times:p2v_ino_pri': '1282',
+        'cp_phase_times:p3v_volinfo': '3122',
+        'cp_phase_times:p2v_topaa': '1048',
+        'cp_phase_times:p3_finish': '20696',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:wafl',
+        'total_cp_msecs': '31721222',
+        'instance-name': 'wafl',
+        'timestamp': '1453523339',
+    },
+]
+
+PROCESSOR_INSTANCE_UUIDS = [
+    'cluster1-01:kernel:processor0',
+    'cluster1-01:kernel:processor1',
+]
+PROCESSOR_INSTANCE_NAMES = ['processor0', 'processor1']
+
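+# Raw per-processor counters: the flat domain_busy array plus
+# processor_elapsed_time for each of the two processor instances.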
+PROCESSOR_COUNTERS = [
+    {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor0',
+        'domain_busy': '980648687811,2597164534,2155400686,5443901498,'
+                       '690280568,28,6180773,59,413895,1190100947,48989575,'
+                       '58549809,34,54,11,20,5024141791,13136260754,452215',
+        'instance-name': 'processor0',
+        'timestamp': '1453524150',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor0',
+        'processor_elapsed_time': '1013660714257',
+        'instance-name': 'processor0',
+        'timestamp': '1453524150',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor1',
+        'domain_busy': '990957980543,1891766637,1433411516,3572427934,'
+                       '691372324,0,3188648,0,305947,1211235777,47954620,'
+                       '34832715,0,0,0,0,2692084482,9834648927,393451',
+        'instance-name': 'processor1',
+        'timestamp': '1453524150',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor1',
+        'processor_elapsed_time': '1013660714261',
+        'instance-name': 'processor1',
+        'timestamp': '1453524150',
+    },
+]
+
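+# Label metadata for domain_busy, giving the meaning of each position in the
+# comma-separated domain_busy array.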
+PROCESSOR_DOMAIN_BUSY_COUNTER_INFO = {
+    'labels': [
+        'idle', 'kahuna', 'storage', 'exempt', 'raid', 'raid_exempt',
+        'target', 'dnscache', 'cifs', 'wafl_exempt', 'wafl_xcleaner',
+        'sm_exempt', 'cluster', 'protocol', 'nwk_exclusive', 'nwk_exempt',
+        'nwk_legacy', 'hostOS', 'ssan_exempt',
+    ],
+    'name': 'domain_busy',
+}
+
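+# PROCESSOR_COUNTERS after the same label expansion, e.g. domain_busy:idle,
+# domain_busy:kahuna, etc. (labels are lowercased, so 'hostOS' becomes
+# domain_busy:hostos).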
+EXPANDED_PROCESSOR_COUNTERS = [
+    {
+        'domain_busy:kahuna': '2597164534',
+        'timestamp': '1453524150',
+        'domain_busy:cifs': '413895',
+        'domain_busy:raid_exempt': '28',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor0',
+        'domain_busy:target': '6180773',
+        'domain_busy:nwk_exempt': '20',
+        'domain_busy:raid': '690280568',
+        'domain_busy:storage': '2155400686',
+        'instance-name': 'processor0',
+        'domain_busy:cluster': '34',
+        'domain_busy:wafl_xcleaner': '48989575',
+        'domain_busy:wafl_exempt': '1190100947',
+        'domain_busy:protocol': '54',
+        'domain_busy': '980648687811,2597164534,2155400686,5443901498,'
+                       '690280568,28,6180773,59,413895,1190100947,48989575,'
+                       '58549809,34,54,11,20,5024141791,13136260754,452215',
+        'domain_busy:nwk_legacy': '5024141791',
+        'domain_busy:dnscache': '59',
+        'domain_busy:exempt': '5443901498',
+        'domain_busy:hostos': '13136260754',
+        'domain_busy:sm_exempt': '58549809',
+        'domain_busy:nwk_exclusive': '11',
+        'domain_busy:idle': '980648687811',
+        'domain_busy:ssan_exempt': '452215',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor0',
+        'processor_elapsed_time': '1013660714257',
+        'instance-name': 'processor0',
+        'timestamp': '1453524150',
+    }, {
+        'domain_busy:kahuna': '1891766637',
+        'timestamp': '1453524150',
+        'domain_busy:cifs': '305947',
+        'domain_busy:raid_exempt': '0',
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor1',
+        'domain_busy:target': '3188648',
+        'domain_busy:nwk_exempt': '0',
+        'domain_busy:raid': '691372324',
+        'domain_busy:storage': '1433411516',
+        'instance-name': 'processor1',
+        'domain_busy:cluster': '0',
+        'domain_busy:wafl_xcleaner': '47954620',
+        'domain_busy:wafl_exempt': '1211235777',
+        'domain_busy:protocol': '0',
+        'domain_busy': '990957980543,1891766637,1433411516,3572427934,'
+                       '691372324,0,3188648,0,305947,1211235777,47954620,'
+                       '34832715,0,0,0,0,2692084482,9834648927,393451',
+        'domain_busy:nwk_legacy': '2692084482',
+        'domain_busy:dnscache': '0',
+        'domain_busy:exempt': '3572427934',
+        'domain_busy:hostos': '9834648927',
+        'domain_busy:sm_exempt': '34832715',
+        'domain_busy:nwk_exclusive': '0',
+        'domain_busy:idle': '990957980543',
+        'domain_busy:ssan_exempt': '393451',
+    }, {
+        'node-name': 'cluster1-01',
+        'instance-uuid': 'cluster1-01:kernel:processor1',
+        'processor_elapsed_time': '1013660714261',
+        'instance-name': 'processor1',
+        'timestamp': '1453524150',
+    },
+]
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_7mode.py
new file mode 100644 (file)
index 0000000..71a04d9
--- /dev/null
@@ -0,0 +1,254 @@
+# Copyright (c) 2016 Clinton Knight
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import ddt
+import mock
+
+from cinder import test
+from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \
+    import fakes as fake
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
+from cinder.volume.drivers.netapp.dataontap.performance import perf_base
+
+
+@ddt.ddt
+class Performance7modeLibraryTestCase(test.TestCase):
+
+    def setUp(self):
+        super(Performance7modeLibraryTestCase, self).setUp()
+
+        with mock.patch.object(perf_7mode.Performance7modeLibrary,
+                               '_init_counter_info'):
+            self.zapi_client = mock.Mock()
+            self.zapi_client.get_system_name.return_value = fake.NODE
+            self.perf_library = perf_7mode.Performance7modeLibrary(
+                self.zapi_client)
+            self.perf_library.system_object_name = 'system'
+            self.perf_library.avg_processor_busy_base_counter_name = (
+                'cpu_elapsed_time1')
+
+    def test_init_counter_info_not_supported(self):
+
+        self.zapi_client.features.SYSTEM_METRICS = False
+        mock_get_base_counter_name = self.mock_object(
+            self.perf_library, '_get_base_counter_name')
+
+        self.perf_library._init_counter_info()
+
+        self.assertIsNone(self.perf_library.system_object_name)
+        self.assertIsNone(
+            self.perf_library.avg_processor_busy_base_counter_name)
+        self.assertFalse(mock_get_base_counter_name.called)
+
+    def test_init_counter_info_api_error(self):
+
+        self.zapi_client.features.SYSTEM_METRICS = True
+        mock_get_base_counter_name = self.mock_object(
+            self.perf_library, '_get_base_counter_name',
+            mock.Mock(side_effect=netapp_api.NaApiError))
+
+        self.perf_library._init_counter_info()
+
+        self.assertEqual('system', self.perf_library.system_object_name)
+        self.assertEqual(
+            'cpu_elapsed_time1',
+            self.perf_library.avg_processor_busy_base_counter_name)
+        mock_get_base_counter_name.assert_called_once_with(
+            'system', 'avg_processor_busy')
+
+    def test_init_counter_info_system(self):
+
+        self.zapi_client.features.SYSTEM_METRICS = True
+        mock_get_base_counter_name = self.mock_object(
+            self.perf_library, '_get_base_counter_name',
+            mock.Mock(return_value='cpu_elapsed_time1'))
+
+        self.perf_library._init_counter_info()
+
+        self.assertEqual('system', self.perf_library.system_object_name)
+        self.assertEqual(
+            'cpu_elapsed_time1',
+            self.perf_library.avg_processor_busy_base_counter_name)
+        mock_get_base_counter_name.assert_called_once_with(
+            'system', 'avg_processor_busy')
+
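+    # Note: the expected values below imply that the cache keeps a sliding
+    # window of the ten most recent counter samples and computes utilization
+    # from the oldest and newest retained samples (here 12 and 21).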
+    def test_update_performance_cache(self):
+
+        self.perf_library.performance_counters = range(11, 21)
+
+        mock_get_node_utilization_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_counters',
+            mock.Mock(return_value=21))
+        mock_get_node_utilization = self.mock_object(
+            self.perf_library, '_get_node_utilization',
+            mock.Mock(return_value=25))
+
+        self.perf_library.update_performance_cache()
+
+        self.assertEqual(range(12, 22),
+                         self.perf_library.performance_counters)
+        self.assertEqual(25, self.perf_library.utilization)
+        mock_get_node_utilization_counters.assert_called_once_with()
+        mock_get_node_utilization.assert_called_once_with(12, 21, fake.NODE)
+
+    def test_update_performance_cache_first_pass(self):
+
+        mock_get_node_utilization_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_counters',
+            mock.Mock(return_value=11))
+        mock_get_node_utilization = self.mock_object(
+            self.perf_library, '_get_node_utilization',
+            mock.Mock(return_value=25))
+
+        self.perf_library.update_performance_cache()
+
+        self.assertEqual([11], self.perf_library.performance_counters)
+        mock_get_node_utilization_counters.assert_called_once_with()
+        self.assertFalse(mock_get_node_utilization.called)
+
+    def test_update_performance_cache_counters_unavailable(self):
+
+        self.perf_library.performance_counters = range(11, 21)
+        self.perf_library.utilization = 55.0
+
+        mock_get_node_utilization_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_counters',
+            mock.Mock(return_value=None))
+        mock_get_node_utilization = self.mock_object(
+            self.perf_library, '_get_node_utilization',
+            mock.Mock(return_value=25))
+
+        self.perf_library.update_performance_cache()
+
+        self.assertEqual(range(11, 21),
+                         self.perf_library.performance_counters)
+        self.assertEqual(55.0, self.perf_library.utilization)
+        mock_get_node_utilization_counters.assert_called_once_with()
+        self.assertFalse(mock_get_node_utilization.called)
+
+    def test_update_performance_cache_not_supported(self):
+
+        self.zapi_client.features.SYSTEM_METRICS = False
+        mock_get_node_utilization_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_counters')
+
+        self.perf_library.update_performance_cache()
+
+        self.assertEqual([], self.perf_library.performance_counters)
+        self.assertEqual(perf_base.DEFAULT_UTILIZATION,
+                         self.perf_library.utilization)
+        self.assertFalse(mock_get_node_utilization_counters.called)
+
+    def test_get_node_utilization(self):
+
+        self.perf_library.utilization = 47.1
+
+        result = self.perf_library.get_node_utilization()
+
+        self.assertEqual(47.1, result)
+
+    def test_get_node_utilization_counters(self):
+
+        mock_get_node_utilization_system_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_system_counters',
+            mock.Mock(return_value=['A', 'B', 'C']))
+        mock_get_node_utilization_wafl_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_wafl_counters',
+            mock.Mock(return_value=['D', 'E', 'F']))
+        mock_get_node_utilization_processor_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_processor_counters',
+            mock.Mock(return_value=['G', 'H', 'I']))
+
+        result = self.perf_library._get_node_utilization_counters()
+
+        expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
+        self.assertEqual(expected, result)
+
+        mock_get_node_utilization_system_counters.assert_called_once_with()
+        mock_get_node_utilization_wafl_counters.assert_called_once_with()
+        mock_get_node_utilization_processor_counters.assert_called_once_with()
+
+    def test_get_node_utilization_counters_api_error(self):
+
+        self.mock_object(self.perf_library,
+                         '_get_node_utilization_system_counters',
+                         mock.Mock(side_effect=netapp_api.NaApiError))
+
+        result = self.perf_library._get_node_utilization_counters()
+
+        self.assertIsNone(result)
+
+    def test_get_node_utilization_system_counters(self):
+
+        mock_get_performance_instance_names = self.mock_object(
+            self.zapi_client, 'get_performance_instance_names',
+            mock.Mock(return_value=fake.SYSTEM_INSTANCE_NAMES))
+        mock_get_performance_counters = self.mock_object(
+            self.zapi_client, 'get_performance_counters',
+            mock.Mock(return_value=fake.SYSTEM_COUNTERS))
+
+        result = self.perf_library._get_node_utilization_system_counters()
+
+        self.assertEqual(fake.SYSTEM_COUNTERS, result)
+
+        mock_get_performance_instance_names.assert_called_once_with('system')
+        mock_get_performance_counters.assert_called_once_with(
+            'system', fake.SYSTEM_INSTANCE_NAMES,
+            ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time'])
+
+    def test_get_node_utilization_wafl_counters(self):
+
+        mock_get_performance_instance_names = self.mock_object(
+            self.zapi_client, 'get_performance_instance_names',
+            mock.Mock(return_value=fake.WAFL_INSTANCE_NAMES))
+        mock_get_performance_counters = self.mock_object(
+            self.zapi_client, 'get_performance_counters',
+            mock.Mock(return_value=fake.WAFL_COUNTERS))
+        mock_get_performance_counter_info = self.mock_object(
+            self.zapi_client, 'get_performance_counter_info',
+            mock.Mock(return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO))
+
+        result = self.perf_library._get_node_utilization_wafl_counters()
+
+        self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result)
+
+        mock_get_performance_instance_names.assert_called_once_with('wafl')
+        mock_get_performance_counters.assert_called_once_with(
+            'wafl', fake.WAFL_INSTANCE_NAMES,
+            ['total_cp_msecs', 'cp_phase_times'])
+        mock_get_performance_counter_info.assert_called_once_with(
+            'wafl', 'cp_phase_times')
+
+    def test_get_node_utilization_processor_counters(self):
+
+        mock_get_performance_instance_names = self.mock_object(
+            self.zapi_client, 'get_performance_instance_names',
+            mock.Mock(return_value=fake.PROCESSOR_INSTANCE_NAMES))
+        mock_get_performance_counters = self.mock_object(
+            self.zapi_client, 'get_performance_counters',
+            mock.Mock(return_value=fake.PROCESSOR_COUNTERS))
+        self.mock_object(
+            self.zapi_client, 'get_performance_counter_info',
+            mock.Mock(return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO))
+
+        result = self.perf_library._get_node_utilization_processor_counters()
+
+        self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result)
+
+        mock_get_performance_instance_names.assert_called_once_with(
+            'processor')
+        mock_get_performance_counters.assert_called_once_with(
+            'processor', fake.PROCESSOR_INSTANCE_NAMES,
+            ['domain_busy', 'processor_elapsed_time'])
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py
new file mode 100644 (file)
index 0000000..a6efa60
--- /dev/null
@@ -0,0 +1,366 @@
+# Copyright (c) 2016 Clinton Knight
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import ddt
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \
+    import fakes as fake
+from cinder.volume.drivers.netapp.dataontap.performance import perf_base
+
+
+@ddt.ddt
+class PerformanceLibraryTestCase(test.TestCase):
+
+    def setUp(self):
+        super(PerformanceLibraryTestCase, self).setUp()
+
+        with mock.patch.object(perf_base.PerformanceLibrary,
+                               '_init_counter_info'):
+            self.zapi_client = mock.Mock()
+            self.perf_library = perf_base.PerformanceLibrary(self.zapi_client)
+            self.perf_library.system_object_name = 'system'
+            self.perf_library.avg_processor_busy_base_counter_name = (
+                'cpu_elapsed_time1')
+
+    def test_init(self):
+
+        mock_zapi_client = mock.Mock()
+        mock_init_counter_info = self.mock_object(
+            perf_base.PerformanceLibrary, '_init_counter_info')
+
+        library = perf_base.PerformanceLibrary(mock_zapi_client)
+
+        self.assertEqual(mock_zapi_client, library.zapi_client)
+        mock_init_counter_info.assert_called_once_with()
+
+    def test_init_counter_info(self):
+
+        self.perf_library._init_counter_info()
+
+        self.assertIsNone(self.perf_library.system_object_name)
+        self.assertIsNone(
+            self.perf_library.avg_processor_busy_base_counter_name)
+
+    def test_get_node_utilization_kahuna_overutilized(self):
+
+        mock_get_kahuna_utilization = self.mock_object(
+            self.perf_library, '_get_kahuna_utilization',
+            mock.Mock(return_value=61.0))
+        mock_get_average_cpu_utilization = self.mock_object(
+            self.perf_library, '_get_average_cpu_utilization',
+            mock.Mock(return_value=25.0))
+
+        result = self.perf_library._get_node_utilization('fake1',
+                                                         'fake2',
+                                                         'fake_node')
+
+        self.assertAlmostEqual(100.0, result)
+        mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
+        self.assertFalse(mock_get_average_cpu_utilization.called)
+
+    @ddt.data({'cpu': -0.01, 'cp_time': 10000, 'poll_time': 0},
+              {'cpu': 1.01, 'cp_time': 0, 'poll_time': 1000},
+              {'cpu': 0.50, 'cp_time': 0, 'poll_time': 0})
+    @ddt.unpack
+    def test_get_node_utilization_zero_time(self, cpu, cp_time, poll_time):
+
+        mock_get_kahuna_utilization = self.mock_object(
+            self.perf_library, '_get_kahuna_utilization',
+            mock.Mock(return_value=59.0))
+        mock_get_average_cpu_utilization = self.mock_object(
+            self.perf_library, '_get_average_cpu_utilization',
+            mock.Mock(return_value=cpu))
+        mock_get_total_consistency_point_time = self.mock_object(
+            self.perf_library, '_get_total_consistency_point_time',
+            mock.Mock(return_value=cp_time))
+        mock_get_consistency_point_p2_flush_time = self.mock_object(
+            self.perf_library, '_get_consistency_point_p2_flush_time',
+            mock.Mock(return_value=cp_time))
+        mock_get_total_time = self.mock_object(
+            self.perf_library, '_get_total_time',
+            mock.Mock(return_value=poll_time))
+        mock_get_adjusted_consistency_point_time = self.mock_object(
+            self.perf_library, '_get_adjusted_consistency_point_time')
+
+        result = self.perf_library._get_node_utilization('fake1',
+                                                         'fake2',
+                                                         'fake_node')
+
+        expected = max(min(100.0, 100.0 * cpu), 0)
+        self.assertEqual(expected, result)
+
+        mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
+        mock_get_average_cpu_utilization.assert_called_once_with('fake1',
+                                                                 'fake2')
+        mock_get_total_consistency_point_time.assert_called_once_with('fake1',
+                                                                      'fake2')
+        mock_get_consistency_point_p2_flush_time.assert_called_once_with(
+            'fake1', 'fake2')
+        mock_get_total_time.assert_called_once_with('fake1',
+                                                    'fake2',
+                                                    'total_cp_msecs')
+        self.assertFalse(mock_get_adjusted_consistency_point_time.called)
+
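+    # The expected values below are consistent with the utilization being
+    # 100 * max(average CPU busy fraction, adjusted CP time / total time),
+    # capped at 100 (e.g. 0.50 CPU but 11000/10000 of CP time yields 100).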
+    @ddt.data({'cpu': 0.75, 'adjusted_cp_time': 8000, 'expected': 80},
+              {'cpu': 0.80, 'adjusted_cp_time': 7500, 'expected': 80},
+              {'cpu': 0.50, 'adjusted_cp_time': 11000, 'expected': 100})
+    @ddt.unpack
+    def test_get_node_utilization(self, cpu, adjusted_cp_time, expected):
+
+        mock_get_kahuna_utilization = self.mock_object(
+            self.perf_library, '_get_kahuna_utilization',
+            mock.Mock(return_value=59.0))
+        mock_get_average_cpu_utilization = self.mock_object(
+            self.perf_library, '_get_average_cpu_utilization',
+            mock.Mock(return_value=cpu))
+        mock_get_total_consistency_point_time = self.mock_object(
+            self.perf_library, '_get_total_consistency_point_time',
+            mock.Mock(return_value=90.0))
+        mock_get_consistency_point_p2_flush_time = self.mock_object(
+            self.perf_library, '_get_consistency_point_p2_flush_time',
+            mock.Mock(return_value=50.0))
+        mock_get_total_time = self.mock_object(
+            self.perf_library, '_get_total_time',
+            mock.Mock(return_value=10000))
+        mock_get_adjusted_consistency_point_time = self.mock_object(
+            self.perf_library, '_get_adjusted_consistency_point_time',
+            mock.Mock(return_value=adjusted_cp_time))
+
+        result = self.perf_library._get_node_utilization('fake1',
+                                                         'fake2',
+                                                         'fake_node')
+
+        self.assertEqual(expected, result)
+
+        mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
+        mock_get_average_cpu_utilization.assert_called_once_with('fake1',
+                                                                 'fake2')
+        mock_get_total_consistency_point_time.assert_called_once_with('fake1',
+                                                                      'fake2')
+        mock_get_consistency_point_p2_flush_time.assert_called_once_with(
+            'fake1', 'fake2')
+        mock_get_total_time.assert_called_once_with('fake1',
+                                                    'fake2',
+                                                    'total_cp_msecs')
+        mock_get_adjusted_consistency_point_time.assert_called_once_with(
+            90.0, 50.0)
+
+    def test_get_node_utilization_calculation_error(self):
+
+        self.mock_object(self.perf_library,
+                         '_get_kahuna_utilization',
+                         mock.Mock(return_value=59.0))
+        self.mock_object(self.perf_library,
+                         '_get_average_cpu_utilization',
+                         mock.Mock(return_value=25.0))
+        self.mock_object(self.perf_library,
+                         '_get_total_consistency_point_time',
+                         mock.Mock(return_value=90.0))
+        self.mock_object(self.perf_library,
+                         '_get_consistency_point_p2_flush_time',
+                         mock.Mock(return_value=50.0))
+        self.mock_object(self.perf_library,
+                         '_get_total_time',
+                         mock.Mock(return_value=10000))
+        self.mock_object(self.perf_library,
+                         '_get_adjusted_consistency_point_time',
+                         mock.Mock(side_effect=ZeroDivisionError))
+
+        result = self.perf_library._get_node_utilization('fake1',
+                                                         'fake2',
+                                                         'fake_node')
+
+        self.assertEqual(perf_base.DEFAULT_UTILIZATION, result)
+
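+    # With per-processor kahuna fractions of 0.2 and 0.3, the expected result
+    # of 50.0 is their sum expressed as a percentage.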
+    def test_get_kahuna_utilization(self):
+
+        mock_get_performance_counter = self.mock_object(
+            self.perf_library,
+            '_get_performance_counter_average_multi_instance',
+            mock.Mock(return_value=[0.2, 0.3]))
+
+        result = self.perf_library._get_kahuna_utilization('fake_t1',
+                                                           'fake_t2')
+
+        self.assertAlmostEqual(50.0, result)
+        mock_get_performance_counter.assert_called_once_with(
+            'fake_t1', 'fake_t2', 'domain_busy:kahuna',
+            'processor_elapsed_time')
+
+    def test_get_average_cpu_utilization(self):
+
+        mock_get_performance_counter_average = self.mock_object(
+            self.perf_library, '_get_performance_counter_average',
+            mock.Mock(return_value=0.45))
+
+        result = self.perf_library._get_average_cpu_utilization('fake_t1',
+                                                                'fake_t2')
+
+        self.assertAlmostEqual(0.45, result)
+        mock_get_performance_counter_average.assert_called_once_with(
+            'fake_t1', 'fake_t2', 'avg_processor_busy', 'cpu_elapsed_time1')
+
+    def test_get_total_consistency_point_time(self):
+
+        mock_get_performance_counter_delta = self.mock_object(
+            self.perf_library, '_get_performance_counter_delta',
+            mock.Mock(return_value=500))
+
+        result = self.perf_library._get_total_consistency_point_time(
+            'fake_t1', 'fake_t2')
+
+        self.assertEqual(500, result)
+        mock_get_performance_counter_delta.assert_called_once_with(
+            'fake_t1', 'fake_t2', 'total_cp_msecs')
+
+    def test_get_consistency_point_p2_flush_time(self):
+
+        mock_get_performance_counter_delta = self.mock_object(
+            self.perf_library, '_get_performance_counter_delta',
+            mock.Mock(return_value=500))
+
+        result = self.perf_library._get_consistency_point_p2_flush_time(
+            'fake_t1', 'fake_t2')
+
+        self.assertEqual(500, result)
+        mock_get_performance_counter_delta.assert_called_once_with(
+            'fake_t1', 'fake_t2', 'cp_phase_times:p2_flush')
+
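+    # Timestamps 100 and 105 are treated as seconds; the expected result of
+    # 5000 indicates the helper returns the elapsed time in milliseconds.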
+    def test_get_total_time(self):
+
+        mock_find_performance_counter_timestamp = self.mock_object(
+            self.perf_library, '_find_performance_counter_timestamp',
+            mock.Mock(side_effect=[100, 105]))
+
+        result = self.perf_library._get_total_time('fake_t1',
+                                                   'fake_t2',
+                                                   'fake_counter')
+
+        self.assertEqual(5000, result)
+        mock_find_performance_counter_timestamp.assert_has_calls([
+            mock.call('fake_t1', 'fake_counter'),
+            mock.call('fake_t2', 'fake_counter')])
+
+    def test_get_adjusted_consistency_point_time(self):
+
+        result = self.perf_library._get_adjusted_consistency_point_time(
+            500, 200)
+
+        self.assertAlmostEqual(250, result)
+
+    def test_get_performance_counter_delta(self):
+
+        result = self.perf_library._get_performance_counter_delta(
+            fake.COUNTERS_T1, fake.COUNTERS_T2, 'total_cp_msecs')
+
+        self.assertEqual(1482, result)
+
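+    # 0.00281954... equals (delta domain_busy:kahuna) / (delta
+    # processor_elapsed_time) for processor0 between COUNTERS_T1 and
+    # COUNTERS_T2, i.e. (2712629374 - 2712467226) / (1063341351916 -
+    # 1063283843318).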
+    def test_get_performance_counter_average(self):
+
+        result = self.perf_library._get_performance_counter_average(
+            fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna',
+            'processor_elapsed_time', 'processor0')
+
+        self.assertAlmostEqual(0.00281954360981, result)
+
+    def test_get_performance_counter_average_multi_instance(self):
+
+        result = (
+            self.perf_library._get_performance_counter_average_multi_instance(
+                fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna',
+                'processor_elapsed_time'))
+
+        expected = [0.002819543609809441, 0.0033421611147606135]
+        self.assertAlmostEqual(expected, result)
+
+    def test_find_performance_counter_value(self):
+
+        result = self.perf_library._find_performance_counter_value(
+            fake.COUNTERS_T1, 'domain_busy:kahuna',
+            instance_name='processor0')
+
+        self.assertEqual('2712467226', result)
+
+    def test_find_performance_counter_value_not_found(self):
+
+        self.assertRaises(
+            exception.NotFound,
+            self.perf_library._find_performance_counter_value,
+            fake.COUNTERS_T1, 'invalid', instance_name='processor0')
+
+    def test_find_performance_counter_timestamp(self):
+
+        result = self.perf_library._find_performance_counter_timestamp(
+            fake.COUNTERS_T1, 'domain_busy')
+
+        self.assertEqual('1453573777', result)
+
+    def test_find_performance_counter_timestamp_not_found(self):
+
+        self.assertRaises(
+            exception.NotFound,
+            self.perf_library._find_performance_counter_timestamp,
+            fake.COUNTERS_T1, 'invalid', instance_name='processor0')
+
+    def test_expand_performance_array(self):
+
+        counter_info = {
+            'labels': ['idle', 'kahuna', 'storage', 'exempt'],
+            'name': 'domain_busy',
+        }
+        self.zapi_client.get_performance_counter_info = mock.Mock(
+            return_value=counter_info)
+
+        counter = {
+            'node-name': 'cluster1-01',
+            'instance-uuid': 'cluster1-01:kernel:processor0',
+            'domain_busy': '969142314286,2567571412,2131582146,5383861579',
+            'instance-name': 'processor0',
+            'timestamp': '1453512244',
+        }
+        self.perf_library._expand_performance_array('wafl',
+                                                    'domain_busy',
+                                                    counter)
+
+        modified_counter = {
+            'node-name': 'cluster1-01',
+            'instance-uuid': 'cluster1-01:kernel:processor0',
+            'domain_busy': '969142314286,2567571412,2131582146,5383861579',
+            'instance-name': 'processor0',
+            'timestamp': '1453512244',
+            'domain_busy:idle': '969142314286',
+            'domain_busy:kahuna': '2567571412',
+            'domain_busy:storage': '2131582146',
+            'domain_busy:exempt': '5383861579',
+        }
+        self.assertEqual(modified_counter, counter)
+
+    def test_get_base_counter_name(self):
+
+        counter_info = {
+            'base-counter': 'cpu_elapsed_time',
+            'labels': [],
+            'name': 'avg_processor_busy',
+        }
+        self.zapi_client.get_performance_counter_info = mock.Mock(
+            return_value=counter_info)
+
+        result = self.perf_library._get_base_counter_name(
+            'system:constituent', 'avg_processor_busy')
+
+        self.assertEqual('cpu_elapsed_time', result)
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py
new file mode 100644 (file)
index 0000000..1244676
--- /dev/null
@@ -0,0 +1,476 @@
+# Copyright (c) 2016 Clinton Knight
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import ddt
+import mock
+
+from cinder import test
+from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \
+    import fakes as fake
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.performance import perf_base
+from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
+
+
+@ddt.ddt
+class PerformanceCmodeLibraryTestCase(test.TestCase):
+
+    def setUp(self):
+        super(PerformanceCmodeLibraryTestCase, self).setUp()
+
+        with mock.patch.object(perf_cmode.PerformanceCmodeLibrary,
+                               '_init_counter_info'):
+            self.zapi_client = mock.Mock()
+            self.perf_library = perf_cmode.PerformanceCmodeLibrary(
+                self.zapi_client)
+            self.perf_library.system_object_name = 'system'
+            self.perf_library.avg_processor_busy_base_counter_name = (
+                'cpu_elapsed_time1')
+
+        self._set_up_fake_pools()
+
+    def _set_up_fake_pools(self):
+
+        class test_volume(object):
+            # Minimal stand-in for a Cinder volume object.
+            id = None
+            aggr = None
+
+        volume1 = test_volume()
+        volume1.id = {'name': 'pool1'}
+        volume1.aggr = {'name': 'aggr1'}
+        volume2 = test_volume()
+        volume2.id = {'name': 'pool2'}
+        volume2.aggr = {'name': 'aggr2'}
+        volume3 = test_volume()
+        volume3.id = {'name': 'pool3'}
+        volume3.aggr = {'name': 'aggr2'}
+        self.fake_volumes = [volume1, volume2, volume3]
+
+        self.fake_aggrs = set(['aggr1', 'aggr2', 'aggr3'])
+        self.fake_nodes = set(['node1', 'node2'])
+        self.fake_aggr_node_map = {
+            'aggr1': 'node1',
+            'aggr2': 'node2',
+            'aggr3': 'node2',
+        }
+
+    def test_init_counter_info_not_supported(self):
+
+        self.zapi_client.features.SYSTEM_METRICS = False
+        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False
+        mock_get_base_counter_name = self.mock_object(
+            self.perf_library, '_get_base_counter_name')
+
+        self.perf_library._init_counter_info()
+
+        self.assertIsNone(self.perf_library.system_object_name)
+        self.assertIsNone(
+            self.perf_library.avg_processor_busy_base_counter_name)
+        self.assertFalse(mock_get_base_counter_name.called)
+
+    @ddt.data({
+        'system_constituent': False,
+        'base_counter': 'cpu_elapsed_time1',
+    }, {
+        'system_constituent': True,
+        'base_counter': 'cpu_elapsed_time',
+    })
+    @ddt.unpack
+    def test_init_counter_info_api_error(self, system_constituent,
+                                         base_counter):
+
+        self.zapi_client.features.SYSTEM_METRICS = True
+        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = (
+            system_constituent)
+        self.mock_object(self.perf_library,
+                         '_get_base_counter_name',
+                         mock.Mock(side_effect=netapp_api.NaApiError))
+
+        self.perf_library._init_counter_info()
+
+        self.assertEqual(
+            base_counter,
+            self.perf_library.avg_processor_busy_base_counter_name)
+
+    def test_init_counter_info_system(self):
+
+        self.zapi_client.features.SYSTEM_METRICS = True
+        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False
+        mock_get_base_counter_name = self.mock_object(
+            self.perf_library, '_get_base_counter_name',
+            mock.Mock(return_value='cpu_elapsed_time1'))
+
+        self.perf_library._init_counter_info()
+
+        self.assertEqual('system', self.perf_library.system_object_name)
+        self.assertEqual(
+            'cpu_elapsed_time1',
+            self.perf_library.avg_processor_busy_base_counter_name)
+        mock_get_base_counter_name.assert_called_once_with(
+            'system', 'avg_processor_busy')
+
+    def test_init_counter_info_system_constituent(self):
+
+        self.zapi_client.features.SYSTEM_METRICS = False
+        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = True
+        mock_get_base_counter_name = self.mock_object(
+            self.perf_library, '_get_base_counter_name',
+            mock.Mock(return_value='cpu_elapsed_time'))
+
+        self.perf_library._init_counter_info()
+
+        self.assertEqual('system:constituent',
+                         self.perf_library.system_object_name)
+        self.assertEqual(
+            'cpu_elapsed_time',
+            self.perf_library.avg_processor_busy_base_counter_name)
+        mock_get_base_counter_name.assert_called_once_with(
+            'system:constituent', 'avg_processor_busy')
+
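+    # The cmode cache is keyed by node: each node keeps a sliding window of
+    # its ten most recent counter samples, and pool utilization is looked up
+    # through the aggregate -> node map built from the fake volumes.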
+    def test_update_performance_cache(self):
+
+        self.perf_library.performance_counters = {
+            'node1': range(11, 21),
+            'node2': range(21, 31),
+        }
+        mock_get_aggregates_for_pools = self.mock_object(
+            self.perf_library, '_get_aggregates_for_pools',
+            mock.Mock(return_value=self.fake_aggrs))
+        mock_get_nodes_for_aggregates = self.mock_object(
+            self.perf_library, '_get_nodes_for_aggregates',
+            mock.Mock(return_value=(self.fake_nodes,
+                                    self.fake_aggr_node_map)))
+        mock_get_node_utilization_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_counters',
+            mock.Mock(side_effect=[21, 31]))
+        mock_get_node_utilization = self.mock_object(
+            self.perf_library, '_get_node_utilization',
+            mock.Mock(side_effect=[25, 75]))
+
+        self.perf_library.update_performance_cache(self.fake_volumes)
+
+        expected_performance_counters = {
+            'node1': range(12, 22),
+            'node2': range(22, 32),
+        }
+        self.assertEqual(expected_performance_counters,
+                         self.perf_library.performance_counters)
+
+        expected_pool_utilization = {'pool1': 25, 'pool2': 75, 'pool3': 75}
+        self.assertEqual(expected_pool_utilization,
+                         self.perf_library.pool_utilization)
+
+        mock_get_aggregates_for_pools.assert_called_once_with(
+            self.fake_volumes)
+        mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs)
+        mock_get_node_utilization_counters.assert_has_calls([
+            mock.call('node1'), mock.call('node2')])
+        mock_get_node_utilization.assert_has_calls([
+            mock.call(12, 21, 'node1'), mock.call(22, 31, 'node2')])
+
+    def test_update_performance_cache_first_pass(self):
+
+        mock_get_aggregates_for_pools = self.mock_object(
+            self.perf_library, '_get_aggregates_for_pools',
+            mock.Mock(return_value=self.fake_aggrs))
+        mock_get_nodes_for_aggregates = self.mock_object(
+            self.perf_library, '_get_nodes_for_aggregates',
+            mock.Mock(return_value=(self.fake_nodes,
+                                    self.fake_aggr_node_map)))
+        mock_get_node_utilization_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_counters',
+            mock.Mock(side_effect=[11, 21]))
+        mock_get_node_utilization = self.mock_object(
+            self.perf_library, '_get_node_utilization',
+            mock.Mock(side_effect=[25, 75]))
+
+        self.perf_library.update_performance_cache(self.fake_volumes)
+
+        expected_performance_counters = {'node1': [11], 'node2': [21]}
+        self.assertEqual(expected_performance_counters,
+                         self.perf_library.performance_counters)
+
+        expected_pool_utilization = {
+            'pool1': perf_base.DEFAULT_UTILIZATION,
+            'pool2': perf_base.DEFAULT_UTILIZATION,
+            'pool3': perf_base.DEFAULT_UTILIZATION,
+        }
+        self.assertEqual(expected_pool_utilization,
+                         self.perf_library.pool_utilization)
+
+        mock_get_aggregates_for_pools.assert_called_once_with(
+            self.fake_volumes)
+        mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs)
+        mock_get_node_utilization_counters.assert_has_calls([
+            mock.call('node1'), mock.call('node2')])
+        self.assertFalse(mock_get_node_utilization.called)
+
+    def test_update_performance_cache_unknown_nodes(self):
+
+        self.perf_library.performance_counters = {
+            'node1': range(11, 21),
+            'node2': range(21, 31),
+        }
+        mock_get_aggregates_for_pools = self.mock_object(
+            self.perf_library, '_get_aggregates_for_pools',
+            mock.Mock(return_value=self.fake_aggrs))
+        mock_get_nodes_for_aggregates = self.mock_object(
+            self.perf_library, '_get_nodes_for_aggregates',
+            mock.Mock(return_value=(set(), {})))
+        mock_get_node_utilization_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_counters',
+            mock.Mock(side_effect=[11, 21]))
+        mock_get_node_utilization = self.mock_object(
+            self.perf_library, '_get_node_utilization',
+            mock.Mock(side_effect=[25, 75]))
+
+        self.perf_library.update_performance_cache(self.fake_volumes)
+
+        expected_performance_counters = {
+            'node1': range(11, 21),
+            'node2': range(21, 31),
+        }
+        self.assertEqual(expected_performance_counters,
+                         self.perf_library.performance_counters)
+
+        expected_pool_utilization = {
+            'pool1': perf_base.DEFAULT_UTILIZATION,
+            'pool2': perf_base.DEFAULT_UTILIZATION,
+            'pool3': perf_base.DEFAULT_UTILIZATION,
+        }
+        self.assertEqual(expected_pool_utilization,
+                         self.perf_library.pool_utilization)
+
+        mock_get_aggregates_for_pools.assert_called_once_with(
+            self.fake_volumes)
+        mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs)
+        self.assertFalse(mock_get_node_utilization_counters.called)
+        self.assertFalse(mock_get_node_utilization.called)
+
+    def test_update_performance_cache_counters_unavailable(self):
+
+        self.perf_library.performance_counters = {
+            'node1': range(11, 21),
+            'node2': range(21, 31),
+        }
+        mock_get_aggregates_for_pools = self.mock_object(
+            self.perf_library, '_get_aggregates_for_pools',
+            mock.Mock(return_value=self.fake_aggrs))
+        mock_get_nodes_for_aggregates = self.mock_object(
+            self.perf_library, '_get_nodes_for_aggregates',
+            mock.Mock(return_value=(self.fake_nodes,
+                                    self.fake_aggr_node_map)))
+        mock_get_node_utilization_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_counters',
+            mock.Mock(side_effect=[None, None]))
+        mock_get_node_utilization = self.mock_object(
+            self.perf_library, '_get_node_utilization',
+            mock.Mock(side_effect=[25, 75]))
+
+        self.perf_library.update_performance_cache(self.fake_volumes)
+
+        expected_performance_counters = {
+            'node1': range(11, 21),
+            'node2': range(21, 31),
+        }
+        self.assertEqual(expected_performance_counters,
+                         self.perf_library.performance_counters)
+
+        expected_pool_utilization = {
+            'pool1': perf_base.DEFAULT_UTILIZATION,
+            'pool2': perf_base.DEFAULT_UTILIZATION,
+            'pool3': perf_base.DEFAULT_UTILIZATION,
+        }
+        self.assertEqual(expected_pool_utilization,
+                         self.perf_library.pool_utilization)
+
+        mock_get_aggregates_for_pools.assert_called_once_with(
+            self.fake_volumes)
+        mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs)
+        mock_get_node_utilization_counters.assert_has_calls([
+            mock.call('node1'), mock.call('node2')])
+        self.assertFalse(mock_get_node_utilization.called)
+
+    def test_update_performance_cache_not_supported(self):
+
+        self.zapi_client.features.SYSTEM_METRICS = False
+        self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False
+
+        mock_get_aggregates_for_pools = self.mock_object(
+            self.perf_library, '_get_aggregates_for_pools')
+
+        self.perf_library.update_performance_cache(self.fake_volumes)
+
+        expected_performance_counters = {}
+        self.assertEqual(expected_performance_counters,
+                         self.perf_library.performance_counters)
+
+        expected_pool_utilization = {}
+        self.assertEqual(expected_pool_utilization,
+                         self.perf_library.pool_utilization)
+
+        self.assertFalse(mock_get_aggregates_for_pools.called)
+
+    @ddt.data({'pool': 'pool1', 'expected': 10.0},
+              {'pool': 'pool3', 'expected': perf_base.DEFAULT_UTILIZATION})
+    @ddt.unpack
+    def test_get_node_utilization_for_pool(self, pool, expected):
+
+        self.perf_library.pool_utilization = {'pool1': 10.0, 'pool2': 15.0}
+
+        result = self.perf_library.get_node_utilization_for_pool(pool)
+
+        self.assertAlmostEqual(expected, result)
+
+    def test_get_aggregates_for_pools(self):
+
+        class test_volume(object):
+            # Minimal stand-in for a Cinder volume object.
+            aggr = None
+
+        volume1 = test_volume()
+        volume1.aggr = {'name': 'aggr1'}
+        volume2 = test_volume()
+        volume2.aggr = {'name': 'aggr2'}
+        volume3 = test_volume()
+        volume3.aggr = {'name': 'aggr2'}
+        volumes = [volume1, volume2, volume3]
+
+        result = self.perf_library._get_aggregates_for_pools(volumes)
+
+        expected_aggregate_names = set(['aggr1', 'aggr2'])
+        self.assertEqual(expected_aggregate_names, result)
+
+    def test_get_nodes_for_aggregates(self):
+
+        aggregate_names = ['aggr1', 'aggr2', 'aggr3']
+        aggregate_nodes = ['node1', 'node2', 'node2']
+
+        mock_get_node_for_aggregate = self.mock_object(
+            self.zapi_client, 'get_node_for_aggregate',
+            mock.Mock(side_effect=aggregate_nodes))
+
+        result = self.perf_library._get_nodes_for_aggregates(aggregate_names)
+
+        self.assertEqual(2, len(result))
+        result_node_names, result_aggr_node_map = result
+
+        expected_node_names = set(['node1', 'node2'])
+        expected_aggr_node_map = dict(zip(aggregate_names, aggregate_nodes))
+        self.assertEqual(expected_node_names, result_node_names)
+        self.assertEqual(expected_aggr_node_map, result_aggr_node_map)
+        mock_get_node_for_aggregate.assert_has_calls([
+            mock.call('aggr1'), mock.call('aggr2'), mock.call('aggr3')])
+
+    def test_get_node_utilization_counters(self):
+
+        mock_get_node_utilization_system_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_system_counters',
+            mock.Mock(return_value=['A', 'B', 'C']))
+        mock_get_node_utilization_wafl_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_wafl_counters',
+            mock.Mock(return_value=['D', 'E', 'F']))
+        mock_get_node_utilization_processor_counters = self.mock_object(
+            self.perf_library, '_get_node_utilization_processor_counters',
+            mock.Mock(return_value=['G', 'H', 'I']))
+
+        result = self.perf_library._get_node_utilization_counters(fake.NODE)
+
+        expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
+        self.assertEqual(expected, result)
+
+        mock_get_node_utilization_system_counters.assert_called_once_with(
+            fake.NODE)
+        mock_get_node_utilization_wafl_counters.assert_called_once_with(
+            fake.NODE)
+        mock_get_node_utilization_processor_counters.assert_called_once_with(
+            fake.NODE)
+
+    def test_get_node_utilization_counters_api_error(self):
+
+        self.mock_object(self.perf_library,
+                         '_get_node_utilization_system_counters',
+                         mock.Mock(side_effect=netapp_api.NaApiError))
+
+        result = self.perf_library._get_node_utilization_counters(fake.NODE)
+
+        self.assertIsNone(result)
+
+    def test_get_node_utilization_system_counters(self):
+
+        mock_get_performance_instance_uuids = self.mock_object(
+            self.zapi_client, 'get_performance_instance_uuids',
+            mock.Mock(return_value=fake.SYSTEM_INSTANCE_UUIDS))
+        mock_get_performance_counters = self.mock_object(
+            self.zapi_client, 'get_performance_counters',
+            mock.Mock(return_value=fake.SYSTEM_COUNTERS))
+
+        result = self.perf_library._get_node_utilization_system_counters(
+            fake.NODE)
+
+        self.assertEqual(fake.SYSTEM_COUNTERS, result)
+
+        mock_get_performance_instance_uuids.assert_called_once_with(
+            'system', fake.NODE)
+        mock_get_performance_counters.assert_called_once_with(
+            'system', fake.SYSTEM_INSTANCE_UUIDS,
+            ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time'])
+
+    def test_get_node_utilization_wafl_counters(self):
+
+        mock_get_performance_instance_uuids = self.mock_object(
+            self.zapi_client, 'get_performance_instance_uuids',
+            mock.Mock(return_value=fake.WAFL_INSTANCE_UUIDS))
+        mock_get_performance_counters = self.mock_object(
+            self.zapi_client, 'get_performance_counters',
+            mock.Mock(return_value=fake.WAFL_COUNTERS))
+        mock_get_performance_counter_info = self.mock_object(
+            self.zapi_client, 'get_performance_counter_info',
+            mock.Mock(return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO))
+
+        result = self.perf_library._get_node_utilization_wafl_counters(
+            fake.NODE)
+
+        self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result)
+
+        mock_get_performance_instance_uuids.assert_called_once_with(
+            'wafl', fake.NODE)
+        mock_get_performance_counters.assert_called_once_with(
+            'wafl', fake.WAFL_INSTANCE_UUIDS,
+            ['total_cp_msecs', 'cp_phase_times'])
+        mock_get_performance_counter_info.assert_called_once_with(
+            'wafl', 'cp_phase_times')
+
+    def test_get_node_utilization_processor_counters(self):
+
+        mock_get_performance_instance_uuids = self.mock_object(
+            self.zapi_client, 'get_performance_instance_uuids',
+            mock.Mock(return_value=fake.PROCESSOR_INSTANCE_UUIDS))
+        mock_get_performance_counters = self.mock_object(
+            self.zapi_client, 'get_performance_counters',
+            mock.Mock(return_value=fake.PROCESSOR_COUNTERS))
+        self.mock_object(
+            self.zapi_client, 'get_performance_counter_info',
+            mock.Mock(return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO))
+
+        result = self.perf_library._get_node_utilization_processor_counters(
+            fake.NODE)
+
+        self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result)
+
+        mock_get_performance_instance_uuids.assert_called_once_with(
+            'processor', fake.NODE)
+        mock_get_performance_counters.assert_called_once_with(
+            'processor', fake.PROCESSOR_INSTANCE_UUIDS,
+            ['domain_busy', 'processor_elapsed_time'])
index cbda297d1530eac86c2b832b3639371b08853781..7f1bde789ddfe72e6086dd1ed6cd4ee56ed9fdcd 100644 (file)
@@ -33,6 +33,7 @@ from cinder.volume.drivers.netapp.dataontap import block_7mode
 from cinder.volume.drivers.netapp.dataontap import block_base
 from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
 from cinder.volume.drivers.netapp.dataontap.client import client_base
+from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
 from cinder.volume.drivers.netapp import utils as na_utils
 
 
@@ -49,6 +50,7 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
 
         self.library.zapi_client = mock.Mock()
         self.zapi_client = self.library.zapi_client
+        self.library.perf_library = mock.Mock()
         self.library.vfiler = mock.Mock()
         # Deprecated option
         self.library.configuration.netapp_volume_list = None
@@ -66,6 +68,7 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
         config.netapp_server_port = '80'
         return config
 
+    @mock.patch.object(perf_7mode, 'Performance7modeLibrary', mock.Mock())
     @mock.patch.object(client_base.Client, 'get_ontapi_version',
                        mock.MagicMock(return_value=(1, 20)))
     @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
@@ -510,10 +513,13 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
         self.library.vols = netapp_api.NaElement(
             client_fakes.VOLUME_LIST_INFO_RESPONSE).get_child_by_name(
             'volumes').get_children()
+        self.library.perf_library.get_node_utilization = (
+            mock.Mock(return_value=30.0))
 
         thick = netapp_lun_space_reservation == 'enabled'
 
-        result = self.library._get_pool_stats()
+        result = self.library._get_pool_stats(filter_function='filter',
+                                              goodness_function='goodness')
 
         expected = [{
             'pool_name': 'vol1',
@@ -524,7 +530,10 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
             'free_capacity_gb': 1339.27,
             'total_capacity_gb': 1342.21,
             'reserved_percentage': 5,
-            'max_over_subscription_ratio': 10.0
+            'max_over_subscription_ratio': 10.0,
+            'utilization': 30.0,
+            'filter_function': 'filter',
+            'goodness_function': 'goodness',
         }]
 
         self.assertEqual(expected, result)
@@ -598,8 +607,11 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
             fake.FAKE_7MODE_VOL1[0].get_child_content('name')
         ]
         self.library.root_volume_name = ''
+        self.library.perf_library.get_node_utilization = (
+            mock.Mock(return_value=30.0))
 
-        pools = self.library._get_pool_stats()
+        pools = self.library._get_pool_stats(filter_function='filter',
+                                             goodness_function='goodness')
 
         self.assertListEqual(fake.FAKE_7MODE_POOLS, pools)
 
index 6d67f2bccff5f9a9a3b4f3039a81f14da1b0cd52..3cf0855cb13112f8550d58019b071d6357ff3d23 100644 (file)
@@ -29,6 +29,7 @@ from cinder.volume.drivers.netapp.dataontap import block_base
 from cinder.volume.drivers.netapp.dataontap import block_cmode
 from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
 from cinder.volume.drivers.netapp.dataontap.client import client_base
+from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
 from cinder.volume.drivers.netapp.dataontap import ssc_cmode
 from cinder.volume.drivers.netapp import utils as na_utils
 
@@ -46,6 +47,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
 
         self.library.zapi_client = mock.Mock()
         self.zapi_client = self.library.zapi_client
+        self.library.perf_library = mock.Mock()
         self.library.vserver = mock.Mock()
         self.library.ssc_vols = None
         self.fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME,
@@ -73,6 +75,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
         config.netapp_vserver = 'openstack'
         return config
 
+    @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
     @mock.patch.object(client_base.Client, 'get_ontapi_version',
                        mock.MagicMock(return_value=(1, 20)))
     @mock.patch.object(na_utils, 'check_flags')
@@ -356,13 +359,16 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
         self.library.max_over_subscription_ratio = 10
         self.library.configuration.netapp_lun_space_reservation = (
             netapp_lun_space_reservation)
+        self.library.perf_library.get_node_utilization_for_pool = (
+            mock.Mock(return_value=30.0))
 
         netapp_thin = 'true' if thin else 'false'
         netapp_thick = 'false' if thin else 'true'
 
         thick = not thin and (netapp_lun_space_reservation == 'enabled')
 
-        result = self.library._get_pool_stats()
+        result = self.library._get_pool_stats(filter_function='filter',
+                                              goodness_function='goodness')
 
         expected = [{'pool_name': 'vola',
                      'netapp_unmirrored': 'true',
@@ -382,7 +388,10 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
                      'max_over_subscription_ratio': 10.0,
                      'netapp_raid_type': 'raiddp',
                      'netapp_disk_type': 'SSD',
-                     'netapp_nodedup': 'true'}]
+                     'netapp_nodedup': 'true',
+                     'utilization': 30.0,
+                     'filter_function': 'filter',
+                     'goodness_function': 'goodness'}]
 
         self.assertEqual(expected, result)
 
@@ -660,8 +669,11 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
         self.library.ssc_vols = fake.ssc_map
         self.mock_object(self.library, '_get_filtered_pools',
                          mock.Mock(return_value=[fake.FAKE_CMODE_VOL1]))
+        self.library.perf_library.get_node_utilization_for_pool = (
+            mock.Mock(return_value=30.0))
 
-        pools = self.library._get_pool_stats()
+        pools = self.library._get_pool_stats(filter_function='filter',
+                                             goodness_function='goodness')
 
         self.assertListEqual(fake.FAKE_CMODE_POOLS, pools)
 
index bb22feff5363bae3fba430e9bf5f75d309cc3c2b..c463d5b8d054467ff58b929c29ed8cbb08a19cea 100644 (file)
@@ -43,6 +43,7 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
                 self.driver._mounted_shares = [fake.NFS_SHARE]
                 self.driver.ssc_vols = True
                 self.driver.zapi_client = mock.Mock()
+                self.driver.perf_library = mock.Mock()
 
     def get_config_7mode(self):
         config = na_fakes.create_configuration_cmode()
@@ -77,8 +78,12 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
         self.mock_object(self.driver,
                          '_get_share_capacity_info',
                          mock.Mock(return_value=capacity))
+        self.mock_object(self.driver.perf_library,
+                         'get_node_utilization',
+                         mock.Mock(return_value=30.0))
 
-        result = self.driver._get_pool_stats()
+        result = self.driver._get_pool_stats(filter_function='filter',
+                                             goodness_function='goodness')
 
         expected = [{'pool_name': '192.168.99.24:/fake/export/path',
                      'QoS_support': False,
@@ -88,7 +93,10 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
                      'total_capacity_gb': 4468.0,
                      'reserved_percentage': 7,
                      'max_over_subscription_ratio': 19.0,
-                     'provisioned_capacity_gb': 4456.0}]
+                     'provisioned_capacity_gb': 4456.0,
+                     'utilization': 30.0,
+                     'filter_function': 'filter',
+                     'goodness_function': 'goodness'}]
 
         self.assertEqual(expected, result)
 
index 58d71c0f13bf6504bd13c43e61cf3b9d2de36f65..4f3d43b7917d9259c6e2f99b30bc9abddca79d8d 100644 (file)
@@ -31,6 +31,7 @@ from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
 from cinder.volume.drivers.netapp.dataontap.client import client_cmode
 from cinder.volume.drivers.netapp.dataontap import nfs_base
 from cinder.volume.drivers.netapp.dataontap import nfs_cmode
+from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
 from cinder.volume.drivers.netapp.dataontap import ssc_cmode
 from cinder.volume.drivers.netapp import utils as na_utils
 from cinder.volume.drivers import nfs
@@ -53,6 +54,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
                 self.driver.ssc_vols = True
                 self.driver.vserver = fake.VSERVER_NAME
                 self.driver.ssc_enabled = True
+                self.driver.perf_library = mock.Mock()
 
     def get_config_cmode(self):
         config = na_fakes.create_configuration_cmode()
@@ -65,6 +67,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
         config.netapp_vserver = fake.VSERVER_NAME
         return config
 
+    @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
     @mock.patch.object(client_cmode, 'Client', mock.Mock())
     @mock.patch.object(nfs.NfsDriver, 'do_setup')
     @mock.patch.object(na_utils, 'check_flags')
@@ -133,8 +136,11 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
         self.mock_object(self.driver,
                          '_get_share_capacity_info',
                          mock.Mock(return_value=capacity))
+        self.driver.perf_library.get_node_utilization_for_pool = (
+            mock.Mock(return_value=30.0))
 
-        result = self.driver._get_pool_stats()
+        result = self.driver._get_pool_stats(filter_function='filter',
+                                             goodness_function='goodness')
 
         expected = [{'pool_name': '192.168.99.24:/fake/export/path',
                      'netapp_unmirrored': 'true',
@@ -154,7 +160,10 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
                      'netapp_disk_type': 'SSD',
                      'netapp_nodedup': 'true',
                      'max_over_subscription_ratio': 19.0,
-                     'provisioned_capacity_gb': 4456.0}]
+                     'provisioned_capacity_gb': 4456.0,
+                     'utilization': 30.0,
+                     'filter_function': 'filter',
+                     'goodness_function': 'goodness'}]
 
         self.assertEqual(expected, result)
 
index 1fdbe77f972b849ab4059612673c7ab45e0d5a49..8012fe1ae19ebc75f3be7d475e2275b9bc633eb6 100644 (file)
@@ -35,6 +35,7 @@ from cinder import utils
 from cinder.volume import configuration
 from cinder.volume.drivers.netapp.dataontap import block_base
 from cinder.volume.drivers.netapp.dataontap.client import client_7mode
+from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
 from cinder.volume.drivers.netapp import options as na_opts
 from cinder.volume.drivers.netapp import utils as na_utils
 
@@ -76,6 +77,8 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
         self.vol_refresh_running = False
         self.vol_refresh_voluntary = False
         self.root_volume_name = self._get_root_volume_name()
+        self.perf_library = perf_7mode.Performance7modeLibrary(
+            self.zapi_client)
 
     def _do_partner_setup(self):
         partner_backend = self.configuration.netapp_partner_backend_name
@@ -238,7 +241,8 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
             wwpns.extend(self.partner_zapi_client.get_fc_target_wwpns())
         return wwpns
 
-    def _update_volume_stats(self):
+    def _update_volume_stats(self, filter_function=None,
+                             goodness_function=None):
         """Retrieve stats info from filer."""
 
         # ensure we get current data
@@ -252,17 +256,20 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
         data['vendor_name'] = 'NetApp'
         data['driver_version'] = self.VERSION
         data['storage_protocol'] = self.driver_protocol
-        data['pools'] = self._get_pool_stats()
+        data['pools'] = self._get_pool_stats(
+            filter_function=filter_function,
+            goodness_function=goodness_function)
         data['sparse_copy_volume'] = True
 
         self.zapi_client.provide_ems(self, self.driver_name, self.app_version,
                                      server_type=self.driver_mode)
         self._stats = data
 
-    def _get_pool_stats(self):
+    def _get_pool_stats(self, filter_function=None, goodness_function=None):
         """Retrieve pool (i.e. Data ONTAP volume) stats info from volumes."""
 
         pools = []
+        self.perf_library.update_performance_cache()
 
         for vol in self.vols:
 
@@ -310,6 +317,11 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
             pool['thick_provisioning_support'] = thick
             pool['thin_provisioning_support'] = not thick
 
+            utilization = self.perf_library.get_node_utilization()
+            pool['utilization'] = na_utils.round_down(utilization, '0.01')
+            pool['filter_function'] = filter_function
+            pool['goodness_function'] = goodness_function
+
             pools.append(pool)
 
         return pools
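
The pool dictionaries above report utilization through na_utils.round_down(utilization, '0.01'). A minimal stand-in sketch, assuming the helper truncates to the given decimal precision rather than rounding to nearest (the real helper lives in cinder/volume/drivers/netapp/utils.py and may differ in detail):

    import decimal

    def round_down(value, precision='0.01'):
        # Truncate toward zero at the requested precision, e.g. 66.6789 -> 66.67.
        return float(decimal.Decimal(str(value)).quantize(
            decimal.Decimal(precision), rounding=decimal.ROUND_DOWN))

    print(round_down(66.6789))   # 66.67
    print(round_down(100.0))     # 100.0
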
index cee7a980f864ff68cc878ec5d5f4c1119eb89324..cf85ddc79ff98a192e9e0ab8cac863b225d3f489 100644 (file)
@@ -84,6 +84,8 @@ class NetAppBlockStorageLibrary(object):
                                  'xen', 'hyper_v']
     DEFAULT_LUN_OS = 'linux'
     DEFAULT_HOST_TYPE = 'linux'
+    DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
+    DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
 
     def __init__(self, driver_name, driver_protocol, **kwargs):
 
@@ -470,20 +472,30 @@ class NetAppBlockStorageLibrary(object):
     def _get_fc_target_wwpns(self, include_partner=True):
         raise NotImplementedError()
 
-    def get_volume_stats(self, refresh=False):
+    def get_volume_stats(self, refresh=False, filter_function=None,
+                         goodness_function=None):
         """Get volume stats.
 
-        If 'refresh' is True, run update the stats first.
+        If 'refresh' is True, update the stats first.
         """
 
         if refresh:
-            self._update_volume_stats()
-
+            self._update_volume_stats(filter_function=filter_function,
+                                      goodness_function=goodness_function)
         return self._stats
 
-    def _update_volume_stats(self):
+    def _update_volume_stats(self, filter_function=None,
+                             goodness_function=None):
         raise NotImplementedError()
 
+    def get_default_filter_function(self):
+        """Get the default filter_function string."""
+        return self.DEFAULT_FILTER_FUNCTION
+
+    def get_default_goodness_function(self):
+        """Get the default goodness_function string."""
+        return self.DEFAULT_GOODNESS_FUNCTION
+
     def extend_volume(self, volume, new_size):
         """Driver entry point to increase the size of a volume."""
 
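
The new defaults express scheduler hints in terms of the 'utilization' capability each pool now reports. A toy illustration of how the two default strings are meant to behave against reported pool stats; this is hand-rolled for clarity and is not the Cinder scheduler's real expression evaluator, and the pool values are invented:

    DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
    DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'

    pools = [
        {'pool_name': 'vol1', 'utilization': 30.0},
        {'pool_name': 'vol2', 'utilization': 85.0},
    ]

    # Hand-rolled equivalents of the two expressions, for illustration only.
    eligible = [p for p in pools if p['utilization'] < 70]
    best = max(eligible, key=lambda p: 100 - p['utilization'])

    print(best['pool_name'])   # 'vol1'; vol2 is filtered out at 85% utilization
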
index b8589ffb675c6173549fc59a8421c263f5c2abbb..0e59b0cbbd954aefb96ea4d123c1a2bc4374f5b5 100644 (file)
@@ -35,6 +35,7 @@ from cinder.i18n import _
 from cinder import utils
 from cinder.volume.drivers.netapp.dataontap import block_base
 from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
 from cinder.volume.drivers.netapp.dataontap import ssc_cmode
 from cinder.volume.drivers.netapp import options as na_opts
 from cinder.volume.drivers.netapp import utils as na_utils
@@ -73,6 +74,8 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
 
         self.ssc_vols = {}
         self.stale_vols = set()
+        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
+            self.zapi_client)
 
     def check_for_setup_error(self):
         """Check that the driver is working and can communicate."""
@@ -176,7 +179,8 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
         else:
             self.zapi_client.set_vserver(None)
 
-    def _update_volume_stats(self):
+    def _update_volume_stats(self, filter_function=None,
+                             goodness_function=None):
         """Retrieve stats info from vserver."""
 
         sync = True if self.ssc_vols is None else False
@@ -190,13 +194,15 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
         data['vendor_name'] = 'NetApp'
         data['driver_version'] = self.VERSION
         data['storage_protocol'] = self.driver_protocol
-        data['pools'] = self._get_pool_stats()
+        data['pools'] = self._get_pool_stats(
+            filter_function=filter_function,
+            goodness_function=goodness_function)
         data['sparse_copy_volume'] = True
 
         self.zapi_client.provide_ems(self, self.driver_name, self.app_version)
         self._stats = data
 
-    def _get_pool_stats(self):
+    def _get_pool_stats(self, filter_function=None, goodness_function=None):
         """Retrieve pool (Data ONTAP volume) stats info from SSC volumes."""
 
         pools = []
@@ -204,9 +210,14 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
         if not self.ssc_vols:
             return pools
 
-        for vol in self._get_filtered_pools():
+        filtered_pools = self._get_filtered_pools()
+        self.perf_library.update_performance_cache(filtered_pools)
+
+        for vol in filtered_pools:
+            pool_name = vol.id['name']
+
             pool = dict()
-            pool['pool_name'] = vol.id['name']
+            pool['pool_name'] = pool_name
             pool['QoS_support'] = True
             pool['reserved_percentage'] = (
                 self.reserved_percentage)
@@ -250,6 +261,12 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
             pool['thick_provisioning_support'] = thick
             pool['thin_provisioning_support'] = not thick
 
+            utilization = self.perf_library.get_node_utilization_for_pool(
+                pool_name)
+            pool['utilization'] = na_utils.round_down(utilization, '0.01')
+            pool['filter_function'] = filter_function
+            pool['goodness_function'] = goodness_function
+
             pools.append(pool)
 
         return pools
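
The per-pool lookup above tolerates pools whose node could not be resolved; test_get_node_utilization_for_pool earlier in this change pins the fallback to perf_base.DEFAULT_UTILIZATION. A condensed sketch of the behavior the test expects (not the library's literal implementation):

    DEFAULT_UTILIZATION = 50   # perf_base.DEFAULT_UTILIZATION

    pool_utilization = {'pool1': 10.0, 'pool2': 15.0}

    def get_node_utilization_for_pool(pool_name):
        # Unknown pools fall back to the default rather than failing.
        return pool_utilization.get(pool_name, DEFAULT_UTILIZATION)

    print(get_node_utilization_for_pool('pool1'))   # 10.0
    print(get_node_utilization_for_pool('pool3'))   # 50
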
index b4b77ada3a32bbfb912a61900f6378b123cdc8d3..e8847bda4650b9307b35c8597f486cdbb84b9967 100644 (file)
@@ -34,6 +34,7 @@ from cinder import utils
 
 LOG = logging.getLogger(__name__)
 
+EAPINOTFOUND = '13005'
 ESIS_CLONE_NOT_LICENSED = '14956'
 
 
index 55d2471c7b5fd4b900c371366f7b50cb89c8f5e9..f57c093ad4a6b59fedd64c06c9516431161ae204 100644 (file)
@@ -43,6 +43,15 @@ class Client(client_base.Client):
         self.connection.set_api_version(major, minor)
 
         self.volume_list = volume_list
+        self._init_features()
+
+    def _init_features(self):
+        super(Client, self)._init_features()
+
+        ontapi_version = self.get_ontapi_version()   # major, minor
+
+        ontapi_1_20 = ontapi_version >= (1, 20)
+        self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_20)
 
     def _invoke_vfiler_api(self, na_element, vfiler):
         server = copy.copy(self.connection)
@@ -417,3 +426,76 @@ class Client(client_base.Client):
             flexvol_info.get_child_content('size-available'))
 
         return total_bytes, available_bytes
+
+    def get_performance_instance_names(self, object_name):
+        """Get names of performance instances for a node."""
+
+        api_args = {'objectname': object_name}
+
+        result = self.send_request('perf-object-instance-list-info',
+                                   api_args,
+                                   enable_tunneling=False)
+
+        instance_names = []
+
+        instances = result.get_child_by_name(
+            'instances') or netapp_api.NaElement('None')
+
+        for instance_info in instances.get_children():
+            instance_names.append(instance_info.get_child_content('name'))
+
+        return instance_names
+
+    def get_performance_counters(self, object_name, instance_names,
+                                 counter_names):
+        """Gets or or more 7-mode Data ONTAP performance counters."""
+
+        api_args = {
+            'objectname': object_name,
+            'instances': [
+                {'instance': instance} for instance in instance_names
+            ],
+            'counters': [
+                {'counter': counter} for counter in counter_names
+            ],
+        }
+
+        result = self.send_request('perf-object-get-instances',
+                                   api_args,
+                                   enable_tunneling=False)
+
+        counter_data = []
+
+        timestamp = result.get_child_content('timestamp')
+
+        instances = result.get_child_by_name(
+            'instances') or netapp_api.NaElement('None')
+        for instance in instances.get_children():
+
+            instance_name = instance.get_child_content('name')
+
+            counters = instance.get_child_by_name(
+                'counters') or netapp_api.NaElement('None')
+            for counter in counters.get_children():
+
+                counter_name = counter.get_child_content('name')
+                counter_value = counter.get_child_content('value')
+
+                counter_data.append({
+                    'instance-name': instance_name,
+                    'timestamp': timestamp,
+                    counter_name: counter_value,
+                })
+
+        return counter_data
+
+    def get_system_name(self):
+        """Get the name of the 7-mode Data ONTAP controller."""
+
+        result = self.send_request('system-get-info',
+                                   {},
+                                   enable_tunneling=False)
+
+        system_info = result.get_child_by_name('system-info')
+        system_name = system_info.get_child_content('system-name')
+        return system_name
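
For readers unfamiliar with the ZAPI output, a hypothetical example (values invented) of the list get_performance_counters() builds above: each counter arrives as its own dict keyed by the counter name, alongside 'instance-name' and 'timestamp', so consumers scan for the dict that contains a given counter key.

    counter_data = [
        {'instance-name': 'system', 'timestamp': '1453573776',
         'avg_processor_busy': '4492829964'},
        {'instance-name': 'system', 'timestamp': '1453573776',
         'cpu_elapsed_time1': '171956632109'},
    ]

    # Typical consumer pattern, as in perf_base._find_performance_counter_value().
    busy = next(c for c in counter_data if 'avg_processor_busy' in c)
    print(busy['avg_processor_busy'])   # '4492829964'
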
index 1e9e2c64fccaa6b89bab477d7765fb74105e7a2e..eaead99300f9d6a686e2ffcb388205cd55a18ac9 100644 (file)
@@ -24,7 +24,8 @@ from oslo_utils import timeutils
 
 import six
 
-from cinder.i18n import _LE, _LW, _LI
+from cinder import exception
+from cinder.i18n import _, _LE, _LW, _LI
 from cinder import utils
 from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
 from cinder.volume.drivers.netapp import utils as na_utils
@@ -258,6 +259,36 @@ class Client(object):
         """Retrieves LUNs with specified args."""
         raise NotImplementedError()
 
+    def get_performance_counter_info(self, object_name, counter_name):
+        """Gets info about one or more Data ONTAP performance counters."""
+
+        api_args = {'objectname': object_name}
+        result = self.send_request('perf-object-counter-list-info',
+                                   api_args,
+                                   enable_tunneling=False)
+
+        counters = result.get_child_by_name(
+            'counters') or netapp_api.NaElement('None')
+
+        for counter in counters.get_children():
+
+            if counter.get_child_content('name') == counter_name:
+
+                labels = []
+                label_list = counter.get_child_by_name(
+                    'labels') or netapp_api.NaElement('None')
+                for label in label_list.get_children():
+                    labels.extend(label.get_content().split(','))
+                base_counter = counter.get_child_content('base-counter')
+
+                return {
+                    'name': counter_name,
+                    'labels': labels,
+                    'base-counter': base_counter,
+                }
+        else:
+            raise exception.NotFound(_('Counter %s not found') % counter_name)
+
     def provide_ems(self, requester, netapp_backend, app_version,
                     server_type="cluster"):
         """Provide ems with volume stats for the requester.
index a6672d0e190e5bd4340d2675acaf1df675309970..2b1b41d2bc926504b21efe37164d0b207f6c1de1 100644 (file)
@@ -52,8 +52,12 @@ class Client(client_base.Client):
 
         ontapi_version = self.get_ontapi_version()   # major, minor
 
+        ontapi_1_2x = (1, 20) <= ontapi_version < (1, 30)
         ontapi_1_30 = ontapi_version >= (1, 30)
+        self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x)
         self.features.add_feature('FAST_CLONE_DELETE', supported=ontapi_1_30)
+        self.features.add_feature('SYSTEM_CONSTITUENT_METRICS',
+                                  supported=ontapi_1_30)
 
     def _invoke_vserver_api(self, na_element, vserver):
         server = copy.copy(self.connection)
@@ -669,3 +673,134 @@ class Client(client_base.Client):
         if self.features.FAST_CLONE_DELETE:
             api_args['is-clone-file'] = 'true'
         self.send_request('file-delete-file', api_args, True)
+
+    def _get_aggregates(self, aggregate_names=None, desired_attributes=None):
+
+        query = {
+            'aggr-attributes': {
+                'aggregate-name': '|'.join(aggregate_names),
+            }
+        } if aggregate_names else None
+
+        api_args = {}
+        if query:
+            api_args['query'] = query
+        if desired_attributes:
+            api_args['desired-attributes'] = desired_attributes
+
+        result = self.send_request('aggr-get-iter',
+                                   api_args,
+                                   enable_tunneling=False)
+        if not self._has_records(result):
+            return []
+        else:
+            return result.get_child_by_name('attributes-list').get_children()
+
+    def get_node_for_aggregate(self, aggregate_name):
+        """Get home node for the specified aggregate.
+
+        This API could return None, most notably if it was sent
+        to a Vserver LIF, so the caller must be able to handle that case.
+        """
+
+        if not aggregate_name:
+            return None
+
+        desired_attributes = {
+            'aggr-attributes': {
+                'aggregate-name': None,
+                'aggr-ownership-attributes': {
+                    'home-name': None,
+                },
+            },
+        }
+
+        try:
+            aggrs = self._get_aggregates(aggregate_names=[aggregate_name],
+                                         desired_attributes=desired_attributes)
+        except netapp_api.NaApiError as e:
+            if e.code == netapp_api.EAPINOTFOUND:
+                return None
+            else:
+                raise e
+
+        if len(aggrs) < 1:
+            return None
+
+        aggr_ownership_attrs = aggrs[0].get_child_by_name(
+            'aggr-ownership-attributes') or netapp_api.NaElement('none')
+        return aggr_ownership_attrs.get_child_content('home-name')
+
+    def get_performance_instance_uuids(self, object_name, node_name):
+        """Get UUIDs of performance instances for a cluster node."""
+
+        api_args = {
+            'objectname': object_name,
+            'query': {
+                'instance-info': {
+                    'uuid': node_name + ':*',
+                }
+            }
+        }
+
+        result = self.send_request('perf-object-instance-list-info-iter',
+                                   api_args,
+                                   enable_tunneling=False)
+
+        uuids = []
+
+        instances = result.get_child_by_name(
+            'attributes-list') or netapp_api.NaElement('None')
+
+        for instance_info in instances.get_children():
+            uuids.append(instance_info.get_child_content('uuid'))
+
+        return uuids
+
+    def get_performance_counters(self, object_name, instance_uuids,
+                                 counter_names):
+        """Gets or or more cDOT performance counters."""
+
+        api_args = {
+            'objectname': object_name,
+            'instance-uuids': [
+                {'instance-uuid': instance_uuid}
+                for instance_uuid in instance_uuids
+            ],
+            'counters': [
+                {'counter': counter} for counter in counter_names
+            ],
+        }
+
+        result = self.send_request('perf-object-get-instances',
+                                   api_args,
+                                   enable_tunneling=False)
+
+        counter_data = []
+
+        timestamp = result.get_child_content('timestamp')
+
+        instances = result.get_child_by_name(
+            'instances') or netapp_api.NaElement('None')
+        for instance in instances.get_children():
+
+            instance_name = instance.get_child_content('name')
+            instance_uuid = instance.get_child_content('uuid')
+            node_name = instance_uuid.split(':')[0]
+
+            counters = instance.get_child_by_name(
+                'counters') or netapp_api.NaElement('None')
+            for counter in counters.get_children():
+
+                counter_name = counter.get_child_content('name')
+                counter_value = counter.get_child_content('value')
+
+                counter_data.append({
+                    'instance-name': instance_name,
+                    'instance-uuid': instance_uuid,
+                    'node-name': node_name,
+                    'timestamp': timestamp,
+                    counter_name: counter_value,
+                })
+
+        return counter_data
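
Two small points worth noting in the cDOT client additions: instance UUIDs are requested with a 'node_name:*' query, which is why get_performance_counters() can recover the node name by splitting on the first colon, and SYSTEM_METRICS is only claimed for ONTAPI 1.2x while 1.30+ advertises SYSTEM_CONSTITUENT_METRICS. A tiny sketch with an illustrative UUID and version:

    instance_uuid = 'node1:kernel:system'          # illustrative UUID only
    node_name = instance_uuid.split(':')[0]        # -> 'node1'

    ontapi_version = (1, 21)                       # e.g. an ONTAPI 1.2x system
    system_metrics = (1, 20) <= ontapi_version < (1, 30)
    system_constituent_metrics = ontapi_version >= (1, 30)
    print(node_name, system_metrics, system_constituent_metrics)   # node1 True False
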
index 2646eef4acf38ec6a525e8c813d12ce709149531..9efe27c6922571bdce8147b1701b5f6ecc16f4bd 100644 (file)
@@ -64,7 +64,15 @@ class NetApp7modeFibreChannelDriver(driver.BaseVD,
         self.library.delete_snapshot(snapshot)
 
     def get_volume_stats(self, refresh=False):
-        return self.library.get_volume_stats(refresh)
+        return self.library.get_volume_stats(refresh,
+                                             self.get_filter_function(),
+                                             self.get_goodness_function())
+
+    def get_default_filter_function(self):
+        return self.library.get_default_filter_function()
+
+    def get_default_goodness_function(self):
+        return self.library.get_default_goodness_function()
 
     def extend_volume(self, volume, new_size):
         self.library.extend_volume(volume, new_size)
index 7c6e83ab7038d214651b91928582489d40269015..391fff1585a7b720bc2e845b07c2227781b21adf 100644 (file)
@@ -64,7 +64,15 @@ class NetAppCmodeFibreChannelDriver(driver.BaseVD,
         self.library.delete_snapshot(snapshot)
 
     def get_volume_stats(self, refresh=False):
-        return self.library.get_volume_stats(refresh)
+        return self.library.get_volume_stats(refresh,
+                                             self.get_filter_function(),
+                                             self.get_goodness_function())
+
+    def get_default_filter_function(self):
+        return self.library.get_default_filter_function()
+
+    def get_default_goodness_function(self):
+        return self.library.get_default_goodness_function()
 
     def extend_volume(self, volume, new_size):
         self.library.extend_volume(volume, new_size)
index bf47ea1a17c498cfe8dbff7b84d09994a25cd6b4..aa32886493987fd17fa97432c81c10753631aeaf 100644 (file)
@@ -63,7 +63,15 @@ class NetApp7modeISCSIDriver(driver.BaseVD,
         self.library.delete_snapshot(snapshot)
 
     def get_volume_stats(self, refresh=False):
-        return self.library.get_volume_stats(refresh)
+        return self.library.get_volume_stats(refresh,
+                                             self.get_filter_function(),
+                                             self.get_goodness_function())
+
+    def get_default_filter_function(self):
+        return self.library.get_default_filter_function()
+
+    def get_default_goodness_function(self):
+        return self.library.get_default_goodness_function()
 
     def extend_volume(self, volume, new_size):
         self.library.extend_volume(volume, new_size)
index 53036b952d72ccd9b04ac8f3bda0609458061c07..9ce71ad7fb3c91e99f471b7d80ff73c0de87bf8b 100644 (file)
@@ -63,7 +63,15 @@ class NetAppCmodeISCSIDriver(driver.BaseVD,
         self.library.delete_snapshot(snapshot)
 
     def get_volume_stats(self, refresh=False):
-        return self.library.get_volume_stats(refresh)
+        return self.library.get_volume_stats(refresh,
+                                             self.get_filter_function(),
+                                             self.get_goodness_function())
+
+    def get_default_filter_function(self):
+        return self.library.get_default_filter_function()
+
+    def get_default_goodness_function(self):
+        return self.library.get_default_goodness_function()
 
     def extend_volume(self, volume, new_size):
         self.library.extend_volume(volume, new_size)
index 80027472fd19d3b38ab38a39082a0ca5e2234db4..305fb5dacab2ff50502b5bee2271842d169591c5 100644 (file)
@@ -31,6 +31,7 @@ from cinder.i18n import _
 from cinder import utils
 from cinder.volume.drivers.netapp.dataontap.client import client_7mode
 from cinder.volume.drivers.netapp.dataontap import nfs_base
+from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
 from cinder.volume.drivers.netapp import options as na_opts
 from cinder.volume.drivers.netapp import utils as na_utils
 
@@ -59,6 +60,8 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
             vfiler=self.configuration.netapp_vfiler)
 
         self.ssc_enabled = False
+        self.perf_library = perf_7mode.Performance7modeLibrary(
+            self.zapi_client)
 
     def check_for_setup_error(self):
         """Checks if setup occurred properly."""
@@ -97,7 +100,9 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
         data['vendor_name'] = 'NetApp'
         data['driver_version'] = self.VERSION
         data['storage_protocol'] = 'nfs'
-        data['pools'] = self._get_pool_stats()
+        data['pools'] = self._get_pool_stats(
+            filter_function=self.get_filter_function(),
+            goodness_function=self.get_goodness_function())
         data['sparse_copy_volume'] = True
 
         self._spawn_clean_cache_job()
@@ -105,10 +110,11 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
                                      server_type="7mode")
         self._stats = data
 
-    def _get_pool_stats(self):
+    def _get_pool_stats(self, filter_function=None, goodness_function=None):
         """Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
 
         pools = []
+        self.perf_library.update_performance_cache()
 
         for nfs_share in self._mounted_shares:
 
@@ -123,6 +129,11 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
             pool['thick_provisioning_support'] = thick
             pool['thin_provisioning_support'] = not thick
 
+            utilization = self.perf_library.get_node_utilization()
+            pool['utilization'] = na_utils.round_down(utilization, '0.01')
+            pool['filter_function'] = filter_function
+            pool['goodness_function'] = goodness_function
+
             pools.append(pool)
 
         return pools
index cd90782abba2900fda9ac2cb732a83b7d144358c..6ffad932e48b51ee26a2c91a5d451434d7d6c488 100644 (file)
@@ -62,6 +62,8 @@ class NetAppNfsDriver(driver.ManageableVD,
     VERSION = "1.0.0"
     REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
                       'netapp_server_hostname']
+    DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
+    DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
 
     def __init__(self, *args, **kwargs):
         na_utils.validate_instantiation(**kwargs)
@@ -295,6 +297,14 @@ class NetAppNfsDriver(driver.ManageableVD,
         """Retrieve stats info from volume group."""
         raise NotImplementedError()
 
+    def get_default_filter_function(self):
+        """Get the default filter_function string."""
+        return self.DEFAULT_FILTER_FUNCTION
+
+    def get_default_goodness_function(self):
+        """Get the default goodness_function string."""
+        return self.DEFAULT_GOODNESS_FUNCTION
+
     def copy_image_to_volume(self, context, volume, image_service, image_id):
         """Fetch the image from image_service and write it to the volume."""
         super(NetAppNfsDriver, self).copy_image_to_volume(
index 91f6fddf5394fad9f36cdae3c659adb7633c8129..4f46fe4c5c7ba56d172906f43b993078ec2dd609 100644 (file)
@@ -35,6 +35,7 @@ from cinder.image import image_utils
 from cinder import utils
 from cinder.volume.drivers.netapp.dataontap.client import client_cmode
 from cinder.volume.drivers.netapp.dataontap import nfs_base
+from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
 from cinder.volume.drivers.netapp.dataontap import ssc_cmode
 from cinder.volume.drivers.netapp import options as na_opts
 from cinder.volume.drivers.netapp import utils as na_utils
@@ -73,6 +74,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
         self.ssc_enabled = True
         self.ssc_vols = None
         self.stale_vols = set()
+        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
+            self.zapi_client)
 
     def check_for_setup_error(self):
         """Check that the driver is working and can communicate."""
@@ -158,16 +161,21 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
         data['vendor_name'] = 'NetApp'
         data['driver_version'] = self.VERSION
         data['storage_protocol'] = 'nfs'
-        data['pools'] = self._get_pool_stats()
+        data['pools'] = self._get_pool_stats(
+            filter_function=self.get_filter_function(),
+            goodness_function=self.get_goodness_function())
         data['sparse_copy_volume'] = True
 
         self._spawn_clean_cache_job()
         self.zapi_client.provide_ems(self, netapp_backend, self._app_version)
         self._stats = data
 
-    def _get_pool_stats(self):
+    def _get_pool_stats(self, filter_function=None, goodness_function=None):
         """Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
 
+        self.perf_library.update_performance_cache(
+            self.ssc_vols.get('all', []))
+
         pools = []
 
         for nfs_share in self._mounted_shares:
@@ -209,6 +217,12 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
                 pool['thick_provisioning_support'] = thick
                 pool['thin_provisioning_support'] = not thick
 
+                utilization = self.perf_library.get_node_utilization_for_pool(
+                    vol.id['name'])
+                pool['utilization'] = na_utils.round_down(utilization, '0.01')
+                pool['filter_function'] = filter_function
+                pool['goodness_function'] = goodness_function
+
             pools.append(pool)
 
         return pools
diff --git a/cinder/volume/drivers/netapp/dataontap/performance/__init__.py b/cinder/volume/drivers/netapp/dataontap/performance/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py
new file mode 100644 (file)
index 0000000..d1b2d28
--- /dev/null
@@ -0,0 +1,149 @@
+# Copyright (c) 2016 Clinton Knight
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Performance metrics functions and cache for NetApp 7-mode Data ONTAP systems.
+"""
+
+from oslo_log import log as logging
+
+from cinder.i18n import _LE
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.performance import perf_base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Performance7modeLibrary(perf_base.PerformanceLibrary):
+
+    def __init__(self, zapi_client):
+        super(Performance7modeLibrary, self).__init__(zapi_client)
+
+        self.performance_counters = []
+        self.utilization = perf_base.DEFAULT_UTILIZATION
+        self.node_name = self.zapi_client.get_system_name()
+
+    def _init_counter_info(self):
+        """Set a few counter names based on Data ONTAP version."""
+
+        super(Performance7modeLibrary, self)._init_counter_info()
+
+        if self.zapi_client.features.SYSTEM_METRICS:
+            self.system_object_name = 'system'
+            try:
+                self.avg_processor_busy_base_counter_name = (
+                    self._get_base_counter_name('system',
+                                                'avg_processor_busy'))
+            except netapp_api.NaApiError:
+                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
+                LOG.exception(_LE('Could not get performance base counter '
+                                  'name. Performance-based scheduler '
+                                  'functions may not be available.'))
+
+    def update_performance_cache(self):
+        """Called periodically to update node utilization metrics."""
+
+        # Nothing to do on older systems
+        if not self.zapi_client.features.SYSTEM_METRICS:
+            return
+
+        # Get new performance counters and save only the last 10
+        counters = self._get_node_utilization_counters()
+        if not counters:
+            return
+
+        self.performance_counters.append(counters)
+        self.performance_counters = self.performance_counters[-10:]
+
+        # Update utilization using newest & oldest sample
+        if len(self.performance_counters) < 2:
+            self.utilization = perf_base.DEFAULT_UTILIZATION
+        else:
+            self.utilization = self._get_node_utilization(
+                self.performance_counters[0], self.performance_counters[-1],
+                self.node_name)
+
+    def get_node_utilization(self):
+        """Get the node utilization, if available."""
+
+        return self.utilization
+
+    def _get_node_utilization_counters(self):
+        """Get all performance counters for calculating node utilization."""
+
+        try:
+            return (self._get_node_utilization_system_counters() +
+                    self._get_node_utilization_wafl_counters() +
+                    self._get_node_utilization_processor_counters())
+        except netapp_api.NaApiError:
+            LOG.exception(_LE('Could not get utilization counters from node '
+                              '%s'), self.node_name)
+            return None
+
+    def _get_node_utilization_system_counters(self):
+        """Get the system counters for calculating node utilization."""
+
+        system_instance_names = (
+            self.zapi_client.get_performance_instance_names(
+                self.system_object_name))
+
+        system_counter_names = [
+            'avg_processor_busy',
+            self.avg_processor_busy_base_counter_name,
+        ]
+        if 'cpu_elapsed_time1' in system_counter_names:
+            system_counter_names.append('cpu_elapsed_time')
+
+        system_counters = self.zapi_client.get_performance_counters(
+            self.system_object_name, system_instance_names,
+            system_counter_names)
+
+        return system_counters
+
+    def _get_node_utilization_wafl_counters(self):
+        """Get the WAFL counters for calculating node utilization."""
+
+        wafl_instance_names = self.zapi_client.get_performance_instance_names(
+            'wafl')
+
+        wafl_counter_names = ['total_cp_msecs', 'cp_phase_times']
+        wafl_counters = self.zapi_client.get_performance_counters(
+            'wafl', wafl_instance_names, wafl_counter_names)
+
+        # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH]
+        for counter in wafl_counters:
+            if 'cp_phase_times' in counter:
+                self._expand_performance_array(
+                    'wafl', 'cp_phase_times', counter)
+
+        return wafl_counters
+
+    def _get_node_utilization_processor_counters(self):
+        """Get the processor counters for calculating node utilization."""
+
+        processor_instance_names = (
+            self.zapi_client.get_performance_instance_names('processor'))
+
+        processor_counter_names = ['domain_busy', 'processor_elapsed_time']
+        processor_counters = self.zapi_client.get_performance_counters(
+            'processor', processor_instance_names, processor_counter_names)
+
+        # Expand array data so we can use processor:domain_busy[kahuna]
+        for counter in processor_counters:
+            if 'domain_busy' in counter:
+                self._expand_performance_array(
+                    'processor', 'domain_busy', counter)
+
+        return processor_counters
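
A toy illustration of the counter bookkeeping in update_performance_cache() above: at most ten samples are retained, and once two or more are present, utilization is computed from the oldest and newest of them. The sample payloads here are placeholders, not real counter sets.

    performance_counters = []

    for poll in range(15):                    # simulate 15 polling intervals
        counters = {'poll': poll}             # stands in for a real counter set
        performance_counters.append(counters)
        performance_counters = performance_counters[-10:]

    oldest, newest = performance_counters[0], performance_counters[-1]
    print(len(performance_counters), oldest['poll'], newest['poll'])   # 10 5 14
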
diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py
new file mode 100644 (file)
index 0000000..9fa4f89
--- /dev/null
@@ -0,0 +1,226 @@
+# Copyright (c) 2016 Clinton Knight
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Performance metrics functions and cache for NetApp systems.
+"""
+
+from oslo_log import log as logging
+
+from cinder import exception
+from cinder.i18n import _, _LE
+
+
+LOG = logging.getLogger(__name__)
+DEFAULT_UTILIZATION = 50
+
+
+class PerformanceLibrary(object):
+
+    def __init__(self, zapi_client):
+
+        self.zapi_client = zapi_client
+        self._init_counter_info()
+
+    def _init_counter_info(self):
+        """Set a few counter names based on Data ONTAP version."""
+
+        self.system_object_name = None
+        self.avg_processor_busy_base_counter_name = None
+
+    def _get_node_utilization(self, counters_t1, counters_t2, node_name):
+        """Get node utilization from two sets of performance counters."""
+
+        try:
+            # Time spent in the single-threaded Kahuna domain
+            kahuna_percent = self._get_kahuna_utilization(counters_t1,
+                                                          counters_t2)
+
+            # If Kahuna is using >60% of the CPU, the controller is fully busy
+            if kahuna_percent > 60:
+                return 100.0
+
+            # Average CPU busyness across all processors
+            avg_cpu_percent = 100.0 * self._get_average_cpu_utilization(
+                counters_t1, counters_t2)
+
+            # Total Consistency Point (CP) time
+            total_cp_time_msec = self._get_total_consistency_point_time(
+                counters_t1, counters_t2)
+
+            # Time spent in CP Phase 2 (buffer flush)
+            p2_flush_time_msec = self._get_consistency_point_p2_flush_time(
+                counters_t1, counters_t2)
+
+            # Wall-clock time between the two counter sets
+            poll_time_msec = self._get_total_time(counters_t1,
+                                                  counters_t2,
+                                                  'total_cp_msecs')
+
+            # If two polls happened in quick succession, use CPU utilization
+            if total_cp_time_msec == 0 or poll_time_msec == 0:
+                return max(min(100.0, avg_cpu_percent), 0)
+
+            # Adjusted Consistency Point time
+            adjusted_cp_time_msec = self._get_adjusted_consistency_point_time(
+                total_cp_time_msec, p2_flush_time_msec)
+            adjusted_cp_percent = (100.0 *
+                                   adjusted_cp_time_msec / poll_time_msec)
+
+            # Utilization is the greater of CPU busyness & CP time
+            node_utilization = max(avg_cpu_percent, adjusted_cp_percent)
+            return max(min(100.0, node_utilization), 0)
+
+        except Exception:
+            LOG.exception(_LE('Could not calculate node utilization for '
+                              'node %s.'), node_name)
+            return DEFAULT_UTILIZATION
+
+    def _get_kahuna_utilization(self, counters_t1, counters_t2):
+        """Get time spent in the single-threaded Kahuna domain."""
+
+        # Note(cknight): Because Kahuna is single-threaded, running only on
+        # one CPU at a time, we can safely sum the Kahuna CPU usage
+        # percentages across all processors in a node.
+        return sum(self._get_performance_counter_average_multi_instance(
+            counters_t1, counters_t2, 'domain_busy:kahuna',
+            'processor_elapsed_time')) * 100.0
+
+    def _get_average_cpu_utilization(self, counters_t1, counters_t2):
+        """Get average CPU busyness across all processors."""
+
+        return self._get_performance_counter_average(
+            counters_t1, counters_t2, 'avg_processor_busy',
+            self.avg_processor_busy_base_counter_name)
+
+    def _get_total_consistency_point_time(self, counters_t1, counters_t2):
+        """Get time spent in Consistency Points in msecs."""
+
+        return float(self._get_performance_counter_delta(
+            counters_t1, counters_t2, 'total_cp_msecs'))
+
+    def _get_consistency_point_p2_flush_time(self, counters_t1, counters_t2):
+        """Get time spent in CP Phase 2 (buffer flush) in msecs."""
+
+        return float(self._get_performance_counter_delta(
+            counters_t1, counters_t2, 'cp_phase_times:p2_flush'))
+
+    def _get_total_time(self, counters_t1, counters_t2, counter_name):
+        """Get wall clock time between two successive counters in msecs."""
+
+        timestamp_t1 = float(self._find_performance_counter_timestamp(
+            counters_t1, counter_name))
+        timestamp_t2 = float(self._find_performance_counter_timestamp(
+            counters_t2, counter_name))
+        return (timestamp_t2 - timestamp_t1) * 1000.0
+
+    def _get_adjusted_consistency_point_time(self, total_cp_time,
+                                             p2_flush_time):
+        """Get adjusted CP time by limiting CP phase 2 flush time to 20%."""
+
+        return (total_cp_time *
+                (1.0 - (1.0 * p2_flush_time / total_cp_time) / 0.8))
+
+    def _get_performance_counter_delta(self, counters_t1, counters_t2,
+                                       counter_name):
+        """Calculate a delta value from two performance counters."""
+
+        counter_t1 = int(
+            self._find_performance_counter_value(counters_t1, counter_name))
+        counter_t2 = int(
+            self._find_performance_counter_value(counters_t2, counter_name))
+
+        return counter_t2 - counter_t1
+
+    def _get_performance_counter_average(self, counters_t1, counters_t2,
+                                         counter_name, base_counter_name,
+                                         instance_name=None):
+        """Calculate an average value from two performance counters."""
+
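+        # Averaged counters are reported as a counter/base-counter pair; the
+        # average over the interval is the counter delta divided by the base
+        # counter delta.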
+        counter_t1 = float(self._find_performance_counter_value(
+            counters_t1, counter_name, instance_name))
+        counter_t2 = float(self._find_performance_counter_value(
+            counters_t2, counter_name, instance_name))
+        base_counter_t1 = float(self._find_performance_counter_value(
+            counters_t1, base_counter_name, instance_name))
+        base_counter_t2 = float(self._find_performance_counter_value(
+            counters_t2, base_counter_name, instance_name))
+
+        return (counter_t2 - counter_t1) / (base_counter_t2 - base_counter_t1)
+
+    def _get_performance_counter_average_multi_instance(self, counters_t1,
+                                                        counters_t2,
+                                                        counter_name,
+                                                        base_counter_name):
+        """Calculate an average value from multiple counter instances."""
+
+        averages = []
+        instance_names = []
+        for counter in counters_t1:
+            if counter_name in counter:
+                instance_names.append(counter['instance-name'])
+
+        for instance_name in instance_names:
+            average = self._get_performance_counter_average(
+                counters_t1, counters_t2, counter_name, base_counter_name,
+                instance_name)
+            averages.append(average)
+
+        return averages
+
+    def _find_performance_counter_value(self, counters, counter_name,
+                                        instance_name=None):
+        """Given a counter set, return the value of a named instance."""
+
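+        # The else clause belongs to the for loop: it raises only when the
+        # loop completes without returning a matching counter value.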
+        for counter in counters:
+            if counter_name in counter:
+                if (instance_name is None
+                        or counter['instance-name'] == instance_name):
+                    return counter[counter_name]
+        else:
+            raise exception.NotFound(_('Counter %s not found') % counter_name)
+
+    def _find_performance_counter_timestamp(self, counters, counter_name,
+                                            instance_name=None):
+        """Given a counter set, return the timestamp of a named instance."""
+
+        for counter in counters:
+            if counter_name in counter:
+                if (instance_name is None
+                        or counter['instance-name'] == instance_name):
+                    return counter['timestamp']
+        else:
+            raise exception.NotFound(_('Counter %s not found') % counter_name)
+
+    def _expand_performance_array(self, object_name, counter_name, counter):
+        """Get array labels and expand counter data array."""
+
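+        # For example, a cp_phase_times counter whose labels include P2_FLUSH
+        # gains a 'cp_phase_times:p2_flush' key holding that array element.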
+        # Get array labels for counter value
+        counter_info = self.zapi_client.get_performance_counter_info(
+            object_name, counter_name)
+
+        array_labels = [counter_name + ':' + label.lower()
+                        for label in counter_info['labels']]
+        array_values = counter[counter_name].split(',')
+
+        # Combine labels and values, and then mix into existing counter
+        array_data = dict(zip(array_labels, array_values))
+        counter.update(array_data)
+
+    def _get_base_counter_name(self, object_name, counter_name):
+        """Get the name of the base counter for the specified counter."""
+
+        counter_info = self.zapi_client.get_performance_counter_info(
+            object_name, counter_name)
+        return counter_info['base-counter']
diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py
new file mode 100644 (file)
index 0000000..f01b7f1
--- /dev/null
@@ -0,0 +1,206 @@
+# Copyright (c) 2016 Clinton Knight
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Performance metrics functions and cache for NetApp cDOT systems.
+"""
+
+from oslo_log import log as logging
+
+from cinder.i18n import _LE
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.performance import perf_base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
+
+    def __init__(self, zapi_client):
+        super(PerformanceCmodeLibrary, self).__init__(zapi_client)
+
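+        # Rolling window of raw counter samples, keyed by node name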
+        self.performance_counters = {}
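+        # Latest utilization percentages, keyed by pool name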
+        self.pool_utilization = {}
+
+    def _init_counter_info(self):
+        """Set a few counter names based on Data ONTAP version."""
+
+        super(PerformanceCmodeLibrary, self)._init_counter_info()
+
+        try:
+            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
+                self.system_object_name = 'system:constituent'
+                self.avg_processor_busy_base_counter_name = (
+                    self._get_base_counter_name('system:constituent',
+                                                'avg_processor_busy'))
+            elif self.zapi_client.features.SYSTEM_METRICS:
+                self.system_object_name = 'system'
+                self.avg_processor_busy_base_counter_name = (
+                    self._get_base_counter_name('system',
+                                                'avg_processor_busy'))
+        except netapp_api.NaApiError:
+            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
+                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
+            else:
+                self.avg_processor_busy_base_counter_name = (
+                    'cpu_elapsed_time1')
+            LOG.exception(_LE('Could not get performance base counter '
+                              'name. Performance-based scheduler '
+                              'functions may not be available.'))
+
+    def update_performance_cache(self, ssc_pools):
+        """Called periodically to update per-pool node utilization metrics."""
+
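+        # Map pools to aggregates to nodes, sample each node's counters,
+        # compute per-node utilization from the oldest & newest samples, and
+        # then record the result for each pool hosted on that node.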
+        # Nothing to do on older systems
+        if not (self.zapi_client.features.SYSTEM_METRICS or
+                self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS):
+            return
+
+        # Get aggregates and nodes for all known pools
+        aggr_names = self._get_aggregates_for_pools(ssc_pools)
+        node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names)
+
+        # Update performance counter cache for each node
+        node_utilization = {}
+        for node_name in node_names:
+            if node_name not in self.performance_counters:
+                self.performance_counters[node_name] = []
+
+            # Get new performance counters and save only the last 10
+            counters = self._get_node_utilization_counters(node_name)
+            if not counters:
+                continue
+
+            self.performance_counters[node_name].append(counters)
+            self.performance_counters[node_name] = (
+                self.performance_counters[node_name][-10:])
+
+            # Update node utilization using the oldest & newest samples
+            counters = self.performance_counters[node_name]
+            if len(counters) < 2:
+                node_utilization[node_name] = perf_base.DEFAULT_UTILIZATION
+            else:
+                node_utilization[node_name] = self._get_node_utilization(
+                    counters[0], counters[-1], node_name)
+
+        # Update pool utilization map atomically
+        pool_utilization = {}
+        for pool in ssc_pools:
+            pool_name = pool.id['name']
+            aggr_name = pool.aggr['name']
+            node_name = aggr_node_map.get(aggr_name)
+            if node_name:
+                pool_utilization[pool_name] = node_utilization.get(
+                    node_name, perf_base.DEFAULT_UTILIZATION)
+            else:
+                pool_utilization[pool_name] = perf_base.DEFAULT_UTILIZATION
+
+        self.pool_utilization = pool_utilization
+
+    def get_node_utilization_for_pool(self, pool_name):
+        """Get the node utilization for the specified pool, if available."""
+
+        return self.pool_utilization.get(pool_name,
+                                         perf_base.DEFAULT_UTILIZATION)
+
+    def _get_aggregates_for_pools(self, ssc_pools):
+        """Get the set of aggregates that contain the specified pools."""
+
+        aggr_names = set()
+        for pool in ssc_pools:
+            aggr_names.add(pool.aggr['name'])
+        return aggr_names
+
+    def _get_nodes_for_aggregates(self, aggr_names):
+        """Get the cluster nodes that own the specified aggregates."""
+
+        node_names = set()
+        aggr_node_map = {}
+
+        for aggr_name in aggr_names:
+            node_name = self.zapi_client.get_node_for_aggregate(aggr_name)
+            if node_name:
+                node_names.add(node_name)
+                aggr_node_map[aggr_name] = node_name
+
+        return node_names, aggr_node_map
+
+    def _get_node_utilization_counters(self, node_name):
+        """Get all performance counters for calculating node utilization."""
+
+        try:
+            return (self._get_node_utilization_system_counters(node_name) +
+                    self._get_node_utilization_wafl_counters(node_name) +
+                    self._get_node_utilization_processor_counters(node_name))
+        except netapp_api.NaApiError:
+            LOG.exception(_LE('Could not get utilization counters from node '
+                              '%s'), node_name)
+            return None
+
+    def _get_node_utilization_system_counters(self, node_name):
+        """Get the system counters for calculating node utilization."""
+
+        system_instance_uuids = (
+            self.zapi_client.get_performance_instance_uuids(
+                self.system_object_name, node_name))
+
+        system_counter_names = [
+            'avg_processor_busy',
+            self.avg_processor_busy_base_counter_name,
+        ]
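+        # When the base counter is cpu_elapsed_time1, also gather
+        # cpu_elapsed_time so both elapsed-time counters are sampled.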
+        if 'cpu_elapsed_time1' in system_counter_names:
+            system_counter_names.append('cpu_elapsed_time')
+
+        system_counters = self.zapi_client.get_performance_counters(
+            self.system_object_name, system_instance_uuids,
+            system_counter_names)
+
+        return system_counters
+
+    def _get_node_utilization_wafl_counters(self, node_name):
+        """Get the WAFL counters for calculating node utilization."""
+
+        wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids(
+            'wafl', node_name)
+
+        wafl_counter_names = ['total_cp_msecs', 'cp_phase_times']
+        wafl_counters = self.zapi_client.get_performance_counters(
+            'wafl', wafl_instance_uuids, wafl_counter_names)
+
+        # Expand array data so we can use the cp_phase_times:p2_flush value
+        for counter in wafl_counters:
+            if 'cp_phase_times' in counter:
+                self._expand_performance_array(
+                    'wafl', 'cp_phase_times', counter)
+
+        return wafl_counters
+
+    def _get_node_utilization_processor_counters(self, node_name):
+        """Get the processor counters for calculating node utilization."""
+
+        processor_instance_uuids = (
+            self.zapi_client.get_performance_instance_uuids('processor',
+                                                            node_name))
+
+        processor_counter_names = ['domain_busy', 'processor_elapsed_time']
+        processor_counters = self.zapi_client.get_performance_counters(
+            'processor', processor_instance_uuids, processor_counter_names)
+
+        # Expand array data so we can use the domain_busy:kahuna value
+        for counter in processor_counters:
+            if 'domain_busy' in counter:
+                self._expand_performance_array(
+                    'processor', 'domain_busy', counter)
+
+        return processor_counters
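+
+# A minimal usage sketch (hypothetical caller, not part of this module): a
+# cDOT driver would construct the library with its ZAPI client, refresh the
+# cache on each stats poll, and then read per-pool utilization values:
+#
+#     perf_library = PerformanceCmodeLibrary(zapi_client)
+#     perf_library.update_performance_cache(ssc_pools)
+#     utilization = perf_library.get_node_utilization_for_pool(pool_name)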