review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
NetApp: fix free space reported as zero during first vol stats update
author    Navneet Singh <singn@netapp.com>
          Sun, 17 Nov 2013 21:08:07 +0000 (02:38 +0530)
committer Navneet Singh <singn@netapp.com>
          Sun, 17 Nov 2013 23:18:39 +0000 (04:48 +0530)
NetApp clustered ONTAP drivers report free space as zero
for up to 60 seconds after driver start. This causes
problems for performance-sensitive deployments. The cause
is the asynchronous nature of the NetApp stats collection
job. The job is changed to run synchronously at driver
start to improve the customer experience.

Change-Id: I7d5cbf590897a0d328ece3a60516c92c0ad0ee7f
Closes-bug: #1253660
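
The heart of the change is swapping a fire-and-forget background
refresh for a blocking call on the first run. Below is a minimal
sketch of the two dispatch styles, with a stubbed collect_ssc_stats
standing in for the real NetApp job (both names here are
hypothetical, not from the patch):

    import threading
    import time

    def collect_ssc_stats():
        """Stand-in for the real SSC collection job (hypothetical)."""
        time.sleep(1)  # simulate a slow cluster query
        print("SSC stats collected")

    def refresh(synchronous=False):
        if synchronous:
            # Block until stats exist; the caller sees real numbers at once.
            collect_ssc_stats()
        else:
            # Timer(0, ...) runs the job on a background thread and returns
            # immediately, so an early stats poll can still observe zeros.
            threading.Timer(0, collect_ssc_stats).start()

    refresh(synchronous=True)   # first call: wait for real data
    refresh(synchronous=False)  # later calls: refresh in the background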

cinder/tests/test_netapp.py
cinder/volume/drivers/netapp/iscsi.py
cinder/volume/drivers/netapp/nfs.py
cinder/volume/drivers/netapp/ssc_utils.py

index fe3bb55127d8476e53d8ee864a0986512f32e49e..8244553fa4dde901d2423a64a28d27e30cd8139c 100644 (file)
@@ -545,7 +545,8 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
 
     def _custom_setup(self):
         self.stubs.Set(
-            ssc_utils, 'refresh_cluster_ssc', lambda a, b, c: None)
+            ssc_utils, 'refresh_cluster_ssc',
+            lambda a, b, c, synchronous: None)
         configuration = self._set_config(create_configuration())
         driver = common.NetAppDriver(configuration=configuration)
         self.stubs.Set(httplib, 'HTTPConnection',
index e5440465bb1f1466a26ca6e81d064080b6471e53..be5135503ed2353f558203948ecf546aa575f4f7 100644 (file)
@@ -781,7 +781,6 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
         self.client.set_api_version(major, minor)
         self.ssc_vols = None
         self.stale_vols = set()
-        ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
 
     def _create_lun_on_eligible_vol(self, name, size, metadata,
                                     extra_specs=None):
@@ -1057,6 +1056,9 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
 
     def _update_cluster_vol_stats(self, data):
         """Updates vol stats with cluster config."""
+        sync = True if self.ssc_vols is None else False
+        ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver,
+                                      synchronous=sync)
         if self.ssc_vols:
             data['netapp_mirrored'] = 'true'\
                 if self.ssc_vols['mirrored'] else 'false'
@@ -1090,7 +1092,6 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
                 data['free_capacity_gb'] = 0
         else:
             LOG.warn(_("Cluster ssc is not updated. No volume stats found."))
-        ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
 
     @utils.synchronized('update_stale')
     def _update_stale_vols(self, volume=None, reset=False):
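
A stylistic aside on the added lines above: `True if self.ssc_vols
is None else False` is equivalent to the bare comparison. A possible
simplification (an observation only, not part of the patch):

    ssc_vols = None  # no SSC data yet, as on the first stats update
    # Equivalent to: sync = True if ssc_vols is None else False
    sync = ssc_vols is None
    print(sync)  # True, so the first refresh runs synchronously
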
index 602a1dcc20dc0ee68ee4d4dec3d1d88baddfff76..6f826979e53cac342050daca10d28ad35c9722ca 100644 (file)
@@ -714,7 +714,6 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
             self.ssc_enabled = True
             LOG.info(_("Shares on vserver %s will only"
                        " be used for provisioning.") % (self.vserver))
-            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
         else:
             self.ssc_enabled = False
             LOG.warn(_("No vserver set in config. SSC will be disabled."))
@@ -881,6 +880,12 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
 
     def _update_cluster_vol_stats(self, data):
         """Updates vol stats with cluster config."""
+        if self.ssc_enabled:
+            sync = True if self.ssc_vols is None else False
+            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver,
+                                          synchronous=sync)
+        else:
+            LOG.warn(_("No vserver set in config. SSC will be disabled."))
         if self.ssc_vols:
             data['netapp_mirrored'] = 'true'\
                 if self.ssc_vols['mirrored'] else 'false'
@@ -914,10 +919,6 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
         elif self.ssc_enabled:
             LOG.warn(_("No cluster ssc stats found."
                        " Wait for next volume stats update."))
-        if self.ssc_enabled:
-            ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
-        else:
-            LOG.warn(_("No vserver set in config. SSC will be disabled."))
 
     @utils.synchronized('update_stale')
     def _update_stale_vols(self, volume=None, reset=False):
index d03d61efe4a805ae1dc1244014d6e482ca9f6585..1343d4f4f713b51253848b2c89e0e683f9225626 100644 (file)
@@ -434,6 +434,9 @@ def refresh_cluster_stale_ssc(*args, **kwargs):
                         vol_set = ssc_vols_copy[k]
                         vol_set.discard(vol)
                 backend.refresh_ssc_vols(ssc_vols_copy)
+                LOG.info(_('Successfully completed stale refresh job for'
+                           ' %(server)s and vserver %(vs)s')
+                         % {'server': na_server, 'vs': vserver})
 
         refresh_stale_ssc()
     finally:
@@ -464,13 +467,16 @@ def get_cluster_latest_ssc(*args, **kwargs):
             ssc_vols = get_cluster_ssc(na_server, vserver)
             backend.refresh_ssc_vols(ssc_vols)
             backend.ssc_run_time = timeutils.utcnow()
+            LOG.info(_('Successfully completed ssc job for %(server)s'
+                       ' and vserver %(vs)s')
+                     % {'server': na_server, 'vs': vserver})
 
         get_latest_ssc()
     finally:
         na_utils.set_safe_attr(backend, 'ssc_job_running', False)
 
 
-def refresh_cluster_ssc(backend, na_server, vserver):
+def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
     """Refresh cluster ssc for backend."""
     if not isinstance(backend, driver.VolumeDriver):
         raise exception.InvalidInput(reason=_("Backend not a VolumeDriver."))
@@ -483,17 +489,23 @@ def refresh_cluster_ssc(backend, na_server, vserver):
     elif (getattr(backend, 'ssc_run_time', None) is None or
           (backend.ssc_run_time and
            timeutils.is_newer_than(backend.ssc_run_time, delta_secs))):
-        t = Timer(0, get_cluster_latest_ssc,
-                  args=[backend, na_server, vserver])
-        t.start()
+        if synchronous:
+            get_cluster_latest_ssc(backend, na_server, vserver)
+        else:
+            t = Timer(0, get_cluster_latest_ssc,
+                      args=[backend, na_server, vserver])
+            t.start()
     elif getattr(backend, 'refresh_stale_running', None):
             LOG.warn(_('refresh stale ssc job in progress. Returning... '))
             return
     else:
         if backend.stale_vols:
-            t = Timer(0, refresh_cluster_stale_ssc,
-                      args=[backend, na_server, vserver])
-            t.start()
+            if synchronous:
+                refresh_cluster_stale_ssc(backend, na_server, vserver)
+            else:
+                t = Timer(0, refresh_cluster_stale_ssc,
+                          args=[backend, na_server, vserver])
+                t.start()
 
 
 def get_volumes_for_specs(ssc_vols, specs):
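
Taken together, the patched flow means the first stats poll blocks
until SSC data exists, so free space is never reported as zero merely
because the background job has not finished. A compact, runnable
sketch of that behavior; everything except the `synchronous` flag is
a hypothetical stand-in:

    import threading

    class FakeDriver:
        """Hypothetical mini-driver mimicking the patched control flow."""

        def __init__(self):
            self.ssc_vols = None  # no SSC data until the first refresh

        def _collect(self):
            # Pretend cluster query; the real job walks the vserver's volumes.
            self.ssc_vols = {'pool1': {'free_gb': 128}}

        def refresh_cluster_ssc(self, synchronous=False):
            if synchronous:
                self._collect()  # block until data exists
            else:
                threading.Timer(0, self._collect).start()  # background refresh

        def update_stats(self):
            # Mirrors the patched _update_cluster_vol_stats: force a
            # synchronous refresh only when no SSC data has ever been seen.
            self.refresh_cluster_ssc(synchronous=self.ssc_vols is None)
            return sum(v['free_gb'] for v in (self.ssc_vols or {}).values())

    driver = FakeDriver()
    print(driver.update_stats())  # 128 on the very first call, not 0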