def _custom_setup(self):
self.stubs.Set(
- ssc_utils, 'refresh_cluster_ssc', lambda a, b, c: None)
+ ssc_utils, 'refresh_cluster_ssc',
+ lambda a, b, c, synchronous: None)
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(httplib, 'HTTPConnection',
self.client.set_api_version(major, minor)
self.ssc_vols = None
self.stale_vols = set()
- ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
def _create_lun_on_eligible_vol(self, name, size, metadata,
extra_specs=None):
def _update_cluster_vol_stats(self, data):
"""Updates vol stats with cluster config."""
+ sync = self.ssc_vols is None
+ ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver,
+ synchronous=sync)
if self.ssc_vols:
data['netapp_mirrored'] = 'true'\
if self.ssc_vols['mirrored'] else 'false'
data['free_capacity_gb'] = 0
else:
LOG.warn(_("Cluster ssc is not updated. No volume stats found."))
- ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
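The two hunks above change when the refresh happens: it now runs at the top of _update_cluster_vol_stats instead of at the end, and it blocks only while self.ssc_vols is still unset, so the very first stats report already carries SSC data while later reports stay non-blocking. A minimal, self-contained sketch of that pattern (the class and attribute names here are illustrative, not the driver's):

import threading

class StatsCollector(object):
    """Sketch only: block on the first SSC refresh, go async afterwards."""

    def __init__(self):
        self.ssc_vols = None          # stands in for the driver's SSC cache

    def _refresh(self):
        # Placeholder for the real cluster queries.
        self.ssc_vols = {'mirrored': set()}

    def update_stats(self, data):
        if self.ssc_vols is None:
            self._refresh()           # first call: wait for usable stats
        else:
            threading.Timer(0, self._refresh).start()   # later calls: async
        data['netapp_mirrored'] = ('true' if self.ssc_vols['mirrored']
                                   else 'false')

stats = {}
StatsCollector().update_stats(stats)   # first call returns populated stats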
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
self.ssc_enabled = True
LOG.info(_("Shares on vserver %s will only"
" be used for provisioning.") % (self.vserver))
- ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
else:
self.ssc_enabled = False
LOG.warn(_("No vserver set in config. SSC will be disabled."))
def _update_cluster_vol_stats(self, data):
"""Updates vol stats with cluster config."""
+ if self.ssc_enabled:
+ sync = self.ssc_vols is None
+ ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver,
+ synchronous=sync)
+ else:
+ LOG.warn(_("No vserver set in config. SSC will be disabled."))
if self.ssc_vols:
data['netapp_mirrored'] = 'true'\
if self.ssc_vols['mirrored'] else 'false'
elif self.ssc_enabled:
LOG.warn(_("No cluster ssc stats found."
" Wait for next volume stats update."))
- if self.ssc_enabled:
- ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
- else:
- LOG.warn(_("No vserver set in config. SSC will be disabled."))
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
vol_set = ssc_vols_copy[k]
vol_set.discard(vol)
backend.refresh_ssc_vols(ssc_vols_copy)
+ LOG.info(_('Successfully completed stale refresh job for'
+ ' %(server)s and vserver %(vs)s')
+ % {'server': na_server, 'vs': vserver})
refresh_stale_ssc()
finally:
ssc_vols = get_cluster_ssc(na_server, vserver)
backend.refresh_ssc_vols(ssc_vols)
backend.ssc_run_time = timeutils.utcnow()
+ LOG.info(_('Successfully completed ssc job for %(server)s'
+ ' and vserver %(vs)s')
+ % {'server': na_server, 'vs': vserver})
get_latest_ssc()
finally:
na_utils.set_safe_attr(backend, 'ssc_job_running', False)
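The finally block above pairs with a flag raised before the job starts: na_utils.set_safe_attr marks the backend as busy and the try/finally guarantees the mark is cleared even if the SSC query fails. A hedged sketch of that guard using a plain attribute (set_safe_attr's locking is not reproduced here):

def run_guarded_job(backend, job):
    # Illustrative only; the driver uses na_utils.set_safe_attr, which also
    # serializes access to the flag.
    if getattr(backend, 'ssc_job_running', False):
        return                      # another thread already owns the job
    backend.ssc_job_running = True
    try:
        job()
    finally:
        # Always clear the flag so a failed run cannot block future refreshes.
        backend.ssc_job_running = False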
-def refresh_cluster_ssc(backend, na_server, vserver):
+def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
"""Refresh cluster ssc for backend."""
if not isinstance(backend, driver.VolumeDriver):
raise exception.InvalidInput(reason=_("Backend not a VolumeDriver."))
elif (getattr(backend, 'ssc_run_time', None) is None or
(backend.ssc_run_time and
timeutils.is_newer_than(backend.ssc_run_time, delta_secs))):
- t = Timer(0, get_cluster_latest_ssc,
- args=[backend, na_server, vserver])
- t.start()
+ if synchronous:
+ get_cluster_latest_ssc(backend, na_server, vserver)
+ else:
+ t = Timer(0, get_cluster_latest_ssc,
+ args=[backend, na_server, vserver])
+ t.start()
elif getattr(backend, 'refresh_stale_running', None):
LOG.warn(_('refresh stale ssc job in progress. Returning... '))
return
else:
if backend.stale_vols:
- t = Timer(0, refresh_cluster_stale_ssc,
- args=[backend, na_server, vserver])
- t.start()
+ if synchronous:
+ refresh_cluster_stale_ssc(backend, na_server, vserver)
+ else:
+ t = Timer(0, refresh_cluster_stale_ssc,
+ args=[backend, na_server, vserver])
+ t.start()
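The new synchronous flag decides whether the caller waits for the SSC data or hands the work to Timer(0, ...), which runs the target on a separate thread almost immediately and returns control at once. A toy illustration of the two paths (the gather function is an assumption, not ssc_utils code):

import time
from threading import Timer

def _gather(cache):
    time.sleep(0.1)                  # stands in for the cluster API calls
    cache['vols'] = {'vol1'}

def refresh(cache, synchronous=False):
    if synchronous:
        _gather(cache)               # caller blocks; data ready on return
    else:
        Timer(0, _gather, args=[cache]).start()   # caller returns immediately

cache = {}
refresh(cache, synchronous=True)
assert 'vols' in cache               # the blocking first call filled the cache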
def get_volumes_for_specs(ssc_vols, specs):