def check_netapp_driver(location):
"""Checks if the driver requested is a netapp driver."""
if location.find(".netapp.") == -1:
- raise exception.InvalidInput(
- reason=_("Only loading netapp drivers supported."))
+ raise exception.InvalidInput(
+ reason=_("Only loading netapp drivers supported."))
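For reference, a minimal standalone version of this guard behaves like the sketch below (ValueError stands in for cinder's InvalidInput; the example driver paths are only illustrations):

def check_netapp_driver(location):
    """Reject any driver path that does not live under a .netapp. package."""
    # str.find returns -1 when the substring is absent
    if location.find(".netapp.") == -1:
        raise ValueError("Only loading netapp drivers is supported.")

# accepted: "cinder.volume.drivers.netapp.common.NetAppDriver"
# rejected: "cinder.volume.drivers.lvm.LVMISCSIDriver"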
class Deprecated(driver.VolumeDriver):
" verify: %(v)s, kwargs: %(k)s." % (params))
url = self._get_resource_url(path, use_system, **kwargs)
if self._content_type == 'json':
- headers = {'Accept': 'application/json',
- 'Content-Type': 'application/json'}
- data = json.dumps(data) if data else None
- res = self.invoke_service(method, url, data=data,
- headers=headers,
- timeout=timeout, verify=verify)
- return res.json() if res.text else None
+ headers = {'Accept': 'application/json',
+ 'Content-Type': 'application/json'}
+ data = json.dumps(data) if data else None
+ res = self.invoke_service(method, url, data=data,
+ headers=headers,
+ timeout=timeout, verify=verify)
+ return res.json() if res.text else None
else:
raise exception.NetAppDriverException(
_("Content type not supported."))
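The JSON branch above follows the usual requests-style round trip; a self-contained sketch under the assumption that invoke_service wraps a requests call (the function name and the requests dependency are illustrative, not the driver's actual client):

import json
import requests

def invoke_json(method, url, data=None, timeout=None, verify=False):
    """Send a JSON request and return the decoded body, or None if it is empty."""
    headers = {'Accept': 'application/json',
               'Content-Type': 'application/json'}
    body = json.dumps(data) if data else None
    res = requests.request(method, url, data=body, headers=headers,
                           timeout=timeout, verify=verify)
    # res.json() raises on an empty body, hence the guard on res.text
    return res.json() if res.text else None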
if avl_vol['name'] in self.volume_list:
return avl_vol
elif self._get_vol_option(avl_vol['name'], 'root') != 'true':
- return avl_vol
+ return avl_vol
return None
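The selection rule above reads: a volume is usable if it is explicitly whitelisted, or if it is not a root volume. A reduced sketch of the same filter (helper names are illustrative):

def pick_volume(avl_vol, volume_list, get_vol_option):
    """Return avl_vol if it is whitelisted or not a root volume, else None."""
    if avl_vol['name'] in volume_list:
        return avl_vol  # explicitly configured volume
    if get_vol_option(avl_vol['name'], 'root') != 'true':
        return avl_vol  # any non-root volume qualifies
    return None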
def _get_igroup_by_initiator(self, initiator):
def _spawn_clean_cache_job(self):
"""Spawns a cleaning task if one is not already running."""
if getattr(self, 'cleaning', None):
- LOG.debug('Image cache cleaning in progress. Returning... ')
- return
+ LOG.debug('Image cache cleaning in progress. Returning... ')
+ return
else:
- #set cleaning to True
- self.cleaning = True
- t = Timer(0, self._clean_image_cache)
- t.start()
+ #set cleaning to True
+ self.cleaning = True
+ t = Timer(0, self._clean_image_cache)
+ t.start()
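The spawn-once pattern above (a boolean attribute plus a zero-delay Timer) can be sketched standalone; threading.Timer matches the Timer used above, while the flag reset in the worker is an assumption about the elided body:

from threading import Timer

class CacheCleaner(object):
    def spawn_clean_cache_job(self):
        """Start the cache cleaner in the background, at most once."""
        if getattr(self, 'cleaning', None):
            # a previous cleaning job is still running; do not start another
            return
        self.cleaning = True
        # Timer(0, ...) runs the callable immediately on its own thread
        Timer(0, self._clean_image_cache).start()

    def _clean_image_cache(self):
        try:
            pass  # delete cached image files here
        finally:
            self.cleaning = False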
def _clean_image_cache(self):
"""Clean the image cache files in case of space crunch."""
return True
return False
if _do_delete():
- bytes_to_free = bytes_to_free - int(f[1])
- if bytes_to_free <= 0:
- return
+ bytes_to_free = bytes_to_free - int(f[1])
+ if bytes_to_free <= 0:
+ return
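The fragment above subtracts each deleted file's size from a byte budget and stops once enough space has been reclaimed; a self-contained sketch of that loop (the directory listing and LRU ordering are assumptions):

import os

def free_cache_space(cache_dir, bytes_to_free):
    """Delete cached files, least recently used first, until enough is freed."""
    paths = [os.path.join(cache_dir, name) for name in os.listdir(cache_dir)]
    files = [(p, os.path.getsize(p), os.path.getatime(p))
             for p in paths if os.path.isfile(p)]
    # least recently accessed files are deleted first
    for path, size, _atime in sorted(files, key=lambda f: f[2]):
        try:
            os.unlink(path)
        except OSError:
            continue  # an undeletable file frees nothing; move on
        bytes_to_free -= size
        if bytes_to_free <= 0:
            return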
def _delete_file(self, path):
"""Delete file from disk and return result as boolean."""
@utils.synchronized(lock_pr)
def refresh_stale_ssc():
- stale_vols = backend._update_stale_vols(reset=True)
- LOG.info(_('Running stale ssc refresh job for %(server)s'
- ' and vserver %(vs)s')
- % {'server': na_server, 'vs': vserver})
- # Refreshing single volumes can create inconsistency,
- # hence manipulations are done on a copy.
- ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
- refresh_vols = set()
- expired_vols = set()
- for vol in stale_vols:
- name = vol.id['name']
- res = get_cluster_vols_with_ssc(na_server, vserver, name)
- if res:
- refresh_vols.add(res.pop())
- else:
- expired_vols.add(vol)
- for vol in refresh_vols:
- for k in ssc_vols_copy:
- vol_set = ssc_vols_copy[k]
- vol_set.discard(vol)
- if k == "mirrored" and vol.mirror.get('mirrored'):
- vol_set.add(vol)
- if k == "dedup" and vol.sis.get('dedup'):
- vol_set.add(vol)
- if k == "compression" and vol.sis.get('compression'):
- vol_set.add(vol)
- if k == "thin" and vol.space.get('thin_provisioned'):
- vol_set.add(vol)
- if k == "all":
- vol_set.add(vol)
- for vol in expired_vols:
- for k in ssc_vols_copy:
- vol_set = ssc_vols_copy[k]
- vol_set.discard(vol)
- backend.refresh_ssc_vols(ssc_vols_copy)
- LOG.info(_('Successfully completed stale refresh job for'
- ' %(server)s and vserver %(vs)s')
- % {'server': na_server, 'vs': vserver})
+ stale_vols = backend._update_stale_vols(reset=True)
+ LOG.info(_('Running stale ssc refresh job for %(server)s'
+ ' and vserver %(vs)s')
+ % {'server': na_server, 'vs': vserver})
+ # Refreshing single volumes can create inconsistency,
+ # hence manipulations are done on a copy.
+ ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
+ refresh_vols = set()
+ expired_vols = set()
+ for vol in stale_vols:
+ name = vol.id['name']
+ res = get_cluster_vols_with_ssc(na_server, vserver, name)
+ if res:
+ refresh_vols.add(res.pop())
+ else:
+ expired_vols.add(vol)
+ for vol in refresh_vols:
+ for k in ssc_vols_copy:
+ vol_set = ssc_vols_copy[k]
+ vol_set.discard(vol)
+ if k == "mirrored" and vol.mirror.get('mirrored'):
+ vol_set.add(vol)
+ if k == "dedup" and vol.sis.get('dedup'):
+ vol_set.add(vol)
+ if k == "compression" and vol.sis.get('compression'):
+ vol_set.add(vol)
+ if k == "thin" and vol.space.get('thin_provisioned'):
+ vol_set.add(vol)
+ if k == "all":
+ vol_set.add(vol)
+ for vol in expired_vols:
+ for k in ssc_vols_copy:
+ vol_set = ssc_vols_copy[k]
+ vol_set.discard(vol)
+ backend.refresh_ssc_vols(ssc_vols_copy)
+ LOG.info(_('Successfully completed stale refresh job for'
+ ' %(server)s and vserver %(vs)s')
+ % {'server': na_server, 'vs': vserver})
refresh_stale_ssc()
finally:
raise exception.InvalidInput(reason=_("Backend server not NaServer."))
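The stale-ssc refresh above mutates a copy of the ssc bucket sets and only swaps it in at the end, so readers never observe a half-updated view: each refreshed volume is discarded from every bucket and re-added to the buckets whose predicate it still satisfies (plus 'all'), while expired volumes are discarded everywhere. A reduced sketch of that bucket rebuild (the predicate wiring is illustrative):

def rebuild_buckets(buckets, refreshed, expired, predicates):
    """Return a new {name: set(vols)} mapping with refresh/expiry applied.

    predicates maps a bucket name to a callable(vol) -> bool; 'all' always matches.
    """
    # work on copies so callers keep seeing a consistent snapshot
    new_buckets = {name: set(vols) for name, vols in buckets.items()}
    for vol in refreshed:
        for name, vol_set in new_buckets.items():
            vol_set.discard(vol)
            if name == 'all' or predicates.get(name, lambda v: False)(vol):
                vol_set.add(vol)
    for vol in expired:
        for vol_set in new_buckets.values():
            vol_set.discard(vol)
    return new_buckets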
delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
if getattr(backend, 'ssc_job_running', None):
- LOG.warn(_('ssc job in progress. Returning... '))
- return
+ LOG.warn(_('ssc job in progress. Returning... '))
+ return
elif (getattr(backend, 'ssc_run_time', None) is None or
(backend.ssc_run_time and
timeutils.is_newer_than(backend.ssc_run_time, delta_secs))):
args=[backend, na_server, vserver])
t.start()
elif getattr(backend, 'refresh_stale_running', None):
- LOG.warn(_('refresh stale ssc job in progress. Returning... '))
- return
+ LOG.warn(_('refresh stale ssc job in progress. Returning... '))
+ return
else:
if backend.stale_vols:
if synchronous:
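The scheduling code above gates a background SSC job on two conditions: a per-backend "already running" flag and a minimum interval (delta_secs, defaulting to 1800) since the last run. A minimal sketch of that gate, with time.time() standing in for timeutils and the attribute handling simplified:

import time
from threading import Timer

def maybe_start_job(backend, job, delta_secs=1800):
    """Run job(backend) on a Timer thread unless one is running or ran recently."""
    if getattr(backend, 'ssc_job_running', None):
        return  # a job is already in flight
    last_run = getattr(backend, 'ssc_run_time', None)
    if last_run is None or (time.time() - last_run) > delta_secs:
        backend.ssc_job_running = True
        Timer(0, job, args=[backend]).start()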
def create_api_request(api_name, query=None, des_result=None,
additional_elems=None, is_iter=False,
record_step=50, tag=None):
- """Creates a NetApp api request.
-
- :param api_name: api name string
- :param query: api query as dict
- :param des_result: desired result as dict
- :param additional_elems: dict other than query and des_result
- :param is_iter: is iterator api
- :param record_step: records at a time for iter api
- :param tag: next tag for iter api
- """
- api_el = NaElement(api_name)
- if query:
- query_el = NaElement('query')
- query_el.translate_struct(query)
- api_el.add_child_elem(query_el)
- if des_result:
- res_el = NaElement('desired-attributes')
- res_el.translate_struct(des_result)
- api_el.add_child_elem(res_el)
- if additional_elems:
- api_el.translate_struct(additional_elems)
- if is_iter:
- api_el.add_new_child('max-records', str(record_step))
- if tag:
- api_el.add_new_child('tag', tag, True)
- return api_el
+ """Creates a NetApp api request.
+
+ :param api_name: api name string
+ :param query: api query as dict
+ :param des_result: desired result as dict
+ :param additional_elems: dict other than query and des_result
+ :param is_iter: is iterator api
+ :param record_step: records at a time for iter api
+ :param tag: next tag for iter api
+ """
+ api_el = NaElement(api_name)
+ if query:
+ query_el = NaElement('query')
+ query_el.translate_struct(query)
+ api_el.add_child_elem(query_el)
+ if des_result:
+ res_el = NaElement('desired-attributes')
+ res_el.translate_struct(des_result)
+ api_el.add_child_elem(res_el)
+ if additional_elems:
+ api_el.translate_struct(additional_elems)
+ if is_iter:
+ api_el.add_new_child('max-records', str(record_step))
+ if tag:
+ api_el.add_new_child('tag', tag, True)
+ return api_el
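As a usage illustration, an iterator request with a query and desired attributes could be built roughly as follows (the API and element names here are only an example, not taken from this change):

request = create_api_request(
    'volume-get-iter',
    query={'volume-attributes': {'volume-id-attributes': {'name': 'vol1'}}},
    des_result={'volume-attributes': ['volume-id-attributes',
                                      'volume-space-attributes']},
    is_iter=True,
    record_step=100,
    tag='next-tag-from-previous-reply')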
def to_bool(val):
if (strg == 'true' or strg == 'y'
or strg == 'yes' or strg == 'enabled'
or strg == '1'):
- return True
+ return True
else:
return False
else:
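to_bool above refers to strg, which the elided context presumably derives from val; a self-contained version of the same truthy-string check might read:

def to_bool(val):
    """Return True for 'true', 'y', 'yes', 'enabled' or '1' (case-insensitive)."""
    if not val:
        return False
    strg = str(val).lower()
    return strg in ('true', 'y', 'yes', 'enabled', '1')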
if (not _convert_to_base and
isinstance(ex, hpexceptions.HTTPForbidden) and
ex.get_code() == 150):
- # Error code 150 means 'invalid operation: Cannot grow
- # this type of volume'.
- # Suppress raising this exception because we can
- # resolve it by converting it into a base volume.
- # Afterwards, extending the volume should succeed, or
- # fail with a different exception/error code.
- ex_ctxt.reraise = False
- self._extend_volume(volume, volume_name,
- growth_size_mib,
- _convert_to_base=True)
+ # Error code 150 means 'invalid operation: Cannot grow
+ # this type of volume'.
+ # Suppress raising this exception because we can
+ # resolve it by converting it into a base volume.
+ # Afterwards, extending the volume should succeed, or
+ # fail with a different exception/error code.
+ ex_ctxt.reraise = False
+ self._extend_volume(volume, volume_name,
+ growth_size_mib,
+ _convert_to_base=True)
else:
LOG.error(_("Error extending volume: %(vol)s. "
"Exception: %(ex)s") %