REPL_SETTINGS_PROPAGATE_MAX_RETRIES = 36 # 36 * 5 = 180 seconds
-def log_debug_trace(f):
+def pure_driver_debug_trace(f):
+    """Log the method entrance and exit including active backend name.
+
+    This should only be used on VolumeDriver class methods. It depends on
+    having a 'self' argument that is a PureBaseVolumeDriver.
+    """
    def wrapper(*args, **kwargs):
-        cls_name = args[0].__class__.__name__
+        driver = args[0]  # self
+        cls_name = driver.__class__.__name__
        method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name,
                                                   "method": f.__name__}
-        LOG.debug("Enter " + method_name)
+        backend_name = driver._get_current_array()._backend_id
+        # Pass the mapping as a lazy logging argument instead of eagerly
+        # interpolating with '%': stdlib/oslo logging only formats the
+        # message when the DEBUG level is actually enabled.
+        LOG.debug("[%(backend_name)s] Enter %(method_name)s",
+                  {"method_name": method_name, "backend_name": backend_name})
        result = f(*args, **kwargs)
-        LOG.debug("[%(backend_name)s] Leave %(method_name)s",
+                  {"method_name": method_name, "backend_name": backend_name})
        return result
    return wrapper
# for san_password or san_private_key, not relevant to our driver.
pass
- @log_debug_trace
+ @pure_driver_debug_trace
def create_volume(self, volume):
"""Creates a volume."""
vol_name = self._get_vol_name(volume)
self._enable_replication_if_needed(current_array, volume)
- @log_debug_trace
+ @pure_driver_debug_trace
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
vol_name = self._get_vol_name(volume)
LOG.warning(_LW("Adding Volume to Protection Group "
"failed with message: %s"), err.text)
- @log_debug_trace
+ @pure_driver_debug_trace
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_name = self._get_vol_name(volume)
vol_size = vol_size * units.Gi
array.extend_volume(vol_name, vol_size)
- @log_debug_trace
+ @pure_driver_debug_trace
def delete_volume(self, volume):
"""Disconnect all hosts and delete the volume"""
vol_name = self._get_vol_name(volume)
LOG.warning(_LW("Volume deletion failed with message: %s"),
err.text)
- @log_debug_trace
+ @pure_driver_debug_trace
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
vol_name, snap_suff = self._get_snap_name(snapshot).split(".")
current_array.create_snapshot(vol_name, suffix=snap_suff)
- @log_debug_trace
+ @pure_driver_debug_trace
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
return result
- @log_debug_trace
+ @pure_driver_debug_trace
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate connection."""
# Get current array in case we have failed over via replication.
current_array = self._get_current_array()
self._disconnect(current_array, volume, connector, **kwargs)
- @log_debug_trace
+ @pure_driver_debug_trace
def _disconnect_host(self, array, host_name, vol_name):
"""Return value indicates if host was deleted on array or not"""
try:
return False
- @log_debug_trace
+ @pure_driver_debug_trace
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service.
return thin_provisioning
- @log_debug_trace
+ @pure_driver_debug_trace
def extend_volume(self, volume, new_size):
"""Extend volume to new_size."""
current_array = self._get_current_array()
current_array.set_pgroup(pgroup_name, addvollist=[vol_name])
- @log_debug_trace
+ @pure_driver_debug_trace
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
finally:
self._delete_pgsnapshot(tmp_pgsnap_name)
- @log_debug_trace
+ @pure_driver_debug_trace
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
model_update = {'status': 'available'}
return model_update, return_volumes
- @log_debug_trace
+ @pure_driver_debug_trace
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
return model_update, volume_updates
- @log_debug_trace
+ @pure_driver_debug_trace
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
return None, None, None
- @log_debug_trace
+ @pure_driver_debug_trace
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
LOG.warning(_LW("Unable to delete Protection Group "
"Snapshot: %s"), err.text)
- @log_debug_trace
+ @pure_driver_debug_trace
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
existing_ref=existing_ref,
reason=_("Unable to find Purity ref with name=%s") % ref_vol_name)
- @log_debug_trace
+ @pure_driver_debug_trace
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
raise_not_exist=True)
return None
- @log_debug_trace
+ @pure_driver_debug_trace
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
{"old_name": old_name, "error": err.text})
return new_name
- @log_debug_trace
+ @pure_driver_debug_trace
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
return True, None
- @log_debug_trace
+ @pure_driver_debug_trace
def _disable_replication(self, volume):
"""Disable replication on the given volume."""
LOG.error(_LE("Disable replication on volume failed with "
"message: %s"), err.text)
- @log_debug_trace
+ @pure_driver_debug_trace
def failover_host(self, context, volumes, secondary_id=None):
"""Failover backend to a secondary array
return False
# Any unexpected exception to be handled by caller.
- @log_debug_trace
+ @pure_driver_debug_trace
@utils.retry(exception.PureDriverException,
REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL,
REPL_SETTINGS_PROPAGATE_MAX_RETRIES)
_('Protection Group not '
'ready.'))
- @log_debug_trace
+ @pure_driver_debug_trace
@utils.retry(exception.PureDriverException,
REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL,
REPL_SETTINGS_PROPAGATE_MAX_RETRIES)
def _get_pgroup_name_on_target(self, source_array_name, pgroup_name):
return "%s:%s" % (source_array_name, pgroup_name)
- @log_debug_trace
+ @pure_driver_debug_trace
def _setup_replicated_pgroups(self, primary, secondaries, pg_name,
replication_interval, retention_policy):
self._create_protection_group_if_not_exist(
# Start replication on the PG.
primary.set_pgroup(pg_name, replicate_enabled=True)
- @log_debug_trace
+ @pure_driver_debug_trace
def _generate_replication_retention(self):
"""Generates replication retention settings in Purity compatible format
)
return replication_retention
- @log_debug_trace
+ @pure_driver_debug_trace
def _get_latest_replicated_pg_snap(self,
target_array,
source_array_name,
return pg_snap
- @log_debug_trace
+ @pure_driver_debug_trace
def _create_protection_group_if_not_exist(self, source_array, pgname):
try:
source_array.create_pgroup(pgname)
return host
return None
- @log_debug_trace
+ @pure_driver_debug_trace
def initialize_connection(self, volume, connector, initiator_data=None):
"""Allow connection to connector and return connection info."""
connection = self._connect(volume, connector, initiator_data)
return [port["wwn"] for port in ports if port["wwn"]]
@fczm_utils.AddFCZone
- @log_debug_trace
+ @pure_driver_debug_trace
def initialize_connection(self, volume, connector, initiator_data=None):
"""Allow connection to connector and return connection info."""
current_array = self._get_current_array()
return init_targ_map
@fczm_utils.RemoveFCZone
- @log_debug_trace
+ @pure_driver_debug_trace
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate connection."""
current_array = self._get_current_array()