From 6afbf2c1c56faf07908e0ba879d3a49592fdd7fe Mon Sep 17 00:00:00 2001
From: Zach Carmichael
Date: Thu, 17 Oct 2024 10:55:33 -0700
Subject: [PATCH] Add stacklevel across captum to satisfy flake8

Summary:
Add stacklevel to silence flake8:

    Warning (FLAKE8) B028 No explicit stacklevel argument found.

The warn method from the warnings module defaults to stacklevel=1;
stacklevel=2 is used in some spots where the user might need more context.

Differential Revision: D64518463
---
 captum/_utils/av.py                           |  3 ++-
 captum/_utils/progress.py                     |  3 ++-
 .../attr/_core/guided_backprop_deconvnet.py   |  3 ++-
 captum/attr/_core/guided_grad_cam.py          |  3 ++-
 captum/attr/_core/lime.py                     | 10 +++++++---
 .../attr/_core/neuron/neuron_conductance.py   |  3 ++-
 captum/attr/_core/shapley_value.py            |  6 ++++--
 captum/attr/_models/base.py                   |  3 ++-
 captum/attr/_utils/batching.py                |  9 ++++++---
 .../influence/_core/similarity_influence.py   |  6 +++++-
 captum/influence/_core/tracincp.py            |  6 ++++--
 .../_core/tracincp_fast_rand_proj.py          |  9 ++++++---
 captum/influence/_utils/common.py             | 20 +++++++++++++------
 captum/insights/example.py                    |  3 ++-
 captum/metrics/_utils/batching.py             |  3 ++-
 .../robust/_core/metrics/attack_comparator.py |  3 ++-
 16 files changed, 64 insertions(+), 29 deletions(-)

diff --git a/captum/_utils/av.py b/captum/_utils/av.py
index 376bac1f4..2ab4ae268 100644
--- a/captum/_utils/av.py
+++ b/captum/_utils/av.py
@@ -330,7 +330,8 @@ def _manage_loading_layers(
                 "Overwriting activations: load_from_disk is set to False. Removing all "
                 f"activations matching specified parameters {{path: {path}, "
                 f"model_id: {model_id}, layers: {layers}, identifier: {identifier}}} "
-                "before generating new activations."
+                "before generating new activations.",
+                stacklevel=1,
             )
             for layer in layers:
                 files = glob.glob(
diff --git a/captum/_utils/progress.py b/captum/_utils/progress.py
index 0e2a42d3a..47e391735 100644
--- a/captum/_utils/progress.py
+++ b/captum/_utils/progress.py
@@ -214,7 +214,8 @@ def progress(
             warnings.warn(
                 "Tried to show progress with tqdm "
                 "but tqdm is not installed. "
-                "Fall back to simply print out the progress."
+                "Fall back to simply print out the progress.",
+                stacklevel=1,
             )
         return SimpleProgress(
             iterable, desc=desc, total=total, file=file, mininterval=mininterval
diff --git a/captum/attr/_core/guided_backprop_deconvnet.py b/captum/attr/_core/guided_backprop_deconvnet.py
index 071aea398..60359bc0d 100644
--- a/captum/attr/_core/guided_backprop_deconvnet.py
+++ b/captum/attr/_core/guided_backprop_deconvnet.py
@@ -72,7 +72,8 @@ def attribute(
         # set hooks for overriding ReLU gradients
         warnings.warn(
             "Setting backward hooks on ReLU activations."
-            "The hooks will be removed after the attribution is finished"
+            "The hooks will be removed after the attribution is finished",
+            stacklevel=1,
         )
         try:
             self.model.apply(self._register_hooks)
diff --git a/captum/attr/_core/guided_grad_cam.py b/captum/attr/_core/guided_grad_cam.py
index 5a7424437..bb9beb6a0 100644
--- a/captum/attr/_core/guided_grad_cam.py
+++ b/captum/attr/_core/guided_grad_cam.py
@@ -225,7 +225,8 @@ def attribute(
                 warnings.warn(
                     "Couldn't appropriately interpolate GradCAM attributions for some "
                     "input tensors, returning empty tensor for corresponding "
-                    "attributions."
+ "attributions.", + stacklevel=1, ) output_attr.append(torch.empty(0)) diff --git a/captum/attr/_core/lime.py b/captum/attr/_core/lime.py index cf432007d..21bae8677 100644 --- a/captum/attr/_core/lime.py +++ b/captum/attr/_core/lime.py @@ -715,7 +715,8 @@ def construct_feature_mask( if min_interp_features != 0: warnings.warn( "Minimum element in feature mask is not 0, shifting indices to" - " start at 0." + " start at 0.", + stacklevel=2, ) feature_mask = tuple( single_mask - min_interp_features for single_mask in feature_mask @@ -1157,7 +1158,8 @@ def _attribute_kwargs( # type: ignore "Attempting to construct interpretable model with > 10000 features." "This can be very slow or lead to OOM issues. Please provide a feature" "mask which groups input features to reduce the number of interpretable" - "features. " + "features. ", + stacklevel=1, ) coefs: Tensor @@ -1171,7 +1173,9 @@ def _attribute_kwargs( # type: ignore "You are providing multiple inputs for Lime / Kernel SHAP " "attributions. This trains a separate interpretable model " "for each example, which can be time consuming. It is " - "recommended to compute attributions for one example at a time." + "recommended to compute attributions for one example at a " + "time.", + stacklevel=1, ) output_list = [] for ( diff --git a/captum/attr/_core/neuron/neuron_conductance.py b/captum/attr/_core/neuron/neuron_conductance.py index dcbc3eceb..fcb8bafb5 100644 --- a/captum/attr/_core/neuron/neuron_conductance.py +++ b/captum/attr/_core/neuron/neuron_conductance.py @@ -283,7 +283,8 @@ def attribute( "The neuron_selector provided is a callable. Please ensure that this" " function only selects neurons from the given layer; aggregating" " or performing other operations on the tensor may lead to inaccurate" - " results." + " results.", + stacklevel=1, ) # pyre-fixme[6]: For 1st argument expected `Tensor` but got # `TensorOrTupleOfTensorsGeneric`. diff --git a/captum/attr/_core/shapley_value.py b/captum/attr/_core/shapley_value.py index 734d58fa9..081bd7506 100644 --- a/captum/attr/_core/shapley_value.py +++ b/captum/attr/_core/shapley_value.py @@ -411,7 +411,8 @@ def attribute( warnings.warn( "Feature mask is missing some integers between 0 and " "num_features, for optimal performance, make sure each" - " consecutive integer corresponds to a feature." + " consecutive integer corresponds to a feature.", + stacklevel=1, ) # modified_eval dimensions: 1D tensor with length # equal to #num_examples * #features in batch @@ -858,7 +859,8 @@ def attribute( warnings.warn( "You are attempting to compute Shapley Values with at least 10 " "features, which will likely be very computationally expensive." - "Consider using Shapley Value Sampling instead." + "Consider using Shapley Value Sampling instead.", + stacklevel=1, ) return super().attribute.__wrapped__( diff --git a/captum/attr/_models/base.py b/captum/attr/_models/base.py index ac3eecbd5..8d0c3f6f4 100644 --- a/captum/attr/_models/base.py +++ b/captum/attr/_models/base.py @@ -211,7 +211,8 @@ def configure_interpretable_embedding_layer( "embeddings and compute attributions for each embedding dimension. " "The original embedding layer must be set " "back by calling `remove_interpretable_embedding_layer` function " - "after model interpretation is finished. " + "after model interpretation is finished. 
", + stacklevel=1, ) interpretable_emb = InterpretableEmbeddingBase( embedding_layer, embedding_layer_name diff --git a/captum/attr/_utils/batching.py b/captum/attr/_utils/batching.py index 7ea76a025..96e1e20a4 100644 --- a/captum/attr/_utils/batching.py +++ b/captum/attr/_utils/batching.py @@ -51,7 +51,8 @@ def _batch_attribution( warnings.warn( "Internal batch size cannot be less than the number of input examples. " "Defaulting to internal batch size of %d equal to the number of examples." - % num_examples + % num_examples, + stacklevel=1, ) # Number of steps for each batch step_count = max(1, internal_batch_size // num_examples) @@ -62,7 +63,8 @@ def _batch_attribution( "This method computes finite differences between evaluations at " "consecutive steps, so internal batch size must be at least twice " "the number of examples. Defaulting to internal batch size of %d" - " equal to twice the number of examples." % (2 * num_examples) + " equal to twice the number of examples." % (2 * num_examples), + stacklevel=1, ) total_attr = None @@ -161,7 +163,8 @@ def _batched_generator( warnings.warn( """It looks like that the attribution for a gradient-based method is computed in a `torch.no_grad` block or perhaps the inputs have no - requires_grad.""" + requires_grad.""", + stacklevel=1, ) if internal_batch_size is None: # pyre-fixme[7]: Expected `Iterator[Tuple[typing.Tuple[Tensor, ...], typing.A... diff --git a/captum/influence/_core/similarity_influence.py b/captum/influence/_core/similarity_influence.py index 0a1ab1c86..1583658cd 100644 --- a/captum/influence/_core/similarity_influence.py +++ b/captum/influence/_core/similarity_influence.py @@ -297,7 +297,11 @@ def influence( # type: ignore[override] "returned as a tensor with [inputs_idx, src_dataset_idx] pairs " "which may have corrupted similarity scores." ) - warnings.warn(zero_warning, RuntimeWarning) + warnings.warn( + zero_warning, + RuntimeWarning, + stacklevel=1, + ) key = "-".join(["zero_acts", layer]) influences[key] = zero_acts diff --git a/captum/influence/_core/tracincp.py b/captum/influence/_core/tracincp.py index 115bd55e8..a603634d5 100644 --- a/captum/influence/_core/tracincp.py +++ b/captum/influence/_core/tracincp.py @@ -178,7 +178,8 @@ def __init__( "Unable to determine the number of batches in training dataset " "`train_dataset`. Therefore, if showing the progress of computations, " "only the number of batches processed can be displayed, and not the " - "percentage completion of the computation, nor any time estimates." + "percentage completion of the computation, nor any time estimates.", + stacklevel=1, ) @property @@ -1232,7 +1233,8 @@ def _self_influence_by_checkpoints( "Therefore, if showing the progress of the computation of self " "influence scores, only the number of batches processed can be " "displayed, and not the percentage completion of the computation, " - "nor any time estimates." + "nor any time estimates.", + stacklevel=1, ) # pyre-fixme[3]: Return type must be annotated. diff --git a/captum/influence/_core/tracincp_fast_rand_proj.py b/captum/influence/_core/tracincp_fast_rand_proj.py index 7e30c25a6..26f5212fa 100644 --- a/captum/influence/_core/tracincp_fast_rand_proj.py +++ b/captum/influence/_core/tracincp_fast_rand_proj.py @@ -579,7 +579,8 @@ def _self_influence_by_checkpoints( "Therefore, if showing the progress of the computation of self " "influence scores, only the number of batches processed can be " "displayed, and not the percentage completion of the computation, " - "nor any time estimates." 
+ "nor any time estimates.", + stacklevel=1, ) # pyre-fixme[53]: Captured variable `inputs_len` is not annotated. @@ -1040,7 +1041,8 @@ def __init__( "each call to `influence` to compute influence scores or proponents " "will be slower, but may avoid running out of memory." ) - % "`TracInCPFast`" + % "`TracInCPFast`", + stacklevel=1, ) # pyre-fixme[4]: Attribute must be annotated. @@ -1230,7 +1232,8 @@ def self_influence( "random projections results only in approximate self influence " "scores, there is no reason to use `TracInCPFastRandProj` when " "calculating self influence scores." - ) + ), + stacklevel=1, ) raise NotImplementedError diff --git a/captum/influence/_utils/common.py b/captum/influence/_utils/common.py index 788c4fb0c..4a25ccb72 100644 --- a/captum/influence/_utils/common.py +++ b/captum/influence/_utils/common.py @@ -420,7 +420,8 @@ def _self_influence_by_batches_helper( "Therefore, if showing the progress of the computation of self " "influence scores, only the number of batches processed can be " "displayed, and not the percentage completion of the computation, " - "nor any time estimates." + "nor any time estimates.", + stacklevel=1, ) # then create the progress bar inputs_dataset = progress( @@ -501,7 +502,8 @@ def _check_loss_fn( f'please set the reduction attribute of `{loss_fn_name}` to "mean", ' f'i.e. `{loss_fn_name}.reduction = "mean"`. Note that if ' "`sample_wise_grads_per_batch` is True, the implementation " - "assumes the reduction is either a sum or mean reduction." + "assumes the reduction is either a sum or mean reduction.", + stacklevel=1, ) reduction_type = "sum" else: @@ -510,7 +512,8 @@ def _check_loss_fn( "`sample_wise_grads_per_batch` is False, the implementation " f'assumes that `{loss_fn_name}` is a "per-example" loss function (see ' f"documentation for `{loss_fn_name}` for details). Please ensure " - "that this is the case." + "that this is the case.", + stacklevel=1, ) return reduction_type @@ -531,7 +534,8 @@ def _set_active_parameters(model: Module, layers: List[str]) -> List[Module]: warnings.warn( "Setting required grads for layer: {}, name: {}".format( ".".join(layer), name - ) + ), + stacklevel=1, ) param.requires_grad = True return layer_modules @@ -556,7 +560,8 @@ def _progress_bar_constructor( f"of the computation of {quantities_name}, " "only the number of batches processed can be " "displayed, and not the percentage completion of the computation, " - "nor any time estimates." + "nor any time estimates.", + stacklevel=1, ) return progress( @@ -989,7 +994,10 @@ def _compute_batch_loss_influence_function_base( "`reduction='sum'` loss function, or a `reduction='none'` " "and set `sample_grads_per_batch` to false." ) - warnings.warn(msg) + warnings.warn( + msg, + stacklevel=1, + ) return _loss * multiplier elif reduction_type == "sum": return _loss diff --git a/captum/insights/example.py b/captum/insights/example.py index df3b95af0..afd5da7c5 100644 --- a/captum/insights/example.py +++ b/captum/insights/example.py @@ -7,7 +7,8 @@ from captum.insights.attr_vis.example import * # noqa warnings.warn( - "Deprecated. Please import from captum.insights.attr_vis.example instead." + "Deprecated. 
Please import from captum.insights.attr_vis.example instead.", + stacklevel=1, ) diff --git a/captum/metrics/_utils/batching.py b/captum/metrics/_utils/batching.py index ce863d3f3..3f4eaff6b 100644 --- a/captum/metrics/_utils/batching.py +++ b/captum/metrics/_utils/batching.py @@ -61,7 +61,8 @@ def _divide_and_aggregate_metrics( "to compute the metrics, contains at least an instance of " "the original example and doesn't exceed the number of " "expanded n_perturb_samples." - ).format(max_examples_per_batch, bsz) + ).format(max_examples_per_batch, bsz), + stacklevel=1, ) max_inps_per_batch = ( diff --git a/captum/robust/_core/metrics/attack_comparator.py b/captum/robust/_core/metrics/attack_comparator.py index d568cf2a0..649fd6283 100644 --- a/captum/robust/_core/metrics/attack_comparator.py +++ b/captum/robust/_core/metrics/attack_comparator.py @@ -399,7 +399,8 @@ def _check_and_evaluate(input_list, key_list): for key in attack.additional_args: if key not in kwargs: warnings.warn( - f"Additional sample arg {key} not provided for {attack_key}" + f"Additional sample arg {key} not provided for {attack_key}", + stacklevel=1, ) else: additional_attack_args[key] = kwargs[key]
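
Note on the stacklevel semantics mentioned in the summary (illustrative only, not part of the patch): stacklevel=1, the warnings.warn default, attributes the warning to the line inside the library that calls warn, while stacklevel=2 attributes it to the caller's line, which is why a few of the call sites above (e.g. in lime.py) use 2. A minimal sketch, with hypothetical module and function names:

    # stacklevel_demo.py -- hypothetical example, not part of captum
    import warnings


    def _library_helper() -> None:
        # stacklevel=1 (the default): the warning is reported against this
        # line inside the library, giving the user little context.
        warnings.warn("reported at the warn() call itself", stacklevel=1)
        # stacklevel=2: the warning is reported against the caller's line,
        # i.e. the _library_helper() call inside user_code() below.
        warnings.warn("reported at the caller", stacklevel=2)


    def user_code() -> None:
        _library_helper()


    if __name__ == "__main__":
        user_code()

Running this with `python -W always stacklevel_demo.py` should show the first UserWarning pointing at the warnings.warn line inside _library_helper and the second pointing at the _library_helper() call inside user_code.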