
Add stacklevel across captum to satisfy flake8
Summary:
Add stacklevel to silence flake8:

Warning  (FLAKE8) B028
    No explicit stacklevel argument found. The warn method from the warnings
    module uses a stacklevel of 1 by default.

stacklevel=1 is kept as the default; stacklevel=2 is used in some spots where the user might need more context.
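
As a quick illustration, here is a minimal sketch (not part of this commit; the function names are made up) of what `stacklevel` changes: with the default `stacklevel=1`, Python attributes the warning to the `warnings.warn(...)` line inside the library, while `stacklevel=2` attributes it to the caller's line, telling the user which of their calls triggered it.

import warnings

def library_helper() -> None:
    # With stacklevel=1 (the default), the warning report points at the
    # warnings.warn(...) line below, inside the library code.
    # With stacklevel=2, it points at the caller's line instead.
    warnings.warn("falling back to a slow path", UserWarning, stacklevel=2)

def user_code() -> None:
    library_helper()  # with stacklevel=2, the warning is attributed here

user_code()

Either way the message text is the same; only the file and line shown in the warning report change.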

Differential Revision: D64518463
Zach Carmichael authored and facebook-github-bot committed Oct 17, 2024
1 parent 74540b8 commit 6afbf2c
Showing 16 changed files with 64 additions and 29 deletions.
3 changes: 2 additions & 1 deletion captum/_utils/av.py
@@ -330,7 +330,8 @@ def _manage_loading_layers(
     "Overwriting activations: load_from_disk is set to False. Removing all "
     f"activations matching specified parameters {{path: {path}, "
     f"model_id: {model_id}, layers: {layers}, identifier: {identifier}}} "
-    "before generating new activations."
+    "before generating new activations.",
+    stacklevel=1,
 )
 for layer in layers:
     files = glob.glob(
3 changes: 2 additions & 1 deletion captum/_utils/progress.py
@@ -214,7 +214,8 @@ def progress(
 warnings.warn(
     "Tried to show progress with tqdm "
     "but tqdm is not installed. "
-    "Fall back to simply print out the progress."
+    "Fall back to simply print out the progress.",
+    stacklevel=1,
 )
 return SimpleProgress(
     iterable, desc=desc, total=total, file=file, mininterval=mininterval
3 changes: 2 additions & 1 deletion captum/attr/_core/guided_backprop_deconvnet.py
@@ -72,7 +72,8 @@ def attribute(
 # set hooks for overriding ReLU gradients
 warnings.warn(
     "Setting backward hooks on ReLU activations."
-    "The hooks will be removed after the attribution is finished"
+    "The hooks will be removed after the attribution is finished",
+    stacklevel=1,
 )
 try:
     self.model.apply(self._register_hooks)
3 changes: 2 additions & 1 deletion captum/attr/_core/guided_grad_cam.py
@@ -225,7 +225,8 @@ def attribute(
 warnings.warn(
     "Couldn't appropriately interpolate GradCAM attributions for some "
     "input tensors, returning empty tensor for corresponding "
-    "attributions."
+    "attributions.",
+    stacklevel=1,
 )
 output_attr.append(torch.empty(0))

10 changes: 7 additions & 3 deletions captum/attr/_core/lime.py
@@ -715,7 +715,8 @@ def construct_feature_mask(
 if min_interp_features != 0:
     warnings.warn(
         "Minimum element in feature mask is not 0, shifting indices to"
-        " start at 0."
+        " start at 0.",
+        stacklevel=2,
     )
     feature_mask = tuple(
         single_mask - min_interp_features for single_mask in feature_mask
@@ -1157,7 +1158,8 @@ def _attribute_kwargs(  # type: ignore
     "Attempting to construct interpretable model with > 10000 features."
     "This can be very slow or lead to OOM issues. Please provide a feature"
     "mask which groups input features to reduce the number of interpretable"
-    "features. "
+    "features. ",
+    stacklevel=1,
 )

 coefs: Tensor
@@ -1171,7 +1173,9 @@
     "You are providing multiple inputs for Lime / Kernel SHAP "
     "attributions. This trains a separate interpretable model "
     "for each example, which can be time consuming. It is "
-    "recommended to compute attributions for one example at a time."
+    "recommended to compute attributions for one example at a "
+    "time.",
+    stacklevel=1,
 )
 output_list = []
 for (
3 changes: 2 additions & 1 deletion captum/attr/_core/neuron/neuron_conductance.py
@@ -283,7 +283,8 @@ def attribute(
     "The neuron_selector provided is a callable. Please ensure that this"
     " function only selects neurons from the given layer; aggregating"
     " or performing other operations on the tensor may lead to inaccurate"
-    " results."
+    " results.",
+    stacklevel=1,
 )
 # pyre-fixme[6]: For 1st argument expected `Tensor` but got
 # `TensorOrTupleOfTensorsGeneric`.
6 changes: 4 additions & 2 deletions captum/attr/_core/shapley_value.py
@@ -411,7 +411,8 @@ def attribute(
 warnings.warn(
     "Feature mask is missing some integers between 0 and "
     "num_features, for optimal performance, make sure each"
-    " consecutive integer corresponds to a feature."
+    " consecutive integer corresponds to a feature.",
+    stacklevel=1,
 )
 # modified_eval dimensions: 1D tensor with length
 # equal to #num_examples * #features in batch
@@ -858,7 +859,8 @@ def attribute(
 warnings.warn(
     "You are attempting to compute Shapley Values with at least 10 "
     "features, which will likely be very computationally expensive."
-    "Consider using Shapley Value Sampling instead."
+    "Consider using Shapley Value Sampling instead.",
+    stacklevel=1,
 )

 return super().attribute.__wrapped__(
3 changes: 2 additions & 1 deletion captum/attr/_models/base.py
@@ -211,7 +211,8 @@ def configure_interpretable_embedding_layer(
     "embeddings and compute attributions for each embedding dimension. "
     "The original embedding layer must be set "
     "back by calling `remove_interpretable_embedding_layer` function "
-    "after model interpretation is finished. "
+    "after model interpretation is finished. ",
+    stacklevel=1,
 )
 interpretable_emb = InterpretableEmbeddingBase(
     embedding_layer, embedding_layer_name
9 changes: 6 additions & 3 deletions captum/attr/_utils/batching.py
@@ -51,7 +51,8 @@ def _batch_attribution(
 warnings.warn(
     "Internal batch size cannot be less than the number of input examples. "
     "Defaulting to internal batch size of %d equal to the number of examples."
-    % num_examples
+    % num_examples,
+    stacklevel=1,
 )
 # Number of steps for each batch
 step_count = max(1, internal_batch_size // num_examples)
@@ -62,7 +63,8 @@
     "This method computes finite differences between evaluations at "
     "consecutive steps, so internal batch size must be at least twice "
     "the number of examples. Defaulting to internal batch size of %d"
-    " equal to twice the number of examples." % (2 * num_examples)
+    " equal to twice the number of examples." % (2 * num_examples),
+    stacklevel=1,
 )

 total_attr = None
@@ -161,7 +163,8 @@ def _batched_generator(
 warnings.warn(
     """It looks like that the attribution for a gradient-based method is
 computed in a `torch.no_grad` block or perhaps the inputs have no
-requires_grad."""
+requires_grad.""",
+    stacklevel=1,
 )
 if internal_batch_size is None:
     # pyre-fixme[7]: Expected `Iterator[Tuple[typing.Tuple[Tensor, ...], typing.A...
6 changes: 5 additions & 1 deletion captum/influence/_core/similarity_influence.py
@@ -297,7 +297,11 @@ def influence(  # type: ignore[override]
     "returned as a tensor with [inputs_idx, src_dataset_idx] pairs "
     "which may have corrupted similarity scores."
 )
-warnings.warn(zero_warning, RuntimeWarning)
+warnings.warn(
+    zero_warning,
+    RuntimeWarning,
+    stacklevel=1,
+)
 key = "-".join(["zero_acts", layer])
 influences[key] = zero_acts

6 changes: 4 additions & 2 deletions captum/influence/_core/tracincp.py
@@ -178,7 +178,8 @@ def __init__(
     "Unable to determine the number of batches in training dataset "
     "`train_dataset`. Therefore, if showing the progress of computations, "
     "only the number of batches processed can be displayed, and not the "
-    "percentage completion of the computation, nor any time estimates."
+    "percentage completion of the computation, nor any time estimates.",
+    stacklevel=1,
 )

 @property
@@ -1232,7 +1233,8 @@ def _self_influence_by_checkpoints(
     "Therefore, if showing the progress of the computation of self "
     "influence scores, only the number of batches processed can be "
     "displayed, and not the percentage completion of the computation, "
-    "nor any time estimates."
+    "nor any time estimates.",
+    stacklevel=1,
 )

 # pyre-fixme[3]: Return type must be annotated.
9 changes: 6 additions & 3 deletions captum/influence/_core/tracincp_fast_rand_proj.py
@@ -579,7 +579,8 @@ def _self_influence_by_checkpoints(
     "Therefore, if showing the progress of the computation of self "
     "influence scores, only the number of batches processed can be "
     "displayed, and not the percentage completion of the computation, "
-    "nor any time estimates."
+    "nor any time estimates.",
+    stacklevel=1,
 )

 # pyre-fixme[53]: Captured variable `inputs_len` is not annotated.
@@ -1040,7 +1041,8 @@ def __init__(
         "each call to `influence` to compute influence scores or proponents "
         "will be slower, but may avoid running out of memory."
     )
-    % "`TracInCPFast`"
+    % "`TracInCPFast`",
+    stacklevel=1,
 )

 # pyre-fixme[4]: Attribute must be annotated.
@@ -1230,7 +1232,8 @@ def self_influence(
         "random projections results only in approximate self influence "
         "scores, there is no reason to use `TracInCPFastRandProj` when "
         "calculating self influence scores."
-    )
+    ),
+    stacklevel=1,
 )
 raise NotImplementedError

20 changes: 14 additions & 6 deletions captum/influence/_utils/common.py
@@ -420,7 +420,8 @@ def _self_influence_by_batches_helper(
     "Therefore, if showing the progress of the computation of self "
     "influence scores, only the number of batches processed can be "
     "displayed, and not the percentage completion of the computation, "
-    "nor any time estimates."
+    "nor any time estimates.",
+    stacklevel=1,
 )
 # then create the progress bar
 inputs_dataset = progress(
@@ -501,7 +502,8 @@ def _check_loss_fn(
         f'please set the reduction attribute of `{loss_fn_name}` to "mean", '
         f'i.e. `{loss_fn_name}.reduction = "mean"`. Note that if '
         "`sample_wise_grads_per_batch` is True, the implementation "
-        "assumes the reduction is either a sum or mean reduction."
+        "assumes the reduction is either a sum or mean reduction.",
+        stacklevel=1,
     )
     reduction_type = "sum"
 else:
@@ -510,7 +512,8 @@
     "`sample_wise_grads_per_batch` is False, the implementation "
     f'assumes that `{loss_fn_name}` is a "per-example" loss function (see '
     f"documentation for `{loss_fn_name}` for details). Please ensure "
-    "that this is the case."
+    "that this is the case.",
+    stacklevel=1,
 )

 return reduction_type
@@ -531,7 +534,8 @@ def _set_active_parameters(model: Module, layers: List[str]) -> List[Module]:
 warnings.warn(
     "Setting required grads for layer: {}, name: {}".format(
         ".".join(layer), name
-    )
+    ),
+    stacklevel=1,
 )
 param.requires_grad = True
 return layer_modules
@@ -556,7 +560,8 @@ def _progress_bar_constructor(
     f"of the computation of {quantities_name}, "
     "only the number of batches processed can be "
     "displayed, and not the percentage completion of the computation, "
-    "nor any time estimates."
+    "nor any time estimates.",
+    stacklevel=1,
 )

 return progress(
@@ -989,7 +994,10 @@ def _compute_batch_loss_influence_function_base(
         "`reduction='sum'` loss function, or a `reduction='none'` "
         "and set `sample_grads_per_batch` to false."
     )
-    warnings.warn(msg)
+    warnings.warn(
+        msg,
+        stacklevel=1,
+    )
     return _loss * multiplier
 elif reduction_type == "sum":
     return _loss
3 changes: 2 additions & 1 deletion captum/insights/example.py
@@ -7,7 +7,8 @@
 from captum.insights.attr_vis.example import *  # noqa

 warnings.warn(
-    "Deprecated. Please import from captum.insights.attr_vis.example instead."
+    "Deprecated. Please import from captum.insights.attr_vis.example instead.",
+    stacklevel=1,
 )

3 changes: 2 additions & 1 deletion captum/metrics/_utils/batching.py
@@ -61,7 +61,8 @@ def _divide_and_aggregate_metrics(
         "to compute the metrics, contains at least an instance of "
         "the original example and doesn't exceed the number of "
         "expanded n_perturb_samples."
-    ).format(max_examples_per_batch, bsz)
+    ).format(max_examples_per_batch, bsz),
+    stacklevel=1,
 )

 max_inps_per_batch = (
3 changes: 2 additions & 1 deletion captum/robust/_core/metrics/attack_comparator.py
@@ -399,7 +399,8 @@ def _check_and_evaluate(input_list, key_list):
 for key in attack.additional_args:
     if key not in kwargs:
         warnings.warn(
-            f"Additional sample arg {key} not provided for {attack_key}"
+            f"Additional sample arg {key} not provided for {attack_key}",
+            stacklevel=1,
         )
     else:
         additional_attack_args[key] = kwargs[key]
