Skip to content

Commit

Permalink
Create TARGETS for captum/attr (#1263)
Browse files Browse the repository at this point in the history
Summary:

Split captum/attr into a separate TARGETS file

Differential Revision: D55293289
  • Loading branch information
yucu authored and facebook-github-bot committed Mar 25, 2024
1 parent c6e9c22 commit 7c31b01
Show file tree
Hide file tree
Showing 13 changed files with 202 additions and 173 deletions.
2 changes: 1 addition & 1 deletion captum/_utils/av.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

import captum._utils.common as common
import torch
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr import LayerActivation
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
Expand Down
4 changes: 1 addition & 3 deletions captum/concept/_core/tcav.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,7 @@
from captum._utils.av import AV
from captum._utils.common import _format_tensor_into_tuples, _get_module_from_name
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr._core.layer.layer_gradient_x_activation import LayerGradientXActivation
from captum.attr._utils.attribution import LayerAttribution
from captum.attr import LayerActivation, LayerAttribution, LayerGradientXActivation
from captum.concept._core.cav import CAV
from captum.concept._core.concept import Concept, ConceptInterpreter
from captum.concept._utils.classifier import Classifier, DefaultClassifier
Expand Down
2 changes: 1 addition & 1 deletion captum/influence/_core/similarity_influence.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import captum._utils.common as common
import torch
from captum._utils.av import AV
from captum.attr._core.layer.layer_activation import LayerActivation
from captum.attr import LayerActivation
from captum.influence._core.influence import DataInfluence
from torch import Tensor
from torch.nn import Module
Expand Down
3 changes: 1 addition & 2 deletions captum/robust/_core/metrics/attack_comparator.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,7 @@
_format_additional_forward_args,
_reduce_list,
)
from captum.attr._utils.stat import Max, Mean, Min
from captum.attr._utils.summarizer import Summarizer
from captum.attr import Max, Mean, Min, Summarizer
from captum.log import log_usage
from captum.robust._core.perturbation import Perturbation
from torch import Tensor
Expand Down
39 changes: 39 additions & 0 deletions tests/attr/helpers/attribution_delta_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
from typing import Tuple, Union

import torch
from captum._utils.typing import Tensor
from tests.helpers import BaseTest


def assert_attribution_delta(
    test: BaseTest,
    inputs: Union[Tensor, Tuple[Tensor, ...]],
    attributions: Union[Tensor, Tuple[Tensor, ...]],
    n_samples: int,
    delta: Tensor,
    delta_thresh: Union[float, Tensor] = 0.0006,
    is_layer: bool = False,
) -> None:
    """Validate attribution shapes and the convergence delta.

    For non-layer attributions, each attribution tensor must have the same
    shape as its corresponding input. ``delta`` is expected to contain
    ``batch_size * n_samples`` entries; it is averaged per example and then
    checked against ``delta_thresh`` via :func:`assert_delta`.
    """
    if not is_layer:
        for inp, attr in zip(inputs, attributions):
            test.assertEqual(attr.shape, inp.shape)

    bsz = inputs[0].shape[0] if isinstance(inputs, tuple) else inputs.shape[0]
    test.assertEqual([bsz * n_samples], list(delta.shape))

    per_example_delta = torch.mean(delta.reshape(bsz, -1), dim=1)
    assert_delta(test, per_example_delta, delta_thresh)


def assert_delta(
    test: BaseTest, delta: Tensor, delta_thresh: Union[Tensor, float] = 0.0006
) -> None:
    """Assert that every entry of ``delta`` is below ``delta_thresh`` in
    absolute value."""
    message = "Sum of SHAP values {} does" " not match the difference of endpoints.".format(
        delta
    )
    within_threshold = (delta.abs() < delta_thresh).all()
    test.assertTrue(within_threshold, message)
49 changes: 49 additions & 0 deletions tests/attr/helpers/get_config_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
from typing import Any, Tuple

import torch
from captum._utils.gradient import compute_gradients
from tests.helpers.basic_models import BasicModel, BasicModel5_MultiArgs
from torch import Tensor
from torch.nn import Module


def get_basic_config() -> Tuple[Module, Tensor, Tensor, Any]:
    """Return a ``BasicModel`` test fixture.

    Returns a 4-tuple of (model, input tensor, manually precomputed expected
    gradients, additional forward args — ``None`` for this model).
    """
    # `inp` (not `input`) to avoid shadowing the builtin.
    inp = torch.tensor([1.0, 2.0, 3.0, 0.0, -1.0, 7.0], requires_grad=True).T
    # manually precomputed gradients
    grads = torch.tensor([-0.0, -0.0, -0.0, 1.0, 1.0, -0.0])
    return BasicModel(), inp, grads, None


def get_multiargs_basic_config() -> (
    Tuple[Module, Tuple[Tensor, ...], Tuple[Tensor, ...], Any]
):
    """Return a ``BasicModel5_MultiArgs`` fixture: (model, pair of input
    tensors, their computed gradients, additional forward args)."""
    model = BasicModel5_MultiArgs()
    extra_args = ([2, 3], 1)
    inputs = (
        torch.tensor([[1.5, 2.0, 34.3], [3.4, 1.2, 2.0]], requires_grad=True),
        torch.tensor([[3.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
    )
    grads = compute_gradients(model, inputs, additional_forward_args=extra_args)
    return model, inputs, grads, extra_args


def get_multiargs_basic_config_large() -> (
    Tuple[Module, Tuple[Tensor, ...], Tuple[Tensor, ...], Any]
):
    """Like :func:`get_multiargs_basic_config` but with larger-magnitude
    inputs whose rows are each repeated 3 times along the batch dim."""
    model = BasicModel5_MultiArgs()
    extra_args = ([2, 3], 1)
    base_inputs = (
        torch.tensor([[10.5, 12.0, 34.3], [43.4, 51.2, 32.0]], requires_grad=True),
        torch.tensor([[1.0, 3.5, 23.2], [2.3, 1.2, 0.3]], requires_grad=True),
    )
    inputs = tuple(t.repeat_interleave(3, dim=0) for t in base_inputs)
    grads = compute_gradients(model, inputs, additional_forward_args=extra_args)
    return model, inputs, grads, extra_args
39 changes: 39 additions & 0 deletions tests/attr/helpers/neuron_layer_testing_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
from typing import Tuple

import torch
from torch import Tensor


def create_inps_and_base_for_deeplift_neuron_layer_testing() -> (
    Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]
):
    """Return a pair of single-row input tensors and matching all-zero
    baselines for DeepLift neuron/layer tests."""
    inputs = (
        torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True),
        torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True),
    )
    baselines = (
        torch.zeros(1, 3, requires_grad=True),
        torch.zeros(1, 3, requires_grad=True),
    )
    return inputs, baselines


def create_inps_and_base_for_deepliftshap_neuron_layer_testing() -> (
    Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]
):
    """Return single-row inputs and 3-row all-zero baselines (DeepLiftShap
    expects a distribution of baselines) for neuron/layer tests."""
    inputs = (
        torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True),
        torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True),
    )
    baselines = (
        torch.zeros(3, 3, requires_grad=True),
        torch.zeros(3, 3, requires_grad=True),
    )
    return inputs, baselines
62 changes: 16 additions & 46 deletions tests/attr/layer/test_layer_deeplift.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@

import torch
from captum.attr._core.layer.layer_deep_lift import LayerDeepLift, LayerDeepLiftShap
from tests.attr.helpers.neuron_layer_testing_util import (
create_inps_and_base_for_deeplift_neuron_layer_testing,
create_inps_and_base_for_deepliftshap_neuron_layer_testing,
)
from tests.helpers.basic import (
assert_delta,
assertTensorAlmostEqual,
Expand All @@ -26,7 +30,7 @@
class TestDeepLift(BaseTest):
def test_relu_layer_deeplift(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
inputs, baselines = create_inps_and_base_for_deeplift_neuron_layer_testing()

layer_dl = LayerDeepLift(model, model.relu)
attributions, delta = layer_dl.attribute(
Expand All @@ -40,7 +44,7 @@ def test_relu_layer_deeplift(self) -> None:

def test_relu_layer_deeplift_wo_mutliplying_by_inputs(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
inputs, baselines = create_inps_and_base_for_deeplift_neuron_layer_testing()

layer_dl = LayerDeepLift(model, model.relu, multiply_by_inputs=False)
attributions = layer_dl.attribute(
Expand All @@ -52,7 +56,7 @@ def test_relu_layer_deeplift_wo_mutliplying_by_inputs(self) -> None:

def test_relu_layer_deeplift_multiple_output(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
inputs, baselines = create_inps_and_base_for_deeplift_neuron_layer_testing()

layer_dl = LayerDeepLift(model, model.multi_relu)
attributions, delta = layer_dl.attribute(
Expand All @@ -69,7 +73,7 @@ def test_relu_layer_deeplift_multiple_output(self) -> None:

def test_relu_layer_deeplift_add_args(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
inputs, baselines = create_inps_and_base_for_deeplift_neuron_layer_testing()

layer_dl = LayerDeepLift(model, model.relu)
attributions, delta = layer_dl.attribute(
Expand All @@ -84,7 +88,7 @@ def test_relu_layer_deeplift_add_args(self) -> None:

def test_linear_layer_deeplift(self) -> None:
model = ReLULinearModel(inplace=True)
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
inputs, baselines = create_inps_and_base_for_deeplift_neuron_layer_testing()

layer_dl = LayerDeepLift(model, model.l3)
attributions, delta = layer_dl.attribute(
Expand All @@ -98,7 +102,7 @@ def test_linear_layer_deeplift(self) -> None:

def test_relu_deeplift_with_custom_attr_func(self) -> None:
model = ReLULinearModel()
inputs, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
inputs, baselines = create_inps_and_base_for_deeplift_neuron_layer_testing()
attr_method = LayerDeepLift(model, model.l3)
self._relu_custom_attr_func_assert(attr_method, inputs, baselines, [[2.0]])

Expand All @@ -117,7 +121,7 @@ def custom_att_func(mult, inp, baseline):

def test_linear_layer_deeplift_batch(self) -> None:
model = ReLULinearModel(inplace=True)
_, baselines = _create_inps_and_base_for_deeplift_neuron_layer_testing()
_, baselines = create_inps_and_base_for_deeplift_neuron_layer_testing()
x1 = torch.tensor(
[[-10.0, 1.0, -5.0], [-10.0, 1.0, -5.0], [-10.0, 1.0, -5.0]],
requires_grad=True,
Expand Down Expand Up @@ -151,7 +155,7 @@ def test_relu_layer_deepliftshap(self) -> None:
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
) = create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.relu)
attributions, delta = layer_dl_shap.attribute(
inputs,
Expand All @@ -167,7 +171,7 @@ def test_relu_layer_deepliftshap_wo_mutliplying_by_inputs(self) -> None:
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
) = create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.relu, multiply_by_inputs=False)
attributions = layer_dl_shap.attribute(
inputs,
Expand All @@ -181,7 +185,7 @@ def test_relu_layer_deepliftshap_multiple_output(self) -> None:
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
) = create_inps_and_base_for_deepliftshap_neuron_layer_testing()

layer_dl = LayerDeepLiftShap(model, model.multi_relu)
attributions, delta = layer_dl.attribute(
Expand All @@ -201,7 +205,7 @@ def test_linear_layer_deepliftshap(self) -> None:
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
) = create_inps_and_base_for_deepliftshap_neuron_layer_testing()
layer_dl_shap = LayerDeepLiftShap(model, model.l3)
attributions, delta = layer_dl_shap.attribute(
inputs,
Expand All @@ -225,7 +229,7 @@ def test_relu_deepliftshap_with_custom_attr_func(self) -> None:
(
inputs,
baselines,
) = _create_inps_and_base_for_deepliftshap_neuron_layer_testing()
) = create_inps_and_base_for_deepliftshap_neuron_layer_testing()
attr_method = LayerDeepLiftShap(model, model.l3)
self._relu_custom_attr_func_assert(attr_method, inputs, baselines, [[2.0]])

Expand Down Expand Up @@ -290,37 +294,3 @@ def custom_attr_func(multipliers, inputs, baselines):
)

assertTensorAlmostEqual(self, attr[0], expected, 1e-19)


def _create_inps_and_base_for_deeplift_neuron_layer_testing() -> (
Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]
):
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)

b1 = torch.tensor([[0.0, 0.0, 0.0]], requires_grad=True)
b2 = torch.tensor([[0.0, 0.0, 0.0]], requires_grad=True)

inputs = (x1, x2)
baselines = (b1, b2)

return inputs, baselines


def _create_inps_and_base_for_deepliftshap_neuron_layer_testing() -> (
Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]
):
x1 = torch.tensor([[-10.0, 1.0, -5.0]], requires_grad=True)
x2 = torch.tensor([[3.0, 3.0, 1.0]], requires_grad=True)

b1 = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], requires_grad=True
)
b2 = torch.tensor(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], requires_grad=True
)

inputs = (x1, x2)
baselines = (b1, b2)

return inputs, baselines
4 changes: 2 additions & 2 deletions tests/attr/layer/test_layer_gradient_shap.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.gradient_shap import GradientShap
from captum.attr._core.layer.layer_gradient_shap import LayerGradientShap
from tests.attr.test_gradient_shap import _assert_attribution_delta
from tests.attr.helpers.attribution_delta_util import assert_attribution_delta
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
Expand Down Expand Up @@ -162,7 +162,7 @@ def _assert_attributions(
)
assertTensorTuplesAlmostEqual(self, attrs, expected, delta=0.005)
if expected_delta is None:
_assert_attribution_delta(
assert_attribution_delta(
self, inputs, attrs, n_samples, delta, is_layer=True
)
else:
Expand Down
Loading

0 comments on commit 7c31b01

Please sign in to comment.