From 9a086f5efd577f3d8f4602c00ee77a88f535ffde Mon Sep 17 00:00:00 2001
From: Teodora Sechkova
Date: Wed, 24 May 2023 13:47:51 +0300
Subject: [PATCH] Fix pylint errors

Signed-off-by: Teodora Sechkova
---
 art/attacks/evasion/patchfool.py          | 10 +++++-----
 art/estimators/classification/pytorch.py  | 12 +++++++++---
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/art/attacks/evasion/patchfool.py b/art/attacks/evasion/patchfool.py
index 577fb8de50..bf9ea6cb02 100644
--- a/art/attacks/evasion/patchfool.py
+++ b/art/attacks/evasion/patchfool.py
@@ -20,7 +20,7 @@
 | Paper link: https://arxiv.org/abs/2203.08392
 """
-from typing import TYPE_CHECKING, Optional, Union
+from typing import Optional, Union
 
 import numpy as np
 
@@ -73,7 +73,7 @@ def __init__(
         self.random_start = random_start
         self._check_params()
 
-    def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> np.ndarray:
+    def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
         """
         Generate adversarial samples and return them in an array.
 
@@ -110,12 +110,12 @@ def _generate_batch(self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None)
         x = x.to(self.estimator.device)
         y = y.to(self.estimator.device)
 
-        p = self._get_patch_index(x, layer=self.patch_layer)
+        patches = self._get_patch_index(x, layer=self.patch_layer)
         patch_size = self.estimator.patch_size
 
         mask = torch.zeros(x.shape).to(self.estimator.device)
 
-        for n, patch_idx in enumerate(p):
+        for n, patch_idx in enumerate(patches):
             row = (patch_idx // (x.shape[2] // patch_size)) * patch_size
             col = (patch_idx % (x.shape[2] // patch_size)) * patch_size
             mask[n, :, row : row + patch_size, col : col + patch_size] = 1
@@ -136,7 +136,7 @@ def _generate_batch(self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None)
         for i_max_iter in range(self.max_iter):
             optim.zero_grad()
 
-            loss_att = self._get_attention_loss(x_adv, p)
+            loss_att = self._get_attention_loss(x_adv, patches)
             loss_att_batch = torch.sum(loss_att, dim=1)
 
             model_outputs, _ = self.estimator._predict_framework(x_adv)
diff --git a/art/estimators/classification/pytorch.py b/art/estimators/classification/pytorch.py
index e90f4d2996..13beb1e151 100644
--- a/art/estimators/classification/pytorch.py
+++ b/art/estimators/classification/pytorch.py
@@ -1262,7 +1262,6 @@ def __init__(
             be divided by the second one.
         :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`.
         """
-        import torch
 
         super().__init__(
             model=model,
@@ -1283,12 +1282,19 @@
 
     @property
     def patch_size(self):
+        """
+        Return the patch size of the underlying vision transformer model.
+        """
         return self.model.patch_size
 
-    def get_attention_weights(self, x: Union[np.ndarray, "torch.Tensor"], batch_size: int = 128):
+    def get_attention_weights(self, x: Union[np.ndarray, "torch.Tensor"]) -> "torch.Tensor":
+        """
+        Return the attention weights of the model's self-attention layers for the inputs `x`.
+        """
+        import torch
         from torch import fx
-        from torchvision.models.feature_extraction import get_graph_node_names, create_feature_extractor
+        from torchvision.models.feature_extraction import create_feature_extractor
 
         graph: fx.Graph = fx.Tracer().trace(self.model)
         # 'need_weights' is set to False in the implementation
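
Note on the reworked get_attention_weights (a sketch, not part of the patch):
the method builds on torchvision's fx-based feature extraction. The snippet
below shows how that API is typically used, assuming a stock vit_b_16 and its
default traced node names; both are assumptions, not taken from this patch.
Because torchvision's encoder blocks call nn.MultiheadAttention with
need_weights=False, the extracted self_attention nodes yield (output, None)
tuples rather than the attention weight matrices, which is presumably why the
patched method traces the graph with fx.Tracer directly instead of relying on
this helper.

    import torch
    from torchvision.models import vit_b_16
    from torchvision.models.feature_extraction import (
        create_feature_extractor,
        get_graph_node_names,
    )

    # Assumed model: a stock torchvision ViT-B/16 in eval mode.
    model = vit_b_16().eval()

    # List the traced graph's node names and keep the per-block
    # self-attention calls (name pattern assumed from torchvision's ViT).
    _, eval_nodes = get_graph_node_names(model)
    attn_nodes = [name for name in eval_nodes if "self_attention" in name]

    # Build an extractor that returns the intermediate value at each node.
    extractor = create_feature_extractor(model, return_nodes=attn_nodes)
    with torch.no_grad():
        out = extractor(torch.rand(1, 3, 224, 224))

    # 'out' maps each node name to the value produced at that node. For the
    # self_attention nodes this is nn.MultiheadAttention's (output, weights)
    # tuple, where weights is None because need_weights is False inside
    # torchvision's EncoderBlock.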