From e4a294f046b8a2b5f228947eec052b862fb264ee Mon Sep 17 00:00:00 2001 From: myuito3 Date: Wed, 6 Dec 2023 09:52:44 +0900 Subject: [PATCH 1/5] Fix typo --- advgrads/adversarial/attacks/pi_fgsm.py | 2 +- advgrads/adversarial/attacks/simba.py | 5 ++++- configs/cifar10.yaml | 1 - configs/mnist.yaml | 1 - 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/advgrads/adversarial/attacks/pi_fgsm.py b/advgrads/adversarial/attacks/pi_fgsm.py index 2ef58bc..99ed8b8 100644 --- a/advgrads/adversarial/attacks/pi_fgsm.py +++ b/advgrads/adversarial/attacks/pi_fgsm.py @@ -77,7 +77,7 @@ def run_attack( c = x.shape[1] stack_kern, padding_size = project_kern(kern_size=3, channels=c) - stack_kern.to(x.device) + stack_kern = stack_kern.to(x.device) amplification = 0.0 for _ in range(self.max_iters): diff --git a/advgrads/adversarial/attacks/simba.py b/advgrads/adversarial/attacks/simba.py index 41f012b..96853a8 100644 --- a/advgrads/adversarial/attacks/simba.py +++ b/advgrads/adversarial/attacks/simba.py @@ -66,7 +66,10 @@ def __init__(self, config: SimBAAttackConfig) -> None: "SimBA is a minimum-norm attack, not a norm-constrained attack." ) if self.max_iters > 0: - raise ValueError() + raise ValueError( + "The maximum number of queries for SimBA is controlled by the " + "freq_dims parameter in the config." + ) self.loss = ( nn.CrossEntropyLoss(reduction="none") diff --git a/configs/cifar10.yaml b/configs/cifar10.yaml index 8dbc220..44e017c 100644 --- a/configs/cifar10.yaml +++ b/configs/cifar10.yaml @@ -7,7 +7,6 @@ attacks: method: fgsm norm: l_inf eps: 0.05 - num_iters: 0 targeted: false - method: i-fgsm diff --git a/configs/mnist.yaml b/configs/mnist.yaml index bf8d9f4..b7cf007 100644 --- a/configs/mnist.yaml +++ b/configs/mnist.yaml @@ -7,7 +7,6 @@ attacks: method: fgsm norm: l_inf eps: 0.3 - max_iters: 0 targeted: false - method: i-fgsm From 91b0d8eb24fdcf48a25e85931b68e8b2744fca7f Mon Sep 17 00:00:00 2001 From: myuito3 Date: Wed, 6 Dec 2023 19:52:31 +0900 Subject: [PATCH 2/5] Make ImageNet datasets and classification models --- advgrads/data/__init__.py | 4 +- advgrads/data/datasets/imagenet_dataset.py | 48 ++++++++++++ advgrads/models/__init__.py | 10 +++ advgrads/models/imagenet/__init__.py | 13 ++++ advgrads/models/imagenet/imagenet_model.py | 88 ++++++++++++++++++++++ advgrads/models/imagenet/inception.py | 57 ++++++++++++++ advgrads/models/imagenet/resnet.py | 51 +++++++++++++ advgrads/models/imagenet/vgg.py | 79 +++++++++++++++++++ advgrads_cli/attack.py | 26 +++++-- configs/imagenet.yaml | 32 ++++++++ 10 files changed, 399 insertions(+), 9 deletions(-) create mode 100644 advgrads/data/datasets/imagenet_dataset.py create mode 100644 advgrads/models/imagenet/__init__.py create mode 100644 advgrads/models/imagenet/imagenet_model.py create mode 100644 advgrads/models/imagenet/inception.py create mode 100644 advgrads/models/imagenet/resnet.py create mode 100644 advgrads/models/imagenet/vgg.py create mode 100644 configs/imagenet.yaml diff --git a/advgrads/data/__init__.py b/advgrads/data/__init__.py index 5d5f2a5..b9b29c9 100644 --- a/advgrads/data/__init__.py +++ b/advgrads/data/__init__.py @@ -16,6 +16,7 @@ from torch.utils.data import Dataset +from advgrads.data.datasets.imagenet_dataset import ImagenetDataset from advgrads.data.datasets.vision_dataset import ( MnistDataset, Cifar10Dataset, @@ -28,7 +29,8 @@ def get_dataset_class(name: str) -> Dataset: dataset_class_dict = { - "mnist": MnistDataset, "cifar10": Cifar10Dataset, + "imagenet": ImagenetDataset, + "mnist": MnistDataset, } 
all_dataset_names = list(dataset_class_dict.keys()) diff --git a/advgrads/data/datasets/imagenet_dataset.py b/advgrads/data/datasets/imagenet_dataset.py new file mode 100644 index 0000000..a98b9a1 --- /dev/null +++ b/advgrads/data/datasets/imagenet_dataset.py @@ -0,0 +1,48 @@ +# Copyright 2023 Makoto Yuito. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ImageNet dataset.""" + +from typing import List, Optional + +from torchvision import transforms +from torchvision.datasets import ImageNet + + +DATA_PATH = "./data/imagenet" + + +class ImagenetDataset(ImageNet): + """The ImageNet Dataset. + + Args: + transform: Transform objects for image preprocessing. + indices_to_use: List of image indices to be used. + """ + + def __init__( + self, + transform: transforms.Compose, + indices_to_use: Optional[List[int]] = None, + ) -> None: + super().__init__(root=DATA_PATH, split="val", transform=transform) + + all_samples = self.samples + self.samples = [] + for i in indices_to_use: + self.samples.append(all_samples[i]) + + @property + def num_classes(self) -> int: + return 1000 diff --git a/advgrads/models/__init__.py b/advgrads/models/__init__.py index 8b953c3..f1b95af 100644 --- a/advgrads/models/__init__.py +++ b/advgrads/models/__init__.py @@ -18,6 +18,12 @@ TradesMnistModelConfig, ) from advgrads.models.base_model import Model +from advgrads.models.imagenet.inception import InceptionV3ImagenetModelConfig +from advgrads.models.imagenet.resnet import Resnet50ImagenetModelConfig +from advgrads.models.imagenet.vgg import ( + Vgg16ImagenetModelConfig, + Vgg16bnImagenetModelConfig, +) from advgrads.models.pytorch_playground.cifar10_model import PtPgCifar10ModelConfig from advgrads.models.pytorch_playground.mnist_model import PtPgMnistModelConfig @@ -31,5 +37,9 @@ def get_model_config_class(name: str) -> Model: "ptpg-mnist": PtPgMnistModelConfig, "ptpg-cifar10": PtPgCifar10ModelConfig, "trades-mnist": TradesMnistModelConfig, + "inceptionv3-imagenet": InceptionV3ImagenetModelConfig, + "resnet50-imagenet": Resnet50ImagenetModelConfig, + "vgg16-imagenet": Vgg16ImagenetModelConfig, + "vgg16bn-imagenet": Vgg16bnImagenetModelConfig, } all_model_names = list(model_config_class_dict.keys()) diff --git a/advgrads/models/imagenet/__init__.py b/advgrads/models/imagenet/__init__.py new file mode 100644 index 0000000..ad7fc3a --- /dev/null +++ b/advgrads/models/imagenet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2023 Makoto Yuito. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
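For orientation, a minimal sketch of how the pieces added above are meant to fit together. It mirrors the `advgrads_cli/attack.py` changes later in this patch, but the registry key, index list, and batch size are arbitrary illustration values, a plain `DataLoader` stands in for the repo's own dataloader helper, and it assumes the ImageNet validation split is available under `./data/imagenet`:

```python
# Sketch only (not part of the patch): wiring the new ImageNet dataset and
# model registries together, roughly as advgrads_cli/attack.py does below.
from torch.utils.data import DataLoader

from advgrads.data import get_dataset_class
from advgrads.models import get_model_config_class

model_config = get_model_config_class("resnet50-imagenet")()  # new registry key
model = model_config.setup()
model.load()  # downloads the checkpoint on first use if it is missing

# The dataset receives the model-specific preprocessing transform and an
# explicit list of validation-image indices to evaluate on.
dataset = get_dataset_class("imagenet")(
    transform=model.get_transform(), indices_to_use=list(range(100))
)
dataloader = DataLoader(dataset, batch_size=16)
```

Taking the transform from the model keeps preprocessing model-specific: the Inception-v3 config added later in this patch crops to 299 pixels, while the ResNet and VGG configs use the default 224.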
diff --git a/advgrads/models/imagenet/imagenet_model.py b/advgrads/models/imagenet/imagenet_model.py new file mode 100644 index 0000000..0119907 --- /dev/null +++ b/advgrads/models/imagenet/imagenet_model.py @@ -0,0 +1,88 @@ +# Copyright 2023 Makoto Yuito. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The base model for ImageNet.""" + +import os +from collections import OrderedDict +from dataclasses import dataclass, field +from typing import Tuple, Type + +import torch +import torch.nn as nn +import torchvision.transforms.functional as F +from torch import Tensor +from torchvision import transforms + +from advgrads.models.base_model import Model, ModelConfig + + +@dataclass +class ImagenetModelConfig(ModelConfig): + """The base configuration class for the ImageNet model.""" + + _target: Type = field(default_factory=lambda: ImagenetModel) + """Target class to instantiate.""" + crop_size: int = 224 + """Size of the image to be cropped, i.e., the size of the input to the model.""" + resize_size: int = 256 + """Size of the image to resize before cropping.""" + mean: Tuple[float, ...] = (0.485, 0.456, 0.406) + """Mean values per channel used to normalize the ImageNet image.""" + std: Tuple[float, ...] = (0.229, 0.224, 0.225) + """Std values per channel used to normalize the ImageNet image.""" + + +class ImagenetModel(Model): + """The base model classifying ImageNet dataset. + + Args: + config: The base model configuration. + """ + + config: ImagenetModelConfig + model: nn.Module + + def __init__(self, config: ImagenetModelConfig) -> None: + super().__init__(config) + self.mean = list(self.config.mean) + self.std = list(self.config.std) + + def load(self) -> None: + if not os.path.exists(self.config.checkpoint_path): + self.download() + + checkpoint = torch.load(self.config.checkpoint_path, map_location="cpu") + new_checkpoint = OrderedDict() + for key, value in checkpoint.items(): + new_key = "model." + key + new_checkpoint[new_key] = value + del checkpoint + + self.load_state_dict(new_checkpoint) + self.eval() + + def forward(self, x_input: Tensor) -> Tensor: + x_input = F.normalize(x_input, mean=self.mean, std=self.std) + return self.model(x_input) + + def get_transform(self) -> transforms.Compose: + transform = transforms.Compose( + [ + transforms.Resize(self.config.resize_size), + transforms.CenterCrop(self.config.crop_size), + transforms.ToTensor(), + ] + ) + return transform diff --git a/advgrads/models/imagenet/inception.py b/advgrads/models/imagenet/inception.py new file mode 100644 index 0000000..d4878a6 --- /dev/null +++ b/advgrads/models/imagenet/inception.py @@ -0,0 +1,57 @@ +# Copyright 2023 Makoto Yuito. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Inception model.""" + +from dataclasses import dataclass, field +from pathlib import Path +from typing import Optional, Type + +from torchvision.models import inception_v3 + +from advgrads.models.imagenet.imagenet_model import ImagenetModel, ImagenetModelConfig + + +@dataclass +class InceptionV3ImagenetModelConfig(ImagenetModelConfig): + """The configuration class for the Inception-v3 model.""" + + _target: Type = field(default_factory=lambda: InceptionV3ImagenetModel) + """Target class to instantiate.""" + checkpoint_path: Path = Path( + "checkpoints/imagenet/inception/inception_v3_google-0cc3c7bd.pth" + ) + """Path to the checkpoint file to be loaded.""" + download_url: Optional[ + str + ] = "https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth" + """URL to download the checkpoint file if it is not found.""" + crop_size: int = 299 + """Size of the image to be cropped, i.e., the size of the input to the model.""" + resize_size: int = 342 + """Size of the image to resize before cropping.""" + + +class InceptionV3ImagenetModel(ImagenetModel): + """The Inception-v3 model classifying ImageNet dataset. + + Args: + config: The Inception-v3 model configuration. + """ + + config: InceptionV3ImagenetModelConfig + + def __init__(self, config: InceptionV3ImagenetModelConfig) -> None: + super().__init__(config) + self.model = inception_v3(weights=None) diff --git a/advgrads/models/imagenet/resnet.py b/advgrads/models/imagenet/resnet.py new file mode 100644 index 0000000..7cca202 --- /dev/null +++ b/advgrads/models/imagenet/resnet.py @@ -0,0 +1,51 @@ +# Copyright 2023 Makoto Yuito. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The ResNet model.""" + +from dataclasses import dataclass, field +from pathlib import Path +from typing import Optional, Type + +from torchvision.models import resnet50 + +from advgrads.models.imagenet.imagenet_model import ImagenetModel, ImagenetModelConfig + + +@dataclass +class Resnet50ImagenetModelConfig(ImagenetModelConfig): + """The configuration class for the ResNet-50 model.""" + + _target: Type = field(default_factory=lambda: Resnet50ImagenetModel) + """Target class to instantiate.""" + checkpoint_path: Path = Path("checkpoints/imagenet/resnet/resnet50-0676ba61.pth") + """Path to the checkpoint file to be loaded.""" + download_url: Optional[ + str + ] = "https://download.pytorch.org/models/resnet50-0676ba61.pth" + """URL to download the checkpoint file if it is not found.""" + + +class Resnet50ImagenetModel(ImagenetModel): + """The ResNet-50 model classifying ImageNet dataset. 
+ + Args: + config: The ResNet-50 model configuration. + """ + + config: Resnet50ImagenetModelConfig + + def __init__(self, config: Resnet50ImagenetModelConfig) -> None: + super().__init__(config) + self.model = resnet50(weights=None) diff --git a/advgrads/models/imagenet/vgg.py b/advgrads/models/imagenet/vgg.py new file mode 100644 index 0000000..9eb29ee --- /dev/null +++ b/advgrads/models/imagenet/vgg.py @@ -0,0 +1,79 @@ +# Copyright 2023 Makoto Yuito. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The VGG model.""" + +from dataclasses import dataclass, field +from pathlib import Path +from typing import Optional, Type + +from torchvision.models import vgg16, vgg16_bn + +from advgrads.models.imagenet.imagenet_model import ImagenetModel, ImagenetModelConfig + + +@dataclass +class Vgg16ImagenetModelConfig(ImagenetModelConfig): + """The configuration class for the VGG-16 model.""" + + _target: Type = field(default_factory=lambda: Vgg16ImagenetModel) + """Target class to instantiate.""" + checkpoint_path: Path = Path("checkpoints/imagenet/vgg/vgg16-397923af.pth") + """Path to the checkpoint file to be loaded.""" + download_url: Optional[ + str + ] = "https://download.pytorch.org/models/vgg16-397923af.pth" + """URL to download the checkpoint file if it is not found.""" + + +class Vgg16ImagenetModel(ImagenetModel): + """The VGG-16 model classifying ImageNet dataset. + + Args: + config: The VGG-16 model configuration. + """ + + config: Vgg16ImagenetModelConfig + + def __init__(self, config: Vgg16ImagenetModelConfig) -> None: + super().__init__(config) + self.model = vgg16(weights=None) + + +@dataclass +class Vgg16bnImagenetModelConfig(ImagenetModelConfig): + """The configuration class for the VGG-16-BN model.""" + + _target: Type = field(default_factory=lambda: Vgg16bnImagenetModel) + """Target class to instantiate.""" + checkpoint_path: Path = Path("checkpoints/imagenet/vgg/vgg16_bn-6c64b313.pth") + """Path to the checkpoint file to be loaded.""" + download_url: Optional[ + str + ] = "https://download.pytorch.org/models/vgg16_bn-6c64b313.pth" + """URL to download the checkpoint file if it is not found.""" + + +class Vgg16bnImagenetModel(ImagenetModel): + """The VGG-16-BN model classifying ImageNet dataset. + + Args: + config: The VGG-16-BN model configuration. 
+ """ + + config: Vgg16bnImagenetModelConfig + + def __init__(self, config: Vgg16bnImagenetModelConfig) -> None: + super().__init__(config) + self.model = vgg16_bn(weights=None) diff --git a/advgrads_cli/attack.py b/advgrads_cli/attack.py index 2fac040..f79633b 100644 --- a/advgrads_cli/attack.py +++ b/advgrads_cli/attack.py @@ -49,19 +49,29 @@ def main(load_config) -> None: config = ExperimentConfig() config.__dict__.update(load_from_yaml(Path(load_config))) - image_indices = ( - index_samplers.get_arange(config.num_images) - if config.num_images is not None - else None - ) - dataset = get_dataset_class(config.data)(indices_to_use=image_indices) - dataloader = get_dataloader(dataset, batch_size=config.batch_size) - model_config = get_model_config_class(config.model)() model = model_config.setup() model.load() model.to(device) + if "imagenet" in config.model: + image_indices = ( + index_samplers.get_random(config.num_images, population=50000) + if config.num_images is not None + else None + ) + dataset = get_dataset_class(config.data)( + transform=model.get_transform(), indices_to_use=image_indices + ) + else: + image_indices = ( + index_samplers.get_arange(config.num_images) + if config.num_images is not None + else None + ) + dataset = get_dataset_class(config.data)(indices_to_use=image_indices) + dataloader = get_dataloader(dataset, batch_size=config.batch_size) + defense = None if config.thirdparty_defense is not None: defense_config = get_defense_config_class(config.thirdparty_defense)() diff --git a/configs/imagenet.yaml b/configs/imagenet.yaml new file mode 100644 index 0000000..02a7bf4 --- /dev/null +++ b/configs/imagenet.yaml @@ -0,0 +1,32 @@ +seed: 42 +data: imagenet +model: vgg16-imagenet + +attacks: + - + method: fgsm + norm: l_inf + eps: 0.05 + targeted: false + - + method: i-fgsm + norm: l_inf + eps: 0.05 + max_iters: 10 + targeted: false + - + method: mi-fgsm + norm: l_inf + eps: 0.05 + max_iters: 10 + targeted: false + - + method: square + norm: l_inf + eps: 0.05 + max_iters: 10000 + targeted: false + p_init: 0.05 + +num_images: 100 +batch_size: 16 \ No newline at end of file From 15a3d331fecbc1e630747e1225db9f621db1b529 Mon Sep 17 00:00:00 2001 From: myuito3 Date: Tue, 12 Dec 2023 13:53:45 +0900 Subject: [PATCH 3/5] Fix to allow metrics to be set for each attack method --- advgrads/adversarial/attacks/base_attack.py | 87 ++++++++++--------- advgrads/adversarial/attacks/deepfool.py | 14 +++ advgrads/adversarial/attacks/signhunter.py | 12 +++ advgrads/adversarial/attacks/simba.py | 18 ++++ advgrads/adversarial/attacks/square.py | 12 +++ .../adversarial/attacks/utils/result_heads.py | 5 +- advgrads/utils/metrics.py | 14 +-- advgrads_cli/attack.py | 18 ++-- 8 files changed, 124 insertions(+), 56 deletions(-) diff --git a/advgrads/adversarial/attacks/base_attack.py b/advgrads/adversarial/attacks/base_attack.py index 615c99d..c86d94f 100644 --- a/advgrads/adversarial/attacks/base_attack.py +++ b/advgrads/adversarial/attacks/base_attack.py @@ -74,7 +74,7 @@ def __init__(self, config: AttackConfig, **kwargs) -> None: if self.norm not in self.norm_allow_list: raise ValueError(f"Method does not support {self.norm} perturbation norm.") - def __call__(self, *args: Any, **kwargs: Any) -> Dict[ResultHeadNames, Any]: + def __call__(self, *args: Any, **kwargs: Any) -> Dict[ResultHeadNames, Tensor]: return self.get_outputs(*args, **kwargs) @property @@ -114,69 +114,74 @@ def run_attack( """ raise NotImplementedError + def sanity_check(self, x: Tensor, x_adv: Tensor) -> None: + """Ensure 
that the amount of perturbation is properly controlled. This method + is specifically used to check the amount of perturbation of norm-constrained + type attack methods. + + Args: + x: Original images. + x_adv: Perturbed images. + """ + if self.eps > 0.0: + deltas = x_adv - x + if self.norm == "l_inf": + real = ( + deltas.abs().max().half() + ) # ignore slight differences within the decimal point + msg = f"Perturbations beyond the l_inf sphere ({real})." + elif self.norm == "l_2": + real = torch.norm(deltas.view(x.shape[0], -1), p=2, dim=-1).max() + msg = f"Perturbations beyond the l_2 sphere ({real})." + elif self.norm == "l_0": + raise NotImplementedError + + assert real <= self.eps, msg + def get_outputs( self, - x: Tensor, - y: Tensor, + batch: Dict[str, Tensor], model: Model, thirdparty_defense: Optional[Defense] = None, **kwargs, - ) -> Dict[ResultHeadNames, Any]: + ) -> Dict[ResultHeadNames, Tensor]: """Returns raw attack results processed. Args: - x: Images to be searched for adversarial examples. - y: Ground truth labels of images. + batch: A batch including original images and labels. model: A model to be attacked. thirdparty_defense: Thirdparty defense method instance. """ + x, y = batch["images"], batch["labels"] attack_outputs = self.run_attack(x, y, model, **kwargs) - self.sanity_check(x, attack_outputs[ResultHeadNames.X_ADV]) + x_adv = attack_outputs[ResultHeadNames.X_ADV] + self.sanity_check(x, x_adv) # If a defensive method is defined, the process is performed here. This # corresponds to Section 5.2 (GRAY BOX: IMAGE TRANSFORMATIONS AT TEST TIME) in # the paper of Guo et al [https://arxiv.org/pdf/1711.00117.pdf]. - if thirdparty_defense is not None: - attack_outputs[ResultHeadNames.X_ADV] = thirdparty_defense( - attack_outputs[ResultHeadNames.X_ADV] - ) - with torch.no_grad(): - logits = model(attack_outputs[ResultHeadNames.X_ADV]) + if thirdparty_defense is not None: + logits = model(thirdparty_defense(x_adv)) + else: + logits = model(x_adv) preds = torch.argmax(logits, dim=-1) - cond = (preds == y) if self.targeted else (preds != y) - attack_outputs[ResultHeadNames.NUM_SUCCEED] = cond.sum() + succeed = (preds == y) if self.targeted else (preds != y) - if ResultHeadNames.QUERIES in attack_outputs.keys(): - attack_outputs[ResultHeadNames.QUERIES_SUCCEED] = attack_outputs[ - ResultHeadNames.QUERIES - ][cond] + attack_outputs[ResultHeadNames.PREDS] = preds + attack_outputs[ResultHeadNames.SUCCEED] = succeed + attack_outputs[ResultHeadNames.NUM_SUCCEED] = succeed.sum() - for key, value in attack_outputs.items(): - if isinstance(value, Tensor): - attack_outputs[key] = value.cpu() return attack_outputs - def sanity_check(self, x: Tensor, x_adv: Tensor) -> None: - """Ensure that the amount of perturbation is properly controlled. This method - is specifically used to check the amount of perturbation of norm-constrained - type attack methods. + def get_metrics_dict( + self, outputs: Dict[ResultHeadNames, Tensor], batch: Dict[str, Tensor] + ) -> Dict[str, Tensor]: + """Compute and returns metrics. Args: - x: Original images. - x_adv: Perturbed images. + outputs: The output to compute metrics dict to. + batch: Ground truth batch corresponding to outputs. """ - if self.eps > 0.0: - deltas = x_adv - x - if self.norm == "l_inf": - real = ( - deltas.abs().max().half() - ) # ignore slight differences within the decimal point - msg = f"Perturbations beyond the l_inf sphere ({real})." 
- elif self.norm == "l_2": - real = torch.norm(deltas.view(x.shape[0], -1), p=2, dim=-1).max() - msg = f"Perturbations beyond the l_2 sphere ({real})." - elif self.norm == "l_0": - raise NotImplementedError - assert real <= self.eps, msg + return {} diff --git a/advgrads/adversarial/attacks/deepfool.py b/advgrads/adversarial/attacks/deepfool.py index d5c1289..c805091 100644 --- a/advgrads/adversarial/attacks/deepfool.py +++ b/advgrads/adversarial/attacks/deepfool.py @@ -128,3 +128,17 @@ def run_attack( ) return {ResultHeadNames.X_ADV: x_adv} + + def get_metrics_dict( + self, outputs: Dict[ResultHeadNames, Tensor], batch: Dict[str, Tensor] + ) -> Dict[str, Tensor]: + metrics_dict = {} + succeed = outputs[ResultHeadNames.SUCCEED] + + # perturbation norm + l2_norm_succeed = torch.norm( + outputs[ResultHeadNames.X_ADV] - batch["images"], p=2, dim=[1, 2, 3] + )[succeed] + metrics_dict["l2_norm"] = l2_norm_succeed + + return metrics_dict diff --git a/advgrads/adversarial/attacks/signhunter.py b/advgrads/adversarial/attacks/signhunter.py index e881bd9..b338a59 100644 --- a/advgrads/adversarial/attacks/signhunter.py +++ b/advgrads/adversarial/attacks/signhunter.py @@ -135,3 +135,15 @@ def run_attack( h = 0 return {ResultHeadNames.X_ADV: x_adv, ResultHeadNames.QUERIES: n_queries} + + def get_metrics_dict( + self, outputs: Dict[ResultHeadNames, Tensor], batch: Dict[str, Tensor] + ) -> Dict[str, Tensor]: + metrics_dict = {} + succeed = outputs[ResultHeadNames.SUCCEED] + + # query + queries_succeed = outputs[ResultHeadNames.QUERIES][succeed] + metrics_dict[ResultHeadNames.QUERIES_SUCCEED] = queries_succeed + + return metrics_dict diff --git a/advgrads/adversarial/attacks/simba.py b/advgrads/adversarial/attacks/simba.py index 96853a8..3335bc3 100644 --- a/advgrads/adversarial/attacks/simba.py +++ b/advgrads/adversarial/attacks/simba.py @@ -187,3 +187,21 @@ def run_attack( x_best, _, _, _ = self.get_data(torch.arange(x.shape[0])) return {ResultHeadNames.X_ADV: x_best, ResultHeadNames.QUERIES: n_queries} + + def get_metrics_dict( + self, outputs: Dict[ResultHeadNames, Tensor], batch: Dict[str, Tensor] + ) -> Dict[str, Tensor]: + metrics_dict = {} + succeed = outputs[ResultHeadNames.SUCCEED] + + # query + queries_succeed = outputs[ResultHeadNames.QUERIES][succeed] + metrics_dict[ResultHeadNames.QUERIES_SUCCEED] = queries_succeed + + # perturbation norm + l2_norm_succeed = torch.norm( + outputs[ResultHeadNames.X_ADV] - batch["images"], p=2, dim=[1, 2, 3] + )[succeed] + metrics_dict["l2_norm"] = l2_norm_succeed + + return metrics_dict diff --git a/advgrads/adversarial/attacks/square.py b/advgrads/adversarial/attacks/square.py index 09a405c..6d32ac9 100644 --- a/advgrads/adversarial/attacks/square.py +++ b/advgrads/adversarial/attacks/square.py @@ -191,3 +191,15 @@ def run_attack( n_queries[idx_to_fool] += 1 return {ResultHeadNames.X_ADV: x_best, ResultHeadNames.QUERIES: n_queries} + + def get_metrics_dict( + self, outputs: Dict[ResultHeadNames, Tensor], batch: Dict[str, Tensor] + ) -> Dict[str, Tensor]: + metrics_dict = {} + succeed = outputs[ResultHeadNames.SUCCEED] + + # query + queries_succeed = outputs[ResultHeadNames.QUERIES][succeed] + metrics_dict[ResultHeadNames.QUERIES_SUCCEED] = queries_succeed + + return metrics_dict diff --git a/advgrads/adversarial/attacks/utils/result_heads.py b/advgrads/adversarial/attacks/utils/result_heads.py index bbe35f5..5bcb2ac 100644 --- a/advgrads/adversarial/attacks/utils/result_heads.py +++ b/advgrads/adversarial/attacks/utils/result_heads.py @@ -22,9 +22,12 @@ 
class ResultHeadNames(Enum): X_ADV = "x_adv" SHAPE = "shape" + PREDS = "preds" + SUCCEED = "succeed" + NUM_SUCCEED = "num_succeed" SUCCESS_RATE = "success_rate" + QUERIES = "queries" QUERIES_SUCCEED = "queries_succeed" MEAN_QUERY = "mean_query" MEDIAN_QUERY = "median_query" - NUM_SUCCEED = "num_succeed" diff --git a/advgrads/utils/metrics.py b/advgrads/utils/metrics.py index 57a290b..38ebce5 100644 --- a/advgrads/utils/metrics.py +++ b/advgrads/utils/metrics.py @@ -14,7 +14,7 @@ """Eval metrics.""" -import numpy as np +import torch class SuccessRateMeter: @@ -55,8 +55,12 @@ def __init__(self) -> None: self.reset() def __str__(self) -> str: - return "Mean Query: {:.2f} Median Query: {:.2f} ".format( - self.get_mean(), self.get_median() + return ( + "Mean Query: {:.2f} Median Query: {:.2f} ".format( + self.get_mean(), self.get_median() + ) + if self.queries + else "" ) def reset(self) -> None: @@ -73,8 +77,8 @@ def update(self, queries: list) -> None: def get_mean(self) -> float: """Calculate the mean from the stacked queries.""" - return float(np.mean(self.queries)) + return float(torch.mean(torch.tensor(self.queries).float())) def get_median(self) -> float: """Calculate the median from the stacked queries.""" - return float(np.median(self.queries)) + return float(torch.median(torch.tensor(self.queries).float())) diff --git a/advgrads_cli/attack.py b/advgrads_cli/attack.py index f79633b..2db0db3 100644 --- a/advgrads_cli/attack.py +++ b/advgrads_cli/attack.py @@ -92,15 +92,15 @@ def main(load_config) -> None: # Currently we use gt+1 as the target label. labels = (labels + 1) % dataset.num_classes - images, labels = images.to(device), labels.to(device) - attack_outputs = attack(images, labels, model, thirdparty_defense=defense) - - if ResultHeadNames.NUM_SUCCEED in attack_outputs.keys(): - success_rate_meter.update( - attack_outputs[ResultHeadNames.NUM_SUCCEED], len(images) - ) - if ResultHeadNames.QUERIES_SUCCEED in attack_outputs.keys(): - query_meter.update(attack_outputs[ResultHeadNames.QUERIES_SUCCEED]) + batch = {"images": images.to(device), "labels": labels.to(device)} + attack_outputs = attack(batch, model, thirdparty_defense=defense) + success_rate_meter.update( + attack_outputs[ResultHeadNames.NUM_SUCCEED], len(images) + ) + + metrics_dict = attack.get_metrics_dict(attack_outputs, batch) + if ResultHeadNames.QUERIES_SUCCEED in metrics_dict.keys(): + query_meter.update(metrics_dict[ResultHeadNames.QUERIES_SUCCEED]) console_log(str(success_rate_meter) + str(query_meter)) From 0476ce3dce8290ceb15d101d8b6cb7bb200ede7b Mon Sep 17 00:00:00 2001 From: myuito3 Date: Tue, 12 Dec 2023 14:43:20 +0900 Subject: [PATCH 4/5] Set the Pillow version to the latest one --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 64249e0..d55f159 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,5 +2,5 @@ torch==2.0.1 --index-url https://download.pytorch.org/whl/cu117 torchvision==0.15.2 --index-url https://download.pytorch.org/whl/cu117 rich==13.5.2 PyYAML==6.0.1 -Pillow==9.5.0 +Pillow==10.1.0 click==8.1.7 \ No newline at end of file From d98f150884e77eeaadfb98326f1dddb193531a18 Mon Sep 17 00:00:00 2001 From: myuito3 Date: Tue, 12 Dec 2023 15:43:24 +0900 Subject: [PATCH 5/5] v0.1.0 --- README.md | 14 +++++++++++++- requirements.txt | 3 ++- setup.py | 10 ++++++++-- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index d4be329..a058282 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,15 @@ 
# AdvGrads +

+<!-- badges: Latest Release, License -->

+ ## 🌐 About @@ -18,11 +27,13 @@ Currently supported attack methods are as follows: | Method | Type | References | | :------------------ | :------------------ | :------------------ | | DeepFool | White-box | 📃[DeepFool: a simple and accurate method to fool deep neural networks](https://arxiv.org/abs/1511.04599) | +| DI-MI-FGSM | White-box | 📃[Improving Transferability of Adversarial Examples with Input Diversity](https://arxiv.org/abs/1803.06978) | | FGSM | White-box | 📃[Explaining and Harnessing Adversarial Examples](https://arxiv.org/abs/1412.6572) | | I-FGSM (BIM) | White-box | 📃[Adversarial examples in the physical world](https://arxiv.org/abs/1607.02533) | | MI-FGSM (MIM) | White-box | 📃[Boosting Adversarial Attacks with Momentum](https://arxiv.org/abs/1710.06081) | | NI-FGSM | White-box | 📃[Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks](https://arxiv.org/abs/1908.06281) | | PGD | White-box | 📃[Towards Deep Learning Models Resistant to Adversarial Attacks](https://arxiv.org/abs/1706.06083) | +| PI-FGSM | White-box | 📃[Patch-wise Attack for Fooling Deep Neural Network](https://arxiv.org/abs/2007.06765) | | SI-NI-FGSM | White-box | 📃[Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks](https://arxiv.org/abs/1908.06281) | | SignHunter | Black-box | 📃[Sign Bits Are All You Need for Black-Box Attacks](https://openreview.net/forum?id=SygW0TEFwH) | | SimBA | Black-box | 📃[Simple Black-box Adversarial Attacks](https://arxiv.org/abs/1905.07121) | @@ -57,6 +68,8 @@ py -3.9 -m venv [ENV_NAME] After creating and activating your virtual environment, you can install necessary libraries via the requirements.txt. ```bash +git clone https://github.com/myuito3/AdvGrads.git +cd AdvGrads/ pip install -r requirements.txt ``` @@ -64,7 +77,6 @@ pip install -r requirements.txt Install AdvGrads in editable mode from source code: ```bash -git clone https://github.com/myuito3/AdvGrads.git python -m pip install -e . ``` diff --git a/requirements.txt b/requirements.txt index d55f159..3cc357c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,5 @@ torchvision==0.15.2 --index-url https://download.pytorch.org/whl/cu117 rich==13.5.2 PyYAML==6.0.1 Pillow==10.1.0 -click==8.1.7 \ No newline at end of file +click==8.1.7 +scipy==1.11.4 \ No newline at end of file diff --git a/setup.py b/setup.py index 26c8eb6..6e77c64 100644 --- a/setup.py +++ b/setup.py @@ -2,10 +2,16 @@ setup( name="advgrads", - version="1.0", + version="0.1.0", author="Makoto Yuito", description="AdvGrads - An all-in-one tool for adversarial attacks on image recognition", packages=find_packages(), license="Apache 2.0", - keywords="Adversarial Attacks,", + keywords=[ + "deep-learning", + "pytorch", + "adversarial-attacks", + "adversarial-robustness", + ], + url="https://github.com/myuito3/AdvGrads", )
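A side note on the `QueryMeter` change in PATCH 3/5 (NumPy replaced by PyTorch): for an even number of stacked queries, `torch.median` returns the lower of the two middle values, whereas `np.median` averages them, so the reported median query count can differ slightly between the two implementations. A minimal illustration with arbitrary values:

```python
import numpy as np
import torch

queries = [10.0, 20.0]  # even number of entries

print(float(np.median(queries)))                   # 15.0 -- average of the middle pair
print(float(torch.median(torch.tensor(queries))))  # 10.0 -- lower of the two middle values
```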