Commit
Resolve conflicts between different branches.
HaojieYuan committed Jul 25, 2020
2 parents 1ed0cc1 + c1fda1f commit 72009bd
Showing 2,922 changed files with 1,359,386 additions and 11,119 deletions.
13 changes: 13 additions & 0 deletions .gitignore
@@ -2,3 +2,16 @@ __pycache__
cifar10_models/__pycache__
cifar10_models/state_dicts
cifar10_models/state_dicts.zip
controller_best.ckpt
benchmark/__MACOSX
benchmark/models-r1.12.0.zip
benchmark/pretrained
benchmark/1stHGD
benchmark/attacks/DI/DI-2-FGSM-master.zip
benchmark/attacks/DI/M-DI-2-FGSM_attack_out_ens
benchmark/attacks/DI/M-DI-2-FGSM_attack_out_inception_resnet_v2
benchmark/attacks/DI/M-DI-2-FGSM_attack_out_inception_v3
benchmark/attacks/DI/M-DI-2-FGSM_attack_out_inception_v4
benchmark/attacks/DI/M-DI-2-FGSM_attack_out_resnet
benchmark/attacks/TI/dataset/images
benchmark/attacks/TI/out
30 changes: 20 additions & 10 deletions attack.py
@@ -2,17 +2,21 @@
import numpy as np
import math

import pdb


def attack(img_batch, model, aug_list=None, type='iterative', momentum_mu=None,
y=None, eps=5, eps_iter=2, nb_iter=3, ord=2, clip_min=0, clip_max=1):

device = img_batch.device

if ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf or 2.")
l2 = (clip_max-clip_min)/255 * math.sqrt(img_batch.shape[1]*img_batch.shape[2]*img_batch.shape[3])
if ord == 2:
eps = eps*l2
eps_iter = eps_iter*l2

x0 = img_batch.clone().detach().to(torch.float).requires_grad_(False)
if y is None:
_, y = torch.max(model(x0), 1)
@@ -26,20 +30,26 @@ def attack(img_batch, model, aug_list=None, type='iterative', momentum_mu=None,
adv_x = x + eta
if clip_min is not None or clip_max is not None:
adv_x = torch.clamp(adv_x, clip_min, clip_max)

i = 0
if momentum_mu is not None:
momentum = eta.clone().detach()
loss_fn = torch.nn.CrossEntropyLoss()
while i < nb_iter:
        adv_x_tmp = adv_x.clone().detach().to(torch.float).requires_grad_(True)


adv_x_list = [adv_x_tmp]
if aug_list is not None:
adv_x_list = [aug_func[1](aug_func[0](adv_x_tmp)) for aug_func in aug_list]
adv_x_list.extend([aug_func[1](aug_func[0](adv_x_tmp)) for aug_func in aug_list['augs']])
weights = aug_list['weights']
else:
adv_x_list = [adv_x_tmp]
weights = [1]

loss = torch.tensor(0.).to(device)
for j in adv_x_list:
loss = loss + loss_fn(model(j), y)
for j, k in zip(adv_x_list, weights):
loss = loss + k * loss_fn(model(j), y)

if not targeted:
loss = -loss
adv_x_tmp = single_step(loss, adv_x_tmp, eps_iter, ord, clip_min=clip_min, clip_max=clip_max)
@@ -57,7 +67,7 @@ def attack(img_batch, model, aug_list=None, type='iterative', momentum_mu=None,
if clip_min is not None or clip_max is not None:
adv_x = torch.clamp(adv_x, clip_min, clip_max)
i += 1

return adv_x


@@ -73,7 +83,7 @@ def single_step(loss, adv_x, eps, ord, clip_min=None, clip_max=None):
adv_x = torch.clamp(adv_x, clip_min, clip_max)

return adv_x


def optimize_linear(grad, eps, ord=np.inf):
"""
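With this change, `attack()` expects `aug_list` to be a dict holding an `'augs'` list and a `'weights'` list: each augmented copy's cross-entropy loss is scaled by its weight before summation, and the un-augmented copy stays at the front of the list. For `ord=2`, `eps` and `eps_iter` are given in 0–255 units and rescaled to an L2 radius via `l2 = (clip_max-clip_min)/255 * sqrt(C*H*W)`. Below is a minimal, hypothetical usage sketch; the way each `'augs'` entry is packed as a pair of callables (applied as `entry[1](entry[0](x))` inside the loop), the identity second stage, the model choice, and all hyper-parameter values are assumptions for illustration, not taken from this commit.

```python
# Hypothetical usage sketch for the new dict-style aug_list; see the caveats above.
import functools

import torch
import torchvision.models as models

from attack import attack
from aug_search import augmentation

model = models.resnet50(pretrained=True).eval()
imgs = torch.rand(4, 3, 224, 224)  # placeholder batch with values in [0, 1]

identity = lambda x: x  # assumed second stage of each 'augs' pair

aug_list = {
    'augs': [
        # each entry is applied as entry[1](entry[0](x)) inside attack()
        (functools.partial(augmentation, op_type='resize_padding', magnitude=3), identity),
        (functools.partial(augmentation, op_type='rotation', magnitude=2), identity),
    ],
    # zip(adv_x_list, weights) pairs the first weight with the un-augmented copy
    'weights': [1.0, 0.5, 0.5],
}

adv = attack(imgs, model, aug_list=aug_list,
             eps=5, eps_iter=2, nb_iter=3, ord=2,  # 0-255 units, rescaled to an L2 ball internally
             momentum_mu=1.0, clip_min=0, clip_max=1)
```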
104 changes: 56 additions & 48 deletions aug_search.py
@@ -1,70 +1,78 @@
import torch
import math

import random

# TODO: color augmentation
AUG_TYPE = {0: 'resize_padding', 1: 'translation', 2: 'rotation',
3: 'gaussian_noise', 4: 'horizontal_flip', 5: 'vertical_flip'}

def augmentation(op_type, magnitude):
def augmentation(img_tensor, op_type, magnitude):
    ''' Augmentation that is capable of backward passes.
returns a function that takes img tensor values from 0 to 1
with given magnitude, augmentations are done with random directions.
returns augmented img tensor
'''
if op_type == 'resize_padding':
def aug_func(img_tensor):
img_w = img_tensor.shape[2]
img_h = img_tensor.shape[3]
w_modified = 2*int(0.01*magnitude*img_w)
h_modified = 2*int(0.01*magnitude*img_h)
img_tensor = torch.nn.functional.interpolate(img_tensor,
[img_w-w_modified, img_h-h_modified])
h_padding = h_modified//2
w_padding = w_modified//2
img_tensor = torch.nn.functional.pad(img_tensor, (h_padding, h_padding, w_padding, w_padding),
mode='constant', value=0)
return img_tensor
img_w = img_tensor.shape[2]
img_h = img_tensor.shape[3]
w_modified = 2*int(0.01*magnitude*img_w)
h_modified = 2*int(0.01*magnitude*img_h)
img_tensor = torch.nn.functional.interpolate(img_tensor,
[img_w-w_modified, img_h-h_modified])

h_padding_t = random.choice(range(0, h_modified+1))
h_padding_b = h_modified - h_padding_t
w_padding_l = random.choice(range(0, w_modified+1))
w_padding_r = w_modified - w_padding_l
#h_padding = h_modified//2
#w_padding = w_modified//2
img_tensor = torch.nn.functional.pad(img_tensor, (h_padding_t, h_padding_b, w_padding_l, w_padding_r),
mode='constant', value=0)
return img_tensor

elif op_type == 'translation':
def aug_func(img_tensor):
magnitude_ = magnitude-5 # 0to11 -> -5to5
w_modified = 0.03*magnitude_
h_modified = 0.03*magnitude_
trans_M = torch.Tensor([[1., 0., w_modified],
[0., 1., h_modified]])
batch_size = img_tensor.shape[0]
trans_M = trans_M.unsqueeze(0).repeat(batch_size, 1, 1)
grid = torch.nn.functional.affine_grid(trans_M, img_tensor.shape)
img_tensor = torch.nn.functional.grid_sample(img_tensor, grid.to(img_tensor.device))
return img_tensor

w_direction = random.choice([-1, 1])
h_direction = random.choice([-1, 1])

#magnitude_ = magnitude-5 # 0to11 -> -5to5
magnitude_ = magnitude
w_modified = w_direction*0.02*magnitude_
h_modified = h_direction*0.02*magnitude_
trans_M = torch.Tensor([[1., 0., w_modified],
[0., 1., h_modified]])
batch_size = img_tensor.shape[0]
trans_M = trans_M.unsqueeze(0).repeat(batch_size, 1, 1)
grid = torch.nn.functional.affine_grid(trans_M, img_tensor.shape)
img_tensor = torch.nn.functional.grid_sample(img_tensor, grid.to(img_tensor.device))
return img_tensor

elif op_type == 'rotation':
def aug_func(img_tensor):
magnitude_ = magnitude-5 # 0to11 -> -5to5
rot_deg = torch.tensor(math.pi*magnitude_/30.) # -pi/6 to pi/6
rot_M = torch.Tensor([[torch.cos(rot_deg), -torch.sin(rot_deg), 0],
[torch.sin(rot_deg), torch.cos(rot_deg), 0]])
batch_size = img_tensor.shape[0]
rot_M = rot_M.unsqueeze(0).repeat(batch_size, 1, 1)
grid = torch.nn.functional.affine_grid(rot_M, img_tensor.shape)
img_tensor = torch.nn.functional.grid_sample(img_tensor, grid.to(img_tensor.device))
return img_tensor
rot_direction = random.choice([-1, 1])
#magnitude_ = magnitude-5 # 0to11 -> -5to5
magnitude_ = magnitude
rot_deg = torch.tensor(rot_direction*math.pi*magnitude_/60.) # -pi/6 to pi/6
rot_M = torch.Tensor([[torch.cos(rot_deg), -torch.sin(rot_deg), 0],
[torch.sin(rot_deg), torch.cos(rot_deg), 0]])
batch_size = img_tensor.shape[0]
rot_M = rot_M.unsqueeze(0).repeat(batch_size, 1, 1)
grid = torch.nn.functional.affine_grid(rot_M, img_tensor.shape)
img_tensor = torch.nn.functional.grid_sample(img_tensor, grid.to(img_tensor.device))
return img_tensor

elif op_type == 'gaussian_noise':
def aug_func(img_tensor):
noise = torch.randn_like(img_tensor)
img_tensor = img_tensor + noise * magnitude/60
img_tensor = torch.clamp(img_tensor, 0, 1)
return img_tensor
noise = torch.randn_like(img_tensor)
img_tensor = img_tensor + noise * magnitude/60
img_tensor = torch.clamp(img_tensor, 0, 1)
return img_tensor

elif op_type == 'horizontal_flip':
def aug_func(img_tensor):
return torch.flip(img_tensor, [3])
return torch.flip(img_tensor, [3])

elif op_type == 'vertical_flip':
def aug_func(img_tensor):
return torch.flip(img_tensor, [2])
return torch.flip(img_tensor, [2])
else:
print(op_type)
assert False, "Unknown augmentation type."

return aug_func
return img_tensor
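
The rewrite above changes `augmentation()` from a closure factory into a function that transforms the image tensor directly, picking a random direction on each call (a random padding split for `resize_padding`, a random sign for `translation` and `rotation`). Below is a small, hypothetical usage sketch; the magnitudes and image size are illustrative only.

```python
# Hypothetical sketch of calling the rewritten augmentation() directly.
# Directions and padding offsets are sampled per call, so outputs vary by design.
import torch

from aug_search import AUG_TYPE, augmentation

img = torch.rand(1, 3, 224, 224)  # single image with values in [0, 1]

# shrink each spatial dim by 2*int(0.01*magnitude*dim) pixels, then zero-pad
# back to the original size at a random offset
padded = augmentation(img, 'resize_padding', magnitude=3)
assert padded.shape == img.shape

# AUG_TYPE maps integer op ids to op names
rotated = augmentation(img, AUG_TYPE[2], magnitude=2)  # random rotation of +/- magnitude*pi/60 rad
```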


21 changes: 21 additions & 0 deletions benchmark/attacks/DI/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2018 Cihang Xie

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
41 changes: 41 additions & 0 deletions benchmark/attacks/DI/README.md
@@ -0,0 +1,41 @@
# Improving Transferability of Adversarial Examples with Input Diversity

This paper proposes improving the transferability of adversarial examples by creating diverse input patterns (https://arxiv.org/abs/1803.06978). Instead of only using the original images to generate adversarial examples, the proposed method, Diverse Input Iterative Fast Gradient Sign Method (DI<sup>2</sup>-FGSM), applies random transformations to the input images at each iteration. The generated adversarial examples are much more transferable than those generated by FGSM and I-FGSM. An example is shown below:

![demo](demo.png)
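
The core building block is a stochastic input transform applied before each gradient computation: with probability p, the image is randomly resized and then zero-padded back to a fixed size at a random offset; otherwise it is left unchanged. A minimal PyTorch sketch follows; the 299 → random-in-[299, 330) → pad-to-330 sizes follow the paper's Inception-v3 setting but should be treated as illustrative, and this is a re-implementation sketch rather than the TensorFlow code shipped in this directory.

```python
# Sketch of the diverse-input transform T(x; p) used by DI^2-FGSM (PyTorch
# re-implementation for illustration; the code shipped here is TensorFlow).
import random

import torch
import torch.nn.functional as F

def input_diversity(x, low=299, high=330, p=0.5):
    """Randomly resize and pad x with probability p, else return x unchanged."""
    if random.random() >= p:
        return x
    rnd = random.randint(low, high - 1)              # random target size in [low, high)
    resized = F.interpolate(x, size=(rnd, rnd), mode='nearest')
    pad_total = high - rnd
    pad_left = random.randint(0, pad_total)
    pad_top = random.randint(0, pad_total)
    return F.pad(resized,
                 (pad_left, pad_total - pad_left,    # left, right
                  pad_top, pad_total - pad_top),     # top, bottom
                 mode='constant', value=0)
```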


## Extension
To improve the transferability further, we
- integrate momentum term into the attack process (https://arxiv.org/abs/1710.06081);
- attack multiple networks simultaneously (https://arxiv.org/abs/1611.02770).

Evaluated against the top-3 defense submissions and 3 official baselines from the NIPS 2017 adversarial competition (https://www.kaggle.com/c/nips-2017-non-targeted-adversarial-attack), this enhanced attack reaches an average success rate of 73.0%, outperforming the top attack submission in the NIPS competition by a large margin of 6.6%. Please refer to Table 3 in the paper for details.
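
For the momentum term mentioned above, the gradient is L1-normalized and accumulated across iterations, and the sign step is taken on the accumulated direction (https://arxiv.org/abs/1710.06081); combined with the diverse-input transform this gives M-DI<sup>2</sup>-FGSM. The sketch below reuses the `input_diversity` function from the earlier sketch and is an illustration under assumed hyper-parameters, not this repository's implementation.

```python
# Illustrative M-DI^2-FGSM loop: momentum over L1-normalized gradients plus the
# diverse-input transform. model, x, y and the hyper-parameters are placeholders.
import torch
import torch.nn.functional as F

def m_di2_fgsm(model, x, y, eps=16/255, alpha=2/255, mu=1.0, n_iter=10):
    x_adv = x.clone().detach()
    g = torch.zeros_like(x)                              # accumulated (momentum) gradient
    for _ in range(n_iter):
        x_adv.requires_grad_(True)
        loss = F.cross_entropy(model(input_diversity(x_adv)), y)
        grad, = torch.autograd.grad(loss, x_adv)
        # momentum accumulation with per-example L1 normalization
        g = mu * g + grad / grad.abs().mean(dim=(1, 2, 3), keepdim=True)
        x_adv = x_adv.detach() + alpha * g.sign()        # sign step on the accumulated direction
        x_adv = torch.min(torch.max(x_adv, x - eps), x + eps).clamp(0, 1)
    return x_adv.detach()
```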


## Relationships between different attacks

Different attacks can be related via different parameter settings, as shown below:

<img src="https://github.com/cihangxie/DI-2-FGSM/blob/master/relationship.png" width="50%" height="50%">

## Inception_v3 model

- http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz

## Acknowledgements

- For the implementations of random resizing and random padding (https://arxiv.org/abs/1711.01991), the original version is available at https://github.com/cihangxie/NIPS2017_adv_challenge_defense. We adopt a more user-friendly re-implementation, https://github.com/anishathalye/obfuscated-gradients, in our repo for release purposes only.

## Citing this work

If you find this work useful in your research, please consider citing:

@inproceedings{xie2019improving,
title={Improving Transferability of Adversarial Examples with Input Diversity},
author={Xie, Cihang and Zhang, Zhishuai and Zhou, Yuyin and Bai, Song and Wang, Jianyu and Ren, Zhou and Yuille, Alan},
    booktitle={Computer Vision and Pattern Recognition},
year={2019},
organization={IEEE}
}
Binary file added benchmark/attacks/DI/__MACOSX/._DI-2-FGSM-master
Binary file not shown.
