-
Notifications
You must be signed in to change notification settings - Fork 26
/
integrated_gradients.py
27 lines (25 loc) · 1.38 KB
/
integrated_gradients.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import numpy as np
import torch
from utils import pre_processing
# integrated gradients
def integrated_gradients(inputs, model, target_label_idx, predict_and_gradients, baseline, steps=50, cuda=False):
    """Approximate integrated gradients of `model` for `inputs` w.r.t. a baseline.

    Interpolates `steps + 1` points on the straight line from `baseline` to
    `inputs`, averages the gradients returned by `predict_and_gradients` over
    those points (left Riemann sum: the endpoint gradient is dropped), and
    scales the result by the input/baseline difference.

    inputs: raw input (presumably an HWC image array — TODO confirm against caller).
    model: the network passed through to `predict_and_gradients`.
    target_label_idx: class index whose score is attributed.
    predict_and_gradients: callable returning (gradients, predictions) for a
        list of scaled inputs.
    baseline: reference input; `None` means an all-zeros baseline.
    steps: number of interpolation intervals.
    cuda: forwarded to `predict_and_gradients` and `pre_processing`.
    Returns an HWC numpy array of attributions.
    """
    if baseline is None:
        baseline = 0 * inputs
    # Points along the straight-line path: baseline + k/steps * (inputs - baseline).
    fractions = [float(k) / steps for k in range(steps + 1)]
    path_inputs = [baseline + frac * (inputs - baseline) for frac in fractions]
    grads, _ = predict_and_gradients(path_inputs, model, target_label_idx, cuda)
    # Left Riemann approximation of the path integral: skip the last gradient.
    mean_grad = np.average(grads[:-1], axis=0)
    # CHW -> HWC so it lines up with the (transposed) input difference below.
    mean_grad = np.transpose(mean_grad, (1, 2, 0))
    diff = pre_processing(inputs, cuda) - pre_processing(baseline, cuda)
    diff = diff.detach().squeeze(0).cpu().numpy()
    diff = np.transpose(diff, (1, 2, 0))
    return diff * mean_grad
def random_baseline_integrated_gradients(inputs, model, target_label_idx, predict_and_gradients, steps, num_random_trials, cuda):
    """Average integrated-gradients attributions over several random baselines.

    Runs `integrated_gradients` `num_random_trials` times, each with a fresh
    uniform-random baseline scaled by 255.0 (assumes inputs are in the
    [0, 255] pixel range — TODO confirm), and returns the element-wise mean
    of the per-trial attributions.
    """
    trial_attributions = []
    for trial in range(num_random_trials):
        # Fresh random baseline with the same shape as the input.
        random_baseline = 255.0 * np.random.random(inputs.shape)
        attribution = integrated_gradients(
            inputs, model, target_label_idx, predict_and_gradients,
            baseline=random_baseline, steps=steps, cuda=cuda)
        trial_attributions.append(attribution)
        print('the trial number is: {}'.format(trial))
    return np.average(np.array(trial_attributions), axis=0)