# train_cifar10_dmi.py
import os
import time
import math
import argparse
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets, transforms
from utils import train, test, get_pred
from dataset import DATASET_CUSTOM
from networks.wideresnet import Wide_ResNet
from augmentation.autoaugment import CIFAR10Policy
from augmentation.cutout import Cutout


def log(path, msg):
    # Print to stdout and append to the experiment log file
    print(msg)
    with open(path, 'a') as file:
        file.write(msg)


def main():
    # Settings
    parser = argparse.ArgumentParser(description='PyTorch CIFAR-10')
    parser.add_argument('--batch_size', type=int, default=128, help='input batch size for training')
    parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train')
    parser.add_argument('--lr', type=float, default=1e-6, help='learning rate')
    parser.add_argument('--dp', type=float, default=0.2, help='dropout rate')
    parser.add_argument('--aug', type=str, default='strong', help='type of data augmentation {none, standard, strong}')
    parser.add_argument('--noise_pattern', type=str, default='uniform', help='noise pattern')
    parser.add_argument('--noise_rate', type=float, default=0.2, help='noise rate')
    parser.add_argument('--val_size', type=int, default=5000, help='size of (noisy) validation set')
    parser.add_argument('--save_model', action='store_true', default=False, help='for saving the current model')
    parser.add_argument('--teacher_path', type=str, default=None, help='path of the teacher model')
    parser.add_argument('--init_path', type=str, default=None, help='DMI requires a pretrained model to initialize')
    parser.add_argument('--gpu_id', type=int, default=0, help='index of gpu to use')
    parser.add_argument('--test_batch_size', type=int, default=200, help='input batch size for testing')
    parser.add_argument('--seed', type=int, default=0, help='random seed (default: 0)')
    args = parser.parse_args()

    if args.teacher_path is None:
        exp_name = 'dmi_cifar10_{}{:.1f}_dp{:.1f}_aug{}_seed{}'.format(
            args.noise_pattern, args.noise_rate, args.dp, args.aug, args.seed)
    else:
        exp_name = 'dmi_cifar10_{}{:.1f}_dp{:.1f}_aug{}_student_seed{}'.format(
            args.noise_pattern, args.noise_rate, args.dp, args.aug, args.seed)
    logpath = '{}.txt'.format(exp_name)
    log(logpath, 'Settings: {}\n'.format(args))

    torch.manual_seed(args.seed)
    device = torch.device('cuda:' + str(args.gpu_id) if torch.cuda.is_available() else 'cpu')
    # Datasets
    root = './data/CIFAR10'
    num_classes = 10
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    if args.aug == 'standard':
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    elif args.aug == 'strong':
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4, fill=128),
            transforms.RandomHorizontalFlip(),
            CIFAR10Policy(),
            transforms.ToTensor(),
            Cutout(n_holes=1, length=16),  # https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    else:
        train_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset = datasets.CIFAR10(root, train=True, download=True)
    data, label = dataset.data, dataset.targets
    # Precomputed noisy labels: a CSV with a 'label_noisy' column aligned with
    # the CIFAR-10 training order
    label_noisy = list(pd.read_csv(os.path.join('./data/CIFAR10/label_noisy', args.noise_pattern + str(args.noise_rate) + '.csv'))['label_noisy'].values.astype(int))
    # Hold out the last val_size samples as a (noisy) validation set
    train_dataset = DATASET_CUSTOM(root, data[:-args.val_size], label_noisy[:-args.val_size], transform=train_transform)
    val_dataset = DATASET_CUSTOM(root, data[-args.val_size:], label_noisy[-args.val_size:], transform=test_transform)
    test_dataset = datasets.CIFAR10(root, train=False, transform=test_transform)
    # Optional distillation: replace the noisy training labels with the
    # predictions of a pretrained teacher model
    if args.teacher_path is not None:
        teacher_model = Wide_ResNet(args.dp, use_log_softmax=False).to(device)
        teacher_model.load_state_dict(torch.load(args.teacher_path, map_location=device))
        distill_dataset = DATASET_CUSTOM(root, data[:-args.val_size], label_noisy[:-args.val_size], transform=test_transform)
        pred = get_pred(teacher_model, device, distill_dataset, args.test_batch_size)
        log(logpath, 'distilled noise rate: {:.2f}\n'.format(1 - (np.array(label[:-args.val_size]) == pred).sum() / len(pred)))
        train_dataset.targets = pred
        del teacher_model
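    # Note: the 'distilled noise rate' above compares the teacher's predictions
    # against the clean CIFAR-10 labels, i.e. it measures how noisy the
    # distilled training targets still are; the clean labels are used only for
    # this diagnostic, never for training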

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False, **kwargs)

    # Building model
    def DMI_loss(output, target):
        # Softmax probabilities: (batch, num_classes)
        outputs = F.softmax(output, dim=1)
        # One-hot encode the targets on CPU, then move to the right device
        targets = target.reshape(target.size(0), 1).cpu()
        y_onehot = torch.zeros(target.size(0), num_classes)
        y_onehot.scatter_(1, targets, 1)
        y_onehot = y_onehot.transpose(0, 1).to(device)
        # (num_classes, batch) @ (batch, num_classes) -> (num_classes, num_classes)
        mat = y_onehot @ outputs
        return -1.0 * torch.log(torch.abs(torch.det(mat.float())) + 0.001)
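    # For reference, a sketch of the objective this computes (the
    # determinant-based mutual information loss of L_DMI): with Y the
    # (num_classes x batch) one-hot label matrix and P the
    # (batch x num_classes) softmax matrix, the loss is
    #     L = -log(|det(Y @ P)| + 0.001)
    # The 0.001 term keeps the log finite when Y @ P is (near-)singular,
    # e.g. when some class does not appear in the batch.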
    model = Wide_ResNet(args.dp, use_log_softmax=False).to(device)
    # DMI fine-tunes from a pretrained checkpoint (see the --init_path help)
    assert args.init_path is not None, 'DMI requires a pretrained model; pass --init_path'
    model.load_state_dict(torch.load(args.init_path, map_location=device))

    # Training (constant learning rate, so the optimizer is created once)
    val_best, epoch_best, test_at_best = 0, 0, 0
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    for epoch in range(1, args.epochs + 1):
        t0 = time.time()
        _, train_acc = train(args, model, device, train_loader, optimizer, epoch, criterion=DMI_loss)
        _, val_acc = test(args, model, device, val_loader, criterion=F.cross_entropy)
        _, test_acc = test(args, model, device, test_loader, criterion=F.cross_entropy)
        if val_acc > val_best:
            val_best, test_at_best, epoch_best = val_acc, test_acc, epoch
            if args.save_model:
                torch.save(model.state_dict(), '{}_best.pth'.format(exp_name))
        log(logpath, 'Epoch: {}/{}, Time: {:.1f}s. '.format(epoch, args.epochs, time.time() - t0))
        log(logpath, 'Train: {:.2f}%, Val: {:.2f}%, Test: {:.2f}%; Val_best: {:.2f}%, Test_at_best: {:.2f}%, Epoch_best: {}\n'.format(
            100 * train_acc, 100 * val_acc, 100 * test_acc, 100 * val_best, 100 * test_at_best, epoch_best))

    # Saving
    if args.save_model:
        torch.save(model.state_dict(), '{}_last.pth'.format(exp_name))


if __name__ == '__main__':
    main()
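
# Example invocation (illustrative; it assumes the noisy-label CSV exists under
# ./data/CIFAR10/label_noisy/ and that 'ce_init.pth' is a checkpoint from a
# prior cross-entropy training run; the checkpoint name here is hypothetical):
#   python train_cifar10_dmi.py --noise_pattern uniform --noise_rate 0.2 \
#       --aug strong --init_path ce_init.pth --save_model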