
Commit
feat(tqdm): [progress bar] add tqdm in test.py and train.py
eddiehe99 committed Oct 7, 2023
1 parent 4dae9cd commit 1d64741
Showing 3 changed files with 34 additions and 8 deletions.
1 change: 1 addition & 0 deletions requirements.txt
@@ -4,3 +4,4 @@ timm >= 0.6.0
scipy
efficientnet_pytorch
pytorch_metric_learning
tqdm
10 changes: 7 additions & 3 deletions test.py
@@ -17,6 +17,7 @@
import scipy.io
import yaml
import math
from tqdm import tqdm
from model import ft_net, ft_net_dense, ft_net_hr, ft_net_swin, ft_net_swinv2, ft_net_efficient, ft_net_NAS, ft_net_convnext, PCB, PCB_test
from utils import fuse_all_conv_bn
#fp16
@@ -172,7 +173,8 @@ def fliplr(img):

def extract_feature(model,dataloaders):
#features = torch.FloatTensor()
count = 0
# count = 0
pbar = tqdm()
if opt.linear_num <= 0:
if opt.use_swin or opt.use_swinv2 or opt.use_dense or opt.use_convnext:
opt.linear_num = 1024
@@ -186,8 +188,9 @@ def extract_feature(model,dataloaders):
for iter, data in enumerate(dataloaders):
img, label = data
n, c, h, w = img.size()
count += n
print(count)
# count += n
# print(count)
pbar.update(n)
ff = torch.FloatTensor(n,opt.linear_num).zero_().cuda()

if opt.PCB:
@@ -222,6 +225,7 @@ def extract_feature(model,dataloaders):
start = iter*opt.batchsize
end = min( (iter+1)*opt.batchsize, len(dataloaders.dataset))
features[ start:end, :] = ff
pbar.close()
return features

def get_id(img_path):
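For reference, the test.py change uses tqdm in manual mode rather than wrapping the dataloader: a bare `tqdm()` bar with no total is advanced by the batch size and closed when the loop ends. A minimal sketch of that pattern follows; the `extract_feature_sketch` helper and the dummy batches are illustrative placeholders, not code from the repository.

```python
# Minimal sketch of the manual tqdm pattern used in extract_feature():
# the bar is created without a total, so it shows a running count and rate.
from tqdm import tqdm

def extract_feature_sketch(batches):
    pbar = tqdm()          # no total: counts processed samples as they arrive
    for batch in batches:
        n = len(batch)     # size of the current batch
        pbar.update(n)     # advance the bar by the number of samples in the batch
        # ... feature extraction for `batch` would happen here ...
    pbar.close()           # always close the bar after the loop

# hypothetical usage with dummy "batches" of sample IDs
extract_feature_sketch([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]])
```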
31 changes: 26 additions & 5 deletions train.py
@@ -15,6 +15,8 @@
#from PIL import Image
import time
import os
import collections
from tqdm import tqdm
from model import ft_net, ft_net_dense, ft_net_hr, ft_net_swin, ft_net_swinv2, ft_net_convnext, ft_net_efficient, ft_net_NAS, PCB
from random_erasing import RandomErasing
from dgfolder import DGFolder
@@ -233,7 +235,7 @@ def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
criterion_sphere = losses.SphereFaceLoss(num_classes=opt.nclasses, embedding_size=512, margin=4)
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# print('-' * 10)

# Each epoch has a training and validation phase
for phase in ['train', 'val']:
@@ -242,13 +244,19 @@ def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
else:
model.train(False) # Set model to evaluate mode

# Phases 'train' and 'val' are visualized in two separate progress bars
pbar = tqdm()
pbar.reset(total=len(dataloaders[phase].dataset))
ordered_dict = collections.OrderedDict(phase="", Loss="", Acc="")

running_loss = 0.0
running_corrects = 0.0
# Iterate over data.
for iter, data in enumerate(dataloaders[phase]):
# get the inputs
inputs, labels = data
now_batch_size,c,h,w = inputs.shape
pbar.update(now_batch_size) # update the pbar even in the last batch
if now_batch_size<opt.batchsize: # skip the last batch
continue
#print(inputs.shape)
@@ -261,7 +269,7 @@ def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
# if we use low precision, input also need to be fp16
#if fp16:
# inputs = inputs.half()

# zero the parameter gradients
optimizer.zero_grad()

@@ -377,16 +385,29 @@ def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
# statistics
if int(version[0])>0 or int(version[2]) > 3: # for the new version like 0.4.0, 0.5.0 and 1.0.0
running_loss += loss.item() * now_batch_size
ordered_dict["Loss"] = f"{loss.item():.4f}"
else : # for the old version like 0.3.0 and 0.3.1
running_loss += loss.data[0] * now_batch_size
ordered_dict["Loss"] = f"{loss.data[0]:.4f}"
del loss
running_corrects += float(torch.sum(preds == labels.data))
# Refresh the progress bar in every batch
ordered_dict["phase"] = phase
ordered_dict["Acc"] = f"{(float(torch.sum(preds == labels.data)) / now_batch_size):.4f}"
pbar.set_postfix(ordered_dict=ordered_dict)

epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]

print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# print('{} Loss: {:.4f} Acc: {:.4f}'.format(
# phase, epoch_loss, epoch_acc))
ordered_dict["phase"] = phase
ordered_dict["Loss"] = f"{epoch_loss:.4f}"
ordered_dict["Acc"] = f"{epoch_acc:.4f}"
pbar.set_postfix(ordered_dict=ordered_dict)
pbar.close()

y_loss[phase].append(epoch_loss)
y_err[phase].append(1.0-epoch_acc)
@@ -400,7 +421,7 @@ def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
if phase == 'val':
draw_curve(epoch)
if phase == 'train':
scheduler.step()
scheduler.step()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
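The train.py change follows a slightly richer pattern: one bar per phase, reset to the dataset size at the start of the phase, with the running loss and accuracy pushed into the bar's postfix on every batch. Below is a self-contained sketch of that pattern, assuming tqdm >= 4.32 (for `reset`); the `run_phase_sketch` helper and the dummy metric values are placeholders for illustration, not repository code.

```python
# Minimal sketch of the per-phase progress bar pattern used in train_model():
# the bar is reset to the dataset size for each phase and annotated with
# running loss/accuracy via set_postfix.
import collections
from tqdm import tqdm

def run_phase_sketch(phase, num_samples, batch_size):
    pbar = tqdm()
    pbar.reset(total=num_samples)                    # one bar per phase
    ordered_dict = collections.OrderedDict(phase=phase, Loss="", Acc="")
    for start in range(0, num_samples, batch_size):
        n = min(batch_size, num_samples - start)
        pbar.update(n)                               # advance by the batch size
        # placeholder metrics; the real code uses the batch loss and accuracy
        ordered_dict["Loss"] = f"{0.1234:.4f}"
        ordered_dict["Acc"] = f"{0.9876:.4f}"
        pbar.set_postfix(ordered_dict=ordered_dict)  # refresh the bar suffix
    pbar.close()

# hypothetical usage
run_phase_sketch("train", num_samples=100, batch_size=32)
```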
