Skip to content

Commit

Permalink
Update format and docs for Pre-trained Model Selection
Browse files — browse the repository at this point in the history
  • Loading branch information
thucbx99 committed Aug 3, 2022
1 parent 1f23eee commit e389504
Show file tree
Hide file tree
Showing 15 changed files with 177 additions and 146 deletions.
1 change: 1 addition & 0 deletions docs/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ Transfer Learning
tllib/reweight
tllib/normalization
tllib/regularization
tllib/ranking


.. toctree::
Expand Down
37 changes: 37 additions & 0 deletions docs/tllib/ranking.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
=====================
Ranking
=====================



.. _H_score:

H-score
-------------------------------------------

.. autofunction:: tllib.ranking.hscore.h_score


.. _LEEP:

LEEP: Log Expected Empirical Prediction
-------------------------------------------

.. autofunction:: tllib.ranking.leep.log_expected_empirical_prediction


.. _NCE:

NCE: Negative Conditional Entropy
-------------------------------------------

.. autofunction:: tllib.ranking.nce.negative_conditional_entropy


.. _LogME:

LogME: Log Maximum Evidence
-------------------------------------------

.. autofunction:: tllib.ranking.logme.log_maximum_evidence

33 changes: 18 additions & 15 deletions examples/model_selection/hscore.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
sys.path.append('.')
import utils


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


Expand All @@ -36,22 +35,26 @@ def main(args):
data_transform = utils.get_transform(resizing=args.resizing)
print("data_transform: ", data_transform)
model = utils.get_model(args.arch, args.pretrained).to(device)
score_dataset, num_classes = utils.get_dataset(args.data, args.root, data_transform, args.sample_rate, args.num_samples_per_classes)
score_loader = DataLoader(score_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
score_dataset, num_classes = utils.get_dataset(args.data, args.root, data_transform, args.sample_rate,
args.num_samples_per_classes)
score_loader = DataLoader(score_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,
pin_memory=True)
print(f'Using {len(score_dataset)} samples for ranking')
features, predictions, targets = utils.forwarding_dataset(score_loader, model, layer=eval(f'model.{args.layer}'), device=device)
features, predictions, targets = utils.forwarding_dataset(score_loader, model,
layer=eval(f'model.{args.layer}'), device=device)
if args.save_features:
np.save(os.path.join(logger.get_save_dir(), 'features.npy'), features)
np.save(os.path.join(logger.get_save_dir(), 'preds.npy'), predictions)
np.save(os.path.join(logger.get_save_dir(), 'targets.npy'), targets)

print('Conducting transferability calculation')
result = h_score(features, targets)

logger.write(f'# {result:.4f} # data_{args.data}_sr{args.sample_rate}_sc{args.num_samples_per_classes}_model_{args.arch}_layer_{args.layer}\n')

logger.write(
f'# {result:.4f} # data_{args.data}_sr{args.sample_rate}_sc{args.num_samples_per_classes}_model_{args.arch}_layer_{args.layer}\n')
print(f'Results saved in {logger.get_result_dir()}')
logger.close()


if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ranking pre-trained models with HScore')
Expand All @@ -68,22 +71,22 @@ def main(args):
parser.add_argument('-sc', '--num-samples-per-classes', default=None, type=int,
help='number of samples per classes.')
parser.add_argument('-b', '--batch-size', default=48, type=int,
metavar='N', help='mini-batch size (default: 48)')
metavar='N', help='mini-batch size (default: 48)')
parser.add_argument('--resizing', default='res.', type=str)

# model
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=utils.get_model_names(),
help='model to be ranked: ' +
' | '.join(utils.get_model_names()) +
' (default: resnet50)')
' | '.join(utils.get_model_names()) +
' (default: resnet50)')
parser.add_argument('-l', '--layer', default='fc',
help='before which layer features are extracted')
parser.add_argument('--pretrained', default=None,
help="pretrained checkpoint of the backbone. "
"(default: None, use the ImageNet supervised pretrained backbone)")
help="pretrained checkpoint of the backbone. "
"(default: None, use the ImageNet supervised pretrained backbone)")
parser.add_argument("--save_features", action='store_true',
help="whether to save extracted features")
help="whether to save extracted features")

args = parser.parse_args()
main(args)
main(args)
4 changes: 0 additions & 4 deletions examples/model_selection/hscore.sh
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
#!/usr/bin/env bash


# Ranking Pre-trained Model
# ======================================================================================================================
# CIFAR10
Expand Down Expand Up @@ -54,7 +53,6 @@ CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/caltech101 -d Caltech101 -a dense
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/caltech101 -d Caltech101 -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/caltech101 -d Caltech101 -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# DTD
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/dtd -d DTD -a resnet50 -l fc --save_features
Expand All @@ -81,7 +79,6 @@ CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/Oxford-IIIT -d OxfordIIITPets -a
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/Oxford-IIIT -d OxfordIIITPets -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/Oxford-IIIT -d OxfordIIITPets -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# StanfordCars
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/stanford_cars -d StanfordCars -a resnet50 -l fc --save_features
Expand All @@ -95,7 +92,6 @@ CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/stanford_cars -d StanfordCars -a
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/stanford_cars -d StanfordCars -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/stanford_cars -d StanfordCars -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# SUN397
CUDA_VISIBLE_DEVICES=0 python hscore.py ./data/SUN397 -d SUN397 -a resnet50 -l fc --save_features
Expand Down
36 changes: 20 additions & 16 deletions examples/model_selection/leep.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
sys.path.append('.')
import utils


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


Expand All @@ -36,24 +35,29 @@ def main(args):
data_transform = utils.get_transform(resizing=args.resizing)
print("data_transform: ", data_transform)
model = utils.get_model(args.arch, args.pretrained).to(device)
score_dataset, num_classes = utils.get_dataset(args.data, args.root, data_transform, args.sample_rate, args.num_samples_per_classes)
score_loader = DataLoader(score_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
score_dataset, num_classes = utils.get_dataset(args.data, args.root, data_transform, args.sample_rate,
args.num_samples_per_classes)
score_loader = DataLoader(score_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,
pin_memory=True)
print(f'Using {len(score_dataset)} samples for ranking')
features, predictions, targets = utils.forwarding_dataset(score_loader, model, layer=eval(f'model.{args.layer}'), device=device)
features, predictions, targets = utils.forwarding_dataset(score_loader, model,
layer=eval(f'model.{args.layer}'), device=device)
if args.save_features:
np.save(os.path.join(logger.get_save_dir(), 'features.npy'), features)
np.save(os.path.join(logger.get_save_dir(), 'preds.npy'), predictions)
np.save(os.path.join(logger.get_save_dir(), 'targets.npy'), targets)

print('Conducting transferability calculation')
result = leep(predictions, targets)

logger.write(f'# {result:.4f} # data_{args.data}_sr{args.sample_rate}_sc{args.num_samples_per_classes}_model_{args.arch}_layer_{args.layer}\n')

logger.write(
f'# {result:.4f} # data_{args.data}_sr{args.sample_rate}_sc{args.num_samples_per_classes}_model_{args.arch}_layer_{args.layer}\n')
logger.close()


if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ranking pre-trained models with LEEP (Log Expected Empirical Prediction)')
parser = argparse.ArgumentParser(
description='Ranking pre-trained models with LEEP (Log Expected Empirical Prediction)')

# dataset
parser.add_argument('root', metavar='DIR',
Expand All @@ -67,22 +71,22 @@ def main(args):
parser.add_argument('-sc', '--num-samples-per-classes', default=None, type=int,
help='number of samples per classes.')
parser.add_argument('-b', '--batch-size', default=48, type=int,
metavar='N', help='mini-batch size (default: 48)')
metavar='N', help='mini-batch size (default: 48)')
parser.add_argument('--resizing', default='res.', type=str)

# model
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=utils.get_model_names(),
help='model to be ranked: ' +
' | '.join(utils.get_model_names()) +
' (default: resnet50)')
' | '.join(utils.get_model_names()) +
' (default: resnet50)')
parser.add_argument('-l', '--layer', default='fc',
help='before which layer features are extracted')
parser.add_argument('--pretrained', default=None,
help="pretrained checkpoint of the backbone. "
"(default: None, use the ImageNet supervised pretrained backbone)")
help="pretrained checkpoint of the backbone. "
"(default: None, use the ImageNet supervised pretrained backbone)")
parser.add_argument("--save_features", action='store_true',
help="whether to save extracted features")
help="whether to save extracted features")

args = parser.parse_args()
main(args)
main(args)
4 changes: 0 additions & 4 deletions examples/model_selection/leep.sh
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
#!/usr/bin/env bash


# Ranking Pre-trained Model
# ======================================================================================================================
# CIFAR10
Expand Down Expand Up @@ -54,7 +53,6 @@ CUDA_VISIBLE_DEVICES=0 python leep.py ./data/caltech101 -d Caltech101 -a densene
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/caltech101 -d Caltech101 -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/caltech101 -d Caltech101 -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# DTD
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/dtd -d DTD -a resnet50 -l fc --save_features
Expand All @@ -81,7 +79,6 @@ CUDA_VISIBLE_DEVICES=0 python leep.py ./data/Oxford-IIIT -d OxfordIIITPets -a de
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/Oxford-IIIT -d OxfordIIITPets -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/Oxford-IIIT -d OxfordIIITPets -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# StanfordCars
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/stanford_cars -d StanfordCars -a resnet50 -l fc --save_features
Expand All @@ -95,7 +92,6 @@ CUDA_VISIBLE_DEVICES=0 python leep.py ./data/stanford_cars -d StanfordCars -a de
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/stanford_cars -d StanfordCars -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/stanford_cars -d StanfordCars -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# SUN397
CUDA_VISIBLE_DEVICES=0 python leep.py ./data/SUN397 -d SUN397 -a resnet50 -l fc --save_features
Expand Down
32 changes: 17 additions & 15 deletions examples/model_selection/logme.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
sys.path.append('.')
import utils


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


Expand All @@ -26,7 +25,6 @@ def main(args):
print(args)
print(f'Calc Transferabilities of {args.arch} on {args.data}')


try:
features = np.load(os.path.join(logger.get_save_dir(), 'features.npy'))
predictions = np.load(os.path.join(logger.get_save_dir(), 'preds.npy'))
Expand All @@ -37,21 +35,25 @@ def main(args):
data_transform = utils.get_transform(resizing=args.resizing)
print("data_transform: ", data_transform)
model = utils.get_model(args.arch, args.pretrained).to(device)
score_dataset, num_classes = utils.get_dataset(args.data, args.root, data_transform, args.sample_rate, args.num_samples_per_classes)
score_loader = DataLoader(score_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
score_dataset, num_classes = utils.get_dataset(args.data, args.root, data_transform, args.sample_rate,
args.num_samples_per_classes)
score_loader = DataLoader(score_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,
pin_memory=True)
print(f'Using {len(score_dataset)} samples for ranking')
features, predictions, targets = utils.forwarding_dataset(score_loader, model, layer=eval(f'model.{args.layer}'), device=device)
features, predictions, targets = utils.forwarding_dataset(score_loader, model,
layer=eval(f'model.{args.layer}'), device=device)
if args.save_features:
np.save(os.path.join(logger.get_save_dir(), 'features.npy'), features)
np.save(os.path.join(logger.get_save_dir(), 'preds.npy'), predictions)
np.save(os.path.join(logger.get_save_dir(), 'targets.npy'), targets)

print('Conducting transferability calculation')
result = logme(features, targets)

logger.write(f'# {result:.4f} # data_{args.data}_sr{args.sample_rate}_sc{args.num_samples_per_classes}_model_{args.arch}_layer_{args.layer}\n')

logger.write(
f'# {result:.4f} # data_{args.data}_sr{args.sample_rate}_sc{args.num_samples_per_classes}_model_{args.arch}_layer_{args.layer}\n')
logger.close()


if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ranking pre-trained models with LogME (Log Maximum Evidence)')
Expand All @@ -68,22 +70,22 @@ def main(args):
parser.add_argument('-sc', '--num-samples-per-classes', default=None, type=int,
help='number of samples per classes.')
parser.add_argument('-b', '--batch-size', default=48, type=int,
metavar='N', help='mini-batch size (default: 48)')
metavar='N', help='mini-batch size (default: 48)')
parser.add_argument('--resizing', default='res.', type=str)

# model
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=utils.get_model_names(),
help='model to be ranked: ' +
' | '.join(utils.get_model_names()) +
' (default: resnet50)')
' | '.join(utils.get_model_names()) +
' (default: resnet50)')
parser.add_argument('-l', '--layer', default='fc',
help='before which layer features are extracted')
parser.add_argument('--pretrained', default=None,
help="pretrained checkpoint of the backbone. "
"(default: None, use the ImageNet supervised pretrained backbone)")
help="pretrained checkpoint of the backbone. "
"(default: None, use the ImageNet supervised pretrained backbone)")
parser.add_argument("--save_features", action='store_true',
help="whether to save extracted features")
help="whether to save extracted features")

args = parser.parse_args()
main(args)
main(args)
4 changes: 0 additions & 4 deletions examples/model_selection/logme.sh
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
#!/usr/bin/env bash


# Ranking Pre-trained Model
# ======================================================================================================================
# CIFAR10
Expand Down Expand Up @@ -54,7 +53,6 @@ CUDA_VISIBLE_DEVICES=0 python logme.py ./data/caltech101 -d Caltech101 -a densen
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/caltech101 -d Caltech101 -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/caltech101 -d Caltech101 -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# DTD
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/dtd -d DTD -a resnet50 -l fc --save_features
Expand All @@ -81,7 +79,6 @@ CUDA_VISIBLE_DEVICES=0 python logme.py ./data/Oxford-IIIT -d OxfordIIITPets -a d
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/Oxford-IIIT -d OxfordIIITPets -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/Oxford-IIIT -d OxfordIIITPets -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# StanfordCars
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/stanford_cars -d StanfordCars -a resnet50 -l fc --save_features
Expand All @@ -95,7 +92,6 @@ CUDA_VISIBLE_DEVICES=0 python logme.py ./data/stanford_cars -d StanfordCars -a d
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/stanford_cars -d StanfordCars -a mobilenet_v2 -l classifier[-1] --save_features
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/stanford_cars -d StanfordCars -a mnasnet1_0 -l classifier[-1] --save_features


# ======================================================================================================================
# SUN397
CUDA_VISIBLE_DEVICES=0 python logme.py ./data/SUN397 -d SUN397 -a resnet50 -l fc --save_features
Expand Down
Loading

0 comments on commit e389504

Please sign in to comment.