This repository has been archived by the owner on Jan 5, 2024. It is now read-only.

update tests/examples to MajajHong identifier; update model references; rename pytorch_model->torchvision_model (#63)

* update unit tests to use MajajHong identifier

* sneak in fixes to model references

* rename pytorch_model to torchvision_model

* update example with MajajHong identifier
mschrimpf authored Aug 9, 2020
1 parent f62551c commit 2d1e659
Showing 4 changed files with 29 additions and 36 deletions.
18 changes: 9 additions & 9 deletions candidate_models/base_models/__init__.py
@@ -15,12 +15,12 @@
 _logger = logging.getLogger(__name__)
 
 
-def pytorch_model(function, image_size):
+def torchvision_model(identifier, image_size):
     module = import_module(f'torchvision.models')
-    model_ctr = getattr(module, function)
+    model_ctr = getattr(module, identifier)
     from model_tools.activations.pytorch import load_preprocess_images
     preprocessing = functools.partial(load_preprocess_images, image_size=image_size)
-    wrapper = PytorchWrapper(identifier=function, model=model_ctr(pretrained=True), preprocessing=preprocessing)
+    wrapper = PytorchWrapper(identifier=identifier, model=model_ctr(pretrained=True), preprocessing=preprocessing)
     wrapper.image_size = image_size
     return wrapper
 
@@ -349,12 +349,12 @@ def __init__(self):
         super(BaseModelPool, self).__init__(reload=True)
 
         _key_functions = {
-            'alexnet': lambda: pytorch_model('alexnet', image_size=224),
-            'squeezenet1_0': lambda: pytorch_model('squeezenet1_0', image_size=224),
-            'squeezenet1_1': lambda: pytorch_model('squeezenet1_1', image_size=224),
-            'resnet-18': lambda: pytorch_model('resnet18', image_size=224),
-            'resnet-34': lambda: pytorch_model('resnet34', image_size=224),
-            'resnet-50-pytorch': lambda: pytorch_model('resnet50', image_size=224),
+            'alexnet': lambda: torchvision_model('alexnet', image_size=224),
+            'squeezenet1_0': lambda: torchvision_model('squeezenet1_0', image_size=224),
+            'squeezenet1_1': lambda: torchvision_model('squeezenet1_1', image_size=224),
+            'resnet-18': lambda: torchvision_model('resnet18', image_size=224),
+            'resnet-34': lambda: torchvision_model('resnet34', image_size=224),
+            'resnet-50-pytorch': lambda: torchvision_model('resnet50', image_size=224),
             'resnet-50-robust': lambda: robust_model('resnet50', image_size=224),
 
             'vgg-16': lambda: keras_model('vgg16', 'VGG16', image_size=224),
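For context, a minimal usage sketch of the renamed helper (not part of this commit; it assumes candidate_models and torchvision are installed, and that the identifier matches a constructor name in torchvision.models, as in the pool entries above):

# Sketch only: wrap a torchvision architecture as an activations model.
from candidate_models.base_models import torchvision_model

wrapper = torchvision_model('resnet18', image_size=224)  # identifier is passed straight to torchvision.models
print(wrapper.identifier, wrapper.image_size)  # expected: resnet18 224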
18 changes: 10 additions & 8 deletions candidate_models/base_models/models.csv
@@ -10,7 +10,7 @@ publisher = {Curran Associates, Inc.},
 url = {http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf}
 }
 ",0.577,
-CORnet-S,https://www.biorxiv.org/content/early/2018/09/04/408385,"@incollection{Kubilius2018CORnet,
+CORnet-S,https://papers.nips.cc/paper/9441-brain-like-object-recognition-with-high-performing-shallow-recurrent-anns,"@incollection{Kubilius2018CORnet,
 title = {CORnet: Modeling the Neural and Behavioral Mechanisms of Core Object Recognition},
 author = {Jonas Kubilius and Martin Schrimpf and Aran Nayebi and Daniel Bear and Daniel L. K. Yamins and James J. DiCarlo},
 year = {2018},
@@ -1376,11 +1376,13 @@ booktitle = {The European Conference on Computer Vision (ECCV)},
 month = {sep},
 year = {2018}
 } ",0.854,0.976
-fixres_resnext101_32x48d_wsl,http://openaccess.thecvf.com/content_ECCV_2018/html/Dhruv_Mahajan_Exploring_the_Limits_ECCV_2018_paper.html,"@article{touvron2019fixres,
-author = {Hugo, Touvron and Vedaldi, Andrea and Douze, Matthijs and J{\'e}gou, Herv{\'e}},
-title = ""{Fixing the train-test resolution discrepancy}"",
-journal = {arXiv preprint},
-url={https://arxiv.org/abs/1906.06423},
-year = {2019},
-month = {jun},
+fixres_resnext101_32x48d_wsl,http://papers.nips.cc/paper/9035-fixing-the-train-test-resolution-discrepancy,"@incollection{NIPS2019_9035,
+title = {Fixing the train-test resolution discrepancy},
+author = {Touvron, Hugo and Vedaldi, Andrea and Douze, Matthijs and Jegou, Herve},
+booktitle = {Advances in Neural Information Processing Systems 32},
+editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
+pages = {8252--8262},
+year = {2019},
+publisher = {Curran Associates, Inc.},
+url = {http://papers.nips.cc/paper/9035-fixing-the-train-test-resolution-discrepancy.pdf}
 }",0.863,
11 changes: 1 addition & 10 deletions examples/score-model.ipynb
@@ -285,7 +285,7 @@
 "name": "stderr",
 "text": [
 "alexnet is accessed again and reloaded\n",
-"cross-validation: 100%|██████████| 10/10 [00:21<00:00, 2.15s/it]\u001b[A\n",
+"cross-validation: 100%|██████████| 10/10 [00:21<00:00, 2.15s/it]\u001B[A\n",
 "layers: 100%|██████████| 7/7 [02:43<00:00, 23.40s/it]\n",
 "activations: 100%|██████████| 3200/3200 [00:24<00:00, 133.19it/s]\n",
 "layer packaging: 100%|██████████| 1/1 [00:00<00:00, 16.89it/s]\n",
@@ -336,15 +336,6 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.6.8"
-},
-"pycharm": {
-"stem_cell": {
-"cell_type": "raw",
-"source": [],
-"metadata": {
-"collapsed": false
-}
-}
 }
 },
 "nbformat": 4,
18 changes: 9 additions & 9 deletions tests/test___init__.py
@@ -33,38 +33,38 @@ def load(model_name=model_name, layer=layer, region=region, pca_components=pca_c
     def test_alexnet_conv2_V4(self):
         model = self.layer_candidate('alexnet', layer='features.5', region='V4', pca_components=1000)
         score = score_model(model_identifier='alexnet-f5-pca_1000', model=model,
-                            benchmark_identifier='dicarlo.Majaj2015.V4-pls')
+                            benchmark_identifier='dicarlo.MajajHong2015.V4-pls')
         assert score.raw.sel(aggregation='center').max() == approx(0.633703, abs=0.005)
 
     @pytest.mark.memory_intense
     def test_alexnet_conv5_V4(self):
         model = self.layer_candidate('alexnet', layer='features.12', region='V4', pca_components=1000)
         score = score_model(model_identifier='alexnet-f12-pca_1000', model=model,
-                            benchmark_identifier='dicarlo.Majaj2015.V4-pls')
+                            benchmark_identifier='dicarlo.MajajHong2015.V4-pls')
         assert score.raw.sel(aggregation='center') == approx(0.490769, abs=0.005)
 
     @pytest.mark.memory_intense
     def test_alexnet_conv5_IT(self):
         model = self.layer_candidate('alexnet', layer='features.12', region='IT', pca_components=1000)
         score = score_model(model_identifier='alexnet-f12-pca_1000', model=model,
-                            benchmark_identifier='dicarlo.Majaj2015.IT-pls')
+                            benchmark_identifier='dicarlo.MajajHong2015.IT-pls')
         assert score.raw.sel(aggregation='center') == approx(0.590345, abs=0.005)
 
     @pytest.mark.memory_intense
     def test_alexnet_conv3_IT_mask(self):
         model = self.layer_candidate('alexnet', layer='features.6', region='IT', pca_components=None)
         np.random.seed(123)
         score = score_model(model_identifier='alexnet-f6', model=model,
-                            benchmark_identifier='dicarlo.Majaj2015.IT-mask')
+                            benchmark_identifier='dicarlo.MajajHong2015.IT-mask')
         assert score.raw.sel(aggregation='center') == approx(0.607037, abs=0.005)
 
     @pytest.mark.memory_intense
     def test_repeat_same_result(self):
         model = self.layer_candidate('alexnet', layer='features.12', region='IT', pca_components=1000)
         score1 = score_model(model_identifier='alexnet-f12-pca_1000', model=model,
-                             benchmark_identifier='dicarlo.Majaj2015.IT-pls')
+                             benchmark_identifier='dicarlo.MajajHong2015.IT-pls')
         score2 = score_model(model_identifier='alexnet-f12-pca_1000', model=model,
-                             benchmark_identifier='dicarlo.Majaj2015.IT-pls')
+                             benchmark_identifier='dicarlo.MajajHong2015.IT-pls')
         assert (score1 == score2).all()
 
     def test_newmodel_pytorch(self):
@@ -104,7 +104,7 @@ def forward(self, x):
         candidate = TemporalIgnore(candidate)
 
         ceiled_score = score_model(model_identifier=model_id, model=candidate,
-                                   benchmark_identifier='dicarlo.Majaj2015.IT-pls')
+                                   benchmark_identifier='dicarlo.MajajHong2015.IT-pls')
         score = ceiled_score.raw
         assert score.sel(aggregation='center') == approx(.0820823, abs=.01)
 
@@ -132,14 +132,14 @@ class TestBrainTranslated:
         ('alexnet', .59033, True),
         ('CORnet-S', .600, False),
     ])
-    def test_Majaj2015ITpls(self, model_identifier, expected_score, attach_hook):
+    def test_MajajHong2015ITpls(self, model_identifier, expected_score, attach_hook):
         model = brain_translated_pool[model_identifier]
         if attach_hook:
             activations_model = model.layer_model._layer_model.activations_model
             LayerPCA.hook(activations_model, n_components=1000)
             identifier = activations_model.identifier + "-pca_1000"
             activations_model.identifier = identifier
-        score = score_model(model_identifier, 'dicarlo.Majaj2015.IT-pls', model=model)
+        score = score_model(model_identifier, 'dicarlo.MajajHong2015.IT-pls', model=model)
         assert score.raw.sel(aggregation='center') == approx(expected_score, abs=0.005)
 
     @pytest.mark.parametrize(['model_identifier', 'expected_score'], [
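For reference, a minimal scoring sketch against the renamed benchmark, mirroring the updated tests (assumptions: candidate_models and its Brain-Score dependencies are installed, and brain_translated_pool is exposed by candidate_models.model_commitments):

# Sketch only: score AlexNet on the renamed MajajHong2015 IT benchmark.
from candidate_models import score_model
from candidate_models.model_commitments import brain_translated_pool  # import path assumed

model = brain_translated_pool['alexnet']
score = score_model(model_identifier='alexnet', model=model,
                    benchmark_identifier='dicarlo.MajajHong2015.IT-pls')
print(score.sel(aggregation='center'))

As in test_MajajHong2015ITpls above, a LayerPCA hook (n_components=1000) can optionally be attached to the activations model before scoring to reduce layer features.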
