# visda2017_tgt_ours_pb_teachaug_directed.yaml
# @package _global_

# to execute this experiment run:
# python run.py experiment=visda2017_tgt_ours_pb_teachaug_directed.yaml
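
# `@package _global_` makes Hydra merge the keys in this file at the root of the composed
# config (instead of under the experiment group), which is why top-level groups such as
# `data`, `model`, `trainer` and `logger` can be overridden directly below.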

defaults:
  - override /trainer: default.yaml
  - override /model: ours_pbta_directed.yaml
  - override /data: tgt_teachaug_train_datamodule.yaml
  - override /callbacks: default.yaml
  - override /logger: many_loggers.yaml

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

# name of the run determines folder name in logs
# it's also accessed by loggers
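
# any value in this file can also be overridden from the command line, e.g. (values below
# are illustrative only):
#   python run.py experiment=visda2017_tgt_ours_pb_teachaug_directed.yaml seed=2 data.batch_size=32
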
seed: 1
source_task: "Synthetic"
target_task: "Real"
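
# VisDA-2017 is a synthetic-to-real benchmark: the model is adapted from rendered
# "Synthetic" images to the "Real" domain, which is the standard transfer direction.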

data:
  dataset:
    _target_: src.data.components.visda2017.VisDA2017
    root: ${paths.data_dir}/visda2017
    task: ${target_task}
    download: True
  batch_size: 64
  num_workers: 8
  pin_memory: True
  M: 1
  K: 1
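
# blocks with a `_target_` key are instantiated by Hydra (hydra.utils.instantiate), so the
# sibling keys are passed as constructor arguments; `M` and `K` belong to the datamodule,
# and their exact meaning (presumably the number of augmented views per sample) is defined
# in tgt_teachaug_train_datamodule.yaml.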

model:
  net:
    backbone:
      _target_: timm.create_model
      model_name: "tv_resnet101"
      pretrained: True
      num_classes: 0
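      # `num_classes: 0` makes timm return the backbone without its classification head
      # (feature extractor only); classification is handled by the bottleneck/head below.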
    num_classes: 12
    bottleneck:
      _target_: src.models.components.net_utils.BaseBottleneck
      in_features: 2048
      out_features: ${model.net.bottleneck_dim}
      apply_init: False
    bottleneck_dim: 256
    head:
      _target_: src.models.components.net_utils.BaseHead
      in_features: ${model.net.bottleneck_dim}
      num_classes: ${model.net.num_classes}
      apply_init: False
    finetune: True
    pretrained_path: ${paths.root_dir}/pretrained_model/visda2017/${source_task}_seed${seed}_resnet101.pth
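  # the network is initialized from a source-only ResNet-101 checkpoint trained on the
  # "Synthetic" domain with the same seed; this file presumably has to be produced by a
  # separate source-pretraining run before launching this target-adaptation experiment.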
  optimizer_args:
    lr: 0.0025
    momentum: 0.9
    nesterov: True
    weight_decay: 0.001
  scheduler_args:
    type: "exponential"
    power: 0.75
    gamma: 10.0
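  # assumption: `type: "exponential"` with `power`/`gamma` looks like the inverse-decay
  # learning-rate schedule common in unsupervised domain adaptation,
  # lr(p) = lr0 * (1 + gamma * p)^(-power) with p the training progress in [0, 1];
  # see the model implementation for the exact formula.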
  aug_optimizer_args:
    lr: 0.002
    weight_decay: 0.01
  lambda_neg: 0.08
  lambda_neg_2: 0.08
  lambda_aug: 1.0
  lambda_strong: 0.2
  net_momentum: 0.99
  warmup_epoch: 0
  interval_epoch: 4
  init_aug: "random"
  K: 3
  M: 6
  MM: 6
  optimize_params: "back_bottle_head"
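  # hedged reading of the method-specific knobs above (names come from this config, their
  # exact semantics are defined in ours_pbta_directed.yaml and the model code):
  #   - lambda_neg / lambda_neg_2 / lambda_aug / lambda_strong presumably weight the
  #     corresponding loss terms (negative, augmentation and strong-augmentation losses)
  #   - net_momentum: 0.99 looks like an EMA momentum for a teacher copy of the network
  #     ("teachaug" suggesting teacher-guided augmentation)
  #   - K / M / MM presumably control how many augmentations are sampled or applied
  #   - optimize_params: "back_bottle_head" presumably selects which parts (backbone,
  #     bottleneck, head) receive gradient updates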
  train_acc:
    _target_: torchmetrics.classification.accuracy.Accuracy
    task: "multiclass"
    average: "macro"
    num_classes: ${model.net.num_classes}
  val_acc:
    _target_: torchmetrics.classification.accuracy.Accuracy
    task: "multiclass"
    average: "macro"
    num_classes: ${model.net.num_classes}
  test_acc:
    _target_: torchmetrics.classification.accuracy.Accuracy
    task: "multiclass"
    average: "macro"
    num_classes: ${model.net.num_classes}
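  # `average: "macro"` reports the unweighted mean of per-class accuracies, which is the
  # metric conventionally used for VisDA-2017 (12 classes).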

trainer:
  max_epochs: 133 # including data augmentation epoch

task_name: VisDA2017_${source_task}2${target_task}
method_name: ours_pbta_directed

logger:
  mlflow:
    experiment_name: ${task_name}
    run_name: ${method_name}
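
# many_loggers.yaml presumably enables several loggers at once; only the MLflow experiment
# and run names are overridden here, so any other loggers it configures keep their defaults.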