# ade20k_convnext.yaml
DEVICE : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)
SAVE_DIR : 'output-dir-path' # output folder name used for saving the model, logs and inference results
ADDENDUM : 'will-be-appended-at-the-end-of-logfoldername'
MODEL:
  NAME : UperNetForSemanticSegmentation # name of the model you are using
  BACKBONE : ConvNeXt-T_CVST # model variant
  PRETRAINED : 'Location-of-pretrained-backbone-model' # backbone model's weights
DATASET:
  NAME : ADE20K # dataset name to be trained with (camvid, cityscapes, ade20k)
  ROOT : data-dir-path # dataset root path
  IGNORE_LABEL : -1
  N_CLS : 151
  SEED : 0
TRAIN:
  BASE_SIZE : 520
  IMAGE_SIZE : [512, 512] # training image size in (h, w)
  BATCH_SIZE : 16 # batch size used to train
  EPOCHS : 128 # number of epochs to train
  EVAL_INTERVAL : 32 # evaluation interval during training
  ADVERSARIAL : true # enable adversarial training
  ATTACK : pgd # adversarial attack used during training (see the note after this block)
  LOSS_FN : pgd # loss used to craft the attack
  EPS : 4 # attack perturbation budget (presumably in 1/255 units)
  N_ITERS : 20 # number of attack iterations
  FREEZE : false
  AMP : false # use AMP in training
  DDP : true # use DDP training
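
# Note (hedged, not part of the original config): with ADVERSARIAL enabled, training
# inputs are perturbed by an iterative PGD-style attack using the keys above. A
# standard PGD step, with step size alpha and the EPS budget, is
#   x_adv = clip(x_adv + alpha * sign(grad_x loss(f(x_adv), y)), x - EPS, x + EPS)
# repeated for N_ITERS iterations; the exact attack implementation is defined in the
# training code, not in this file.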
LOSS:
  NAME : CrossEntropy # loss function name (ohemce, ce, dice)
  CLS_WEIGHTS : false # use class weights in loss calculation
OPTIMIZER:
  NAME : AdamW # optimizer name, default=sgd
  LR : 0.0001 # initial learning rate used in optimizer
  WEIGHT_DECAY : 0.05 # decay rate used in optimizer
SCHEDULER:
  NAME : warmuppolylr # scheduler name
  POWER : 1.0 # scheduler power
  WARMUP : 5 # warmup epochs used in scheduler
  WARMUP_RATIO : 0.00001 # warmup ratio
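
# Note (hedged, not part of the original config): warmuppolylr is commonly implemented
# as a linear warmup from about LR * WARMUP_RATIO over the first WARMUP epochs, then a
# polynomial decay lr = LR * (1 - iter / max_iter) ** POWER; the exact behaviour is
# defined by the scheduler implementation in the training code.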
EVAL:
  NAME : ADE20K # dataset name to be evaluated on (camvid, cityscapes, ade20k)
  BACKBONE : ConvNeXt-T_CVST # model variant
  N_CLS : 151
  MODEL_PATH : 'path to checkpoint of evaluation model'
  BASE_SIZE : 520
  IMAGE_SIZE : [512, 512] # evaluation image size in (h, w)
  BATCH_SIZE : 16 # batch size used for evaluation
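
# Usage note (hedged sketch, not part of the original config): a plain-YAML config
# like this one is typically loaded with PyYAML and passed around as a nested dict,
# for example:
#   import yaml
#   with open('ade20k_convnext.yaml') as f:
#       cfg = yaml.safe_load(f)
#   print(cfg['MODEL']['BACKBONE'])   # ConvNeXt-T_CVST
#   print(cfg['DATASET']['N_CLS'])    # 151
# The actual loader and key handling are defined by this repository's training code.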