train_config.yaml
train:
  weights: ''  # Model weight path
  data: ''  # Data config path
  epochs: 300
  batch_size: 64
  image_size: 640  # Train/val image size
  rect: false  # Use rectangular training
  resume: false  # Resume previous training
  validate_period: 1  # Run validation every x epochs
  auto_anchor: true  # Check anchors and auto-fix them
  cache_image: null  # Image caching mode: 'mem', 'disk', 'dynamic_mem', or 'dynamic_disk' (null to disable)
  n_skip: 0  # Skip images by n, which reduces the number of images used
  image_weights: false  # Use weighted image selection for training
  device: ""  # CUDA device. "" uses all GPUs, e.g. '0,2' or 'cpu'
  multi_scale: false  # Use multi-scale training (+/- 50% image size)
  single_cls: false  # Train multi-class data as single-class
  sync_bn: false  # Use SyncBatchNorm, only available in DDP mode
  workers: 18  # Maximum number of dataloader workers
  linear_lr: false  # Use linear learning rate
  label_smoothing: 0.0  # Label smoothing epsilon
  freeze: 0  # Number of layers to freeze
  save_period: -1  # Save a checkpoint every x epochs (disabled if < 1)
  log_dir: "exp"  # Default log root directory
  plot: true  # Plot results
  label_type: "segments"  # Label type to use ("labels", "segments")
  patience: 30  # Patience for early stopping
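A config like this is normally consumed by loading the YAML into a dict before training starts. The snippet below is a minimal sketch using PyYAML; the file path and the loading code are assumptions for illustration, not this repository's actual entry point.

```python
# Minimal sketch (assumed loader, not the repository's own code):
# read the config with PyYAML and access a few `train` keys.
import yaml

with open("train_config.yaml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

train_cfg = cfg["train"]
print(train_cfg["epochs"])       # 300
print(train_cfg["image_size"])   # 640
print(train_cfg["cache_image"])  # None (YAML null disables image caching)
```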
hyper_params:
  optimizer: 'SGD'
  optimizer_params:
    lr: 0.01  # lr0
    momentum: 0.937
    nesterov: true
  # optimizer: 'Adam'
  # optimizer_params:
  #   betas: [0.937, 0.999]
  lrf: 0.1  # Final OneCycleLR learning rate (lr0 * lrf)
  momentum: 0.937  # SGD momentum / Adam beta1
  weight_decay: 0.0005  # Optimizer weight decay (5e-4)
  warmup_epochs: 3.0  # Warmup epochs (fractions ok)
  warmup_momentum: 0.8  # Warmup initial momentum
  warmup_bias_lr: 0.1  # Warmup initial bias lr
  box: 0.05  # Box loss gain
  cls: 0.5  # cls loss gain
  cls_pw: 1.0  # cls BCELoss positive_weight
  obj: 1.0  # obj loss gain (scales with pixels)
  obj_pw: 1.0  # obj BCELoss positive_weight
  conf_t: 0.1  # Confidence threshold
  iou_t: 0.20  # IoU training threshold
  anchor_t: 4.0  # Anchor-multiple threshold
  # anchors: 3  # Anchors per output layer (0 to ignore)
  fl_gamma: 0.0  # Focal loss gamma (EfficientDet default gamma=1.5)
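The `optimizer` / `optimizer_params` pair maps naturally onto a name lookup against `torch.optim`. The sketch below shows that idea under the assumption that the training code instantiates the optimizer by name; it is an illustration, not the repository's confirmed implementation.

```python
# Hedged sketch: build the optimizer named in `hyper_params.optimizer`
# with the keyword arguments from `optimizer_params`.
import torch
import torch.nn as nn
import yaml

with open("train_config.yaml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

hyper = cfg["hyper_params"]
model = nn.Linear(10, 2)  # placeholder model for illustration only

optimizer_cls = getattr(torch.optim, hyper["optimizer"])  # e.g. torch.optim.SGD
optimizer = optimizer_cls(model.parameters(), **hyper["optimizer_params"])
# With the defaults above this is SGD(lr=0.01, momentum=0.937, nesterov=True);
# switching to the commented Adam block would pass betas=[0.937, 0.999] instead.
```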
yolo_augmentation:
  augment: true  # Use augmentation
  hsv_h: 0.015  # Image HSV-Hue augmentation (fraction)
  hsv_s: 0.7  # Image HSV-Saturation augmentation (fraction)
  hsv_v: 0.4  # Image HSV-Value augmentation (fraction)
  degrees: 0.0  # Image rotation (+/- deg)
  translate: 0.1  # Image translation (+/- fraction)
  scale: 0.5  # Image scale (+/- gain)
  shear: 0.0  # Image shear (+/- deg)
  perspective: 0.0  # Image perspective (+/- fraction), range 0-0.001
  # flipud: 0.0  # Image flip up-down (probability)
  # fliplr: 0.5  # Image flip left-right (probability)
  mosaic: 1.0  # Image mosaic (probability)
  mixup: 0.0  # Image mixup (probability)
  copy_paste: 0.1  # Segment copy-paste (probability)
  copy_paste2:  # Config for copy-paste2
    p: 0.0  # Copy-paste probability per object
    n_img: 3  # Number of images to be used for copy-paste2
    area_thr: 200  # Area threshold for copy-paste (width * height in pixels, e.g. 2 * 5 = 10)
    ioa_thr: 0.3  # IoA threshold between an existing object and a pasted object
    scale_min: 0.35  # Minimum scale factor
    scale_max: 1.0  # Maximum scale factor
augmentation:
  - policy:
      Blur: {p: 0.01}
      MedianBlur: {p: 0.01}
      ToGray: {p: 0.01}
      CLAHE: {p: 0.01}
      RandomBrightnessContrast: {p: 0.0}
      RandomGamma: {p: 0.0}
      ImageCompression: {quality_lower: 75, p: 0.0}
    prob: 1.0
  - policy:
      HorizontalFlip: {p: 0.5}
      VerticalFlip: {p: 0.0}
    prob: 1.0
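The `augmentation` entry is a list of policy groups whose keys match albumentations transform names (Blur, CLAHE, HorizontalFlip, ...), each group carrying its own application probability. The sketch below shows one way such a list could be turned into an albumentations pipeline; the builder is an assumption for illustration, not the repository's own code, and depending on the installed albumentations version `ImageCompression` may expect `quality_range` instead of `quality_lower`.

```python
# Hedged sketch: convert the `augmentation` policy list into albumentations
# transforms by name lookup, wrapping each group in A.Sequential with its `prob`.
import albumentations as A
import yaml

with open("train_config.yaml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

groups = []
for item in cfg["augmentation"]:
    transforms = [getattr(A, name)(**params) for name, params in item["policy"].items()]
    groups.append(A.Sequential(transforms, p=item["prob"]))

pipeline = A.Compose(groups)  # bbox/mask parameters omitted for brevity
```

For detection or segmentation training, `A.Compose` would additionally need `bbox_params` or mask handling so that boxes and segments are transformed together with the image.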