# Guidelines for the StyleGAN2-ADA config
# Don't change cfgs except for those explained in the comments below.
# Changing other cfgs will cause a deviation from the original StyleGAN2-ADA paper settings.
# g_cond_mtd can be chosen from ["W/O", "cAdaIN"].
# d_cond_mtd can be chosen from ["W/O", "AC", "PD", "MH", "MD", "2C", "D2DCE", "SPD"].
# (An unconditional "W/O"/"W/O" sketch follows these guidelines.)
# z_dim and w_dim should be fixed to 512 regardless of image size.
# apply_g_ema should be true for stable results.
# apply_r1_reg should be true.
# g_reg_interval and d_reg_interval are fixed to 4 and 16, respectively, regardless of image size.
# pl_reg is disabled only for cifar10.
# d_architecture is "orig" for cifar10.
# style_mixing_p should be 0.9 for all settings except for cifar10 (0).
# For total_steps, batch_size, d_epilogue_mbstd_group_size, g/d_lr, r1_lambda, g_ema_kimg,
# g_ema_rampup, and mapping_network, check the StyleGAN2 section in src/config.py.
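#
# As referenced above, a minimal unconditional sketch changes only the two
# conditioning methods and leaves the rest of this file untouched (values taken
# from the allowed lists above; whether other keys also need adjusting should be
# verified against the StudioGAN data pipeline):
#   MODEL:
#     g_cond_mtd: "W/O"
#     d_cond_mtd: "W/O"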
DATA:
  name: "CIFAR10"
  img_size: 32
  num_classes: 10
MODEL:
  backbone: "stylegan2"
  g_cond_mtd: "cAdaIN"
  d_cond_mtd: "SPD"
  g_act_fn: "Auto"
  d_act_fn: "Auto"
  z_prior: "gaussian"
  z_dim: 512
  w_dim: 512
  g_conv_dim: "N/A"
  d_conv_dim: "N/A"
  apply_g_ema: True
LOSS:
  adv_loss: "logistic"
  apply_r1_reg: True
  r1_lambda: 0.01
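  # "logistic" is the non-saturating logistic GAN loss used by StyleGAN2
  # (a sketch of the standard formulation; verify against the StudioGAN loss code):
  #   L_D = E_x[softplus(-D(x))] + E_z[softplus(D(G(z)))]
  #   L_G = E_z[softplus(-D(G(z)))]
  # R1 regularization penalizes D's gradient on real images, with r1_lambda as
  # gamma, applied lazily every d_reg_interval steps:
  #   L_R1 = (r1_lambda / 2) * E_x[ ||grad_x D(x)||^2 ]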
OPTIMIZATION:
  # These values are slightly adjusted when lazy regularization is applied; see
  # the sketch below.
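  # A sketch of the standard StyleGAN2 lazy-regularization rescaling (assumed to
  # match StudioGAN's implementation; verify against src/config.py):
  #   mb_ratio = reg_interval / (reg_interval + 1)
  #   lr_eff   = lr * mb_ratio
  #   beta_eff = beta ** mb_ratio
  # With g_reg_interval = 4 and d_reg_interval = 16 (STYLEGAN2 section below):
  #   g_lr_eff = 0.0025 * 4/5   = 0.0020
  #   d_lr_eff = 0.0025 * 16/17 ≈ 0.00235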
  g_lr: 0.0025
  d_lr: 0.0025
  beta1: 0
  beta2: 0.99
  g_updates_per_step: 1
  d_updates_per_step: 1
  total_steps: 1000000
AUG:
  apply_ada: True
  ada_aug_type: "bgc"
  ada_initial_augment_p: 0
  ada_target: 0.6
  ada_kimg: 500
  ada_interval: 4
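  # ADA feedback loop, per the StyleGAN2-ADA paper (a sketch; exact variable
  # names in StudioGAN may differ): every ada_interval steps the overfitting
  # heuristic r_t = E[sign(D(x_real))] is compared to ada_target, and the
  # augmentation probability p is nudged toward it:
  #   p <- clip(p + sign(r_t - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000), 0, 1)
  # ada_kimg = 500 thus sets how many thousands of images p needs to traverse
  # the full [0, 1] range.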
STYLEGAN2:
  g_reg_interval: 4
  d_reg_interval: 16
  mapping_network: 2
  style_mixing_p: 0
  g_ema_kimg: 500
  g_ema_rampup: 0.05
  apply_pl_reg: False
  pl_weight: 0
  d_architecture: "orig"
  d_epilogue_mbstd_group_size: 32
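  # G EMA bookkeeping, following the StyleGAN2-ADA reference implementation (a
  # sketch; verify against StudioGAN's trainer): the EMA half-life is g_ema_kimg
  # thousand images, ramped up early in training via g_ema_rampup:
  #   ema_nimg = min(g_ema_kimg * 1000, cur_nimg * g_ema_rampup)
  #   ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
  #   w_ema <- ema_beta * w_ema + (1 - ema_beta) * w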