two_tower.yaml
# @package trainer
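# The dotted path below names the factory that builds the two-tower diffusion
# LCM trainer; it is presumably resolved and called by the training launcher.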
_trainer_: lcm.train.two_tower_diffusion_lcm.trainer.prepare_two_tower_diffusion_lcm_trainer

output_dir: ???

# Parameter Size: 1,635,101,696
model_arch: two_tower_diffusion_lcm_1_6B
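
# Training criterion for the two-tower diffusion LCM. cf_guidance_probability
# is most likely the probability of dropping the conditioning during training
# to enable classifier-free guidance at inference; step_sampling controls how
# diffusion timesteps are drawn (uniform here, with no loss re-weighting).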
criterion:
  name: two_tower_diffusion_next_sent
  cf_guidance_probability: 0.15
  reduction: sum
  log_losses_per_timestep_bucket: False
  compute_rmse: False
  step_sampling:
    sampling: "uniform"
    weighting: "none"
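
# Mixed-precision setup: judging by the flags, the model runs in fp16 while
# the optimizer state is kept in fp32, and FSDP reduces gradients in fp32.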
dtype: "torch.float16"
use_optimizer_in_fp32: true
use_fsdp: true
fsdp_fp32_reduce: true
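
# Optimization: Adam(W)-style updates with betas (0.9, 0.95), cosine decay
# from a peak lr of 4e-4 after 10k warmup steps, 250k total steps, and
# gradient clipping at norm 25.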
lr: 0.0004
lr_schedule: cosine
num_lr_warmup_steps: 10_000
max_steps: 250_000
gradient_accumulation: 1
max_grad_norm: 25
weight_decay: 0.1
adam_betas:
  - 0.9
  - 0.95
adam_eps: 1e-5
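
# Evaluation / checkpointing cadence. Only the last 2 checkpoints are kept;
# consolidated (presumably non-sharded, single-file) models are preserved.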
validate_every_n_steps: 10_000
save_model_every_n_steps: 2_000
checkpoint_every_n_steps: 2_000
keep_last_n_checkpoints: 2
preserve_consolidated_models: True
publish_metrics_every_n_steps: 100
seed: 1
profile: false
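
# Training data loader. In the LCM setting, a "token" here most likely refers
# to a sentence embedding (concept) rather than a subword; long documents are
# wrapped into chunks of 128 units before batching.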
data_loading_config:
  max_tokens: 7168
  min_batch_size: 1
  len_to_wrap_long_seq: 128
  packing: false
  min_length_of_sequences: 1
  min_length_after_batching: 2
  num_parallel_calls: 1
  nb_prefetch: 5
  nb_epochs: 1
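
# Validation loader; presumably inherits the remaining fields from the
# training data_loading_config and overrides only the wrap length.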
validation_data_loading_config:
  len_to_wrap_long_seq: 128
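
# Datasets are referenced by name (presumably registered dataset cards defined
# elsewhere in the repo); the suffix text is appended to every source document.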
training_data:
  - name: "pretraining_data=train"
    source_suffix_text: "End of text."
validation_data:
  - name: "pretraining_data=validation"
    source_suffix_text: "End of text."
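
# Compute request (presumably SLURM-style): 4 nodes x 8 GPUs = 32 GPUs total,
# one task per GPU; mem_gb: 0 presumably means the scheduler default, and
# timeout_min: 10000 is roughly 7 days.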
requirements:
  nodes: 4
  tasks_per_node: 8
  gpus_per_node: 8
  cpus_per_task: 32
  mem_gb: 0
  timeout_min: 10000