-
Notifications
You must be signed in to change notification settings - Fork 2
/
pretrain_no_trainer.sh
47 lines (41 loc) · 1.82 KB
/
pretrain_no_trainer.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
#!/bin/bash
# SLURM batch script: pretrain a PIXEL encoder + GPT-2-large decoder with
# scripts/training/run_pretraining_no_trainer.py on a single A100 GPU.
# normal cpu stuff: allocate cpus, memory
#SBATCH --job-name=pretrain-pixel-gpt2large
#SBATCH --ntasks=1 --cpus-per-task=48 --mem=70000M
#SBATCH -p gpu --gres=gpu:a100:1
#SBATCH --time=75:00:00

# Fail fast: abort the job on any error, unset variable, or pipeline failure.
set -euo pipefail

# Log the allocated GPU(s) into the job output for debugging.
nvidia-smi

export ENCODER="Team-PIXEL/pixel-base"
export DECODER="gpt2-large"
export DATASET="zuzannad1/pixelsum_wiki"
# Timestamped output directory so re-runs never clobber earlier experiments.
export EXPERIMENT_DIR="experiments/pretraining/${DECODER}/$(date +%Y-%m-%d_%H-%M-%S)"
mkdir -p "${EXPERIMENT_DIR}"

accelerate launch --mixed_precision=fp16 scripts/training/run_pretraining_no_trainer.py \
  --encoder_name "${ENCODER}" \
  --decoder_name "${DECODER}" \
  --processor_name "${ENCODER}" \
  --tokenizer_name "${DECODER}" \
  --fallback_fonts_dir "fonts" \
  --dataset_name "${DATASET}" \
  --train_decoder true \
  --train_encoder true \
  --predict_with_generate true \
  --checkpointing_steps '10000' \
  --output_dir "${EXPERIMENT_DIR}" \
  --log_predictions true \
  --per_device_train_batch_size 32 \
  --gradient_accumulation_steps 1 \
  --learning_rate 1.5e-4 \
  --weight_decay 0.05 \
  --lr_scheduler_type "cosine" \
  --num_warmup_steps 15000 \
  --num_train_epochs 2 \
  --val_max_target_length 50 \
  --max_target_length 50 \
  --use_fast_tokenizer true \
  --num_beams 2 \
  --report_to "wandb" \
  --pad_to_max_length true \
  --max_train_steps 300000 \
  --logging_steps 50 \
  --data_cache_dir 'cached_data'