-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun.sh
104 lines (93 loc) · 2.51 KB
/
run.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
#!/usr/bin/env bash
# Run BERT-PALS multi-task / single-task fine-tuning experiments.
# All runs share the same data dir, vocab, config, and init checkpoint;
# outputs are written under ./Tmp_Model/<experiment>.
set -euo pipefail

# Experiment: multi-task on all tasks with the 'anneal' sampling schedule.
# NOTE(review): original had `--sample 'anneal'\` with no space before the
# line-continuation backslash, which can fuse the next flag into the value.
python run_multi_task.py \
  --seed 42 \
  --output_dir ./Tmp_Model/MTL \
  --tasks all \
  --sample 'anneal' \
  --multi \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir ./data/ \
  --vocab_file ./uncased_L-12_H-768_A-12/vocab.txt \
  --bert_config_file ./config/pals_config.json \
  --init_checkpoint ./uncased_L-12_H-768_A-12/pytorch_model.bin \
  --max_seq_length 50 \
  --train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 10 \
  --gradient_accumulation_steps 1
# Experiment: multi-task on all tasks with the 'sqrt' sampling schedule
# (task sampling probability proportional to sqrt of dataset size).
# Fix: added the missing space before the continuation backslash after
# --sample 'sqrt' so the next flag cannot fuse into the value.
python run_multi_task.py \
  --seed 42 \
  --output_dir ./Tmp_Model/MTL_sqrt \
  --tasks all \
  --sample 'sqrt' \
  --multi \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir ./data/ \
  --vocab_file ./uncased_L-12_H-768_A-12/vocab.txt \
  --bert_config_file ./config/pals_config.json \
  --init_checkpoint ./uncased_L-12_H-768_A-12/pytorch_model.bin \
  --max_seq_length 50 \
  --train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 10 \
  --gradient_accumulation_steps 1
# Experiment: multi-task on all tasks with the 'prop' sampling schedule
# (task sampling probability proportional to dataset size).
# Fix: added the missing space before the continuation backslash after
# --sample 'prop' so the next flag cannot fuse into the value.
python run_multi_task.py \
  --seed 42 \
  --output_dir ./Tmp_Model/MTL_prop \
  --tasks all \
  --sample 'prop' \
  --multi \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir ./data/ \
  --vocab_file ./uncased_L-12_H-768_A-12/vocab.txt \
  --bert_config_file ./config/pals_config.json \
  --init_checkpoint ./uncased_L-12_H-768_A-12/pytorch_model.bin \
  --max_seq_length 50 \
  --train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 10 \
  --gradient_accumulation_steps 1
# Experiment: single-task baseline on task_id 1 (goemotion, per output dir),
# 'rr' (round-robin) sampling — trivial with one task but kept for parity.
# Fix: added the missing space before the continuation backslash after
# --sample 'rr' so the next flag cannot fuse into the value.
python run_multi_task.py \
  --seed 42 \
  --output_dir ./Tmp_Model/goemotion \
  --tasks single \
  --task_id 1 \
  --sample 'rr' \
  --multi \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir ./data/ \
  --vocab_file ./uncased_L-12_H-768_A-12/vocab.txt \
  --bert_config_file ./config/pals_config.json \
  --init_checkpoint ./uncased_L-12_H-768_A-12/pytorch_model.bin \
  --max_seq_length 50 \
  --train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 10 \
  --gradient_accumulation_steps 1
# Experiment: single-task baseline on task_id 0 (sst, per output dir),
# 'rr' (round-robin) sampling — trivial with one task but kept for parity.
# Fix: added the missing space before the continuation backslash after
# --sample 'rr' so the next flag cannot fuse into the value.
python run_multi_task.py \
  --seed 42 \
  --output_dir ./Tmp_Model/sst \
  --tasks single \
  --task_id 0 \
  --sample 'rr' \
  --multi \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir ./data/ \
  --vocab_file ./uncased_L-12_H-768_A-12/vocab.txt \
  --bert_config_file ./config/pals_config.json \
  --init_checkpoint ./uncased_L-12_H-768_A-12/pytorch_model.bin \
  --max_seq_length 50 \
  --train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 10 \
  --gradient_accumulation_steps 1