run_classification.sh (forked from eyalbd2/PERL)
#!/bin/bash
# If the file has Windows line endings, strip them first: sed -i -e 's/\r$//' run_classification.sh
# Then run: sh run_classification.sh
DATA_DIR=data
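# assumes one sub-directory per domain, e.g. data/feminist/ and data/abortion/ (see the --*_data_dir flags below)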
mkdir -p 5-fold-hyper-tune
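# each MODEL is a <source>_to_<target> domain pair; list more pairs here to sweep additional pairs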
for MODEL in feminist_to_abortion
do
SRC_DOMAIN="${MODEL%_to_*}" # everything before '_to_' in the model name
TRG_DOMAIN="${MODEL#*_to_}" # everything after '_to_' in the model name
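# e.g. MODEL=feminist_to_abortion -> SRC_DOMAIN=feminist, TRG_DOMAIN=abortion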
MODELS_DIR=models/${MODEL}
TEMP_DIR=models/${MODEL}/temp
mkdir -p ${TEMP_DIR}/
mkdir -p 5-fold-hyper-tune/${MODEL}/
for EPOCH in 1 # EPOCH selects the pre-trained checkpoint pytorch_model${EPOCH}.bin; for hyper-parameter tuning, sweep [20 40 60]
do
for OUT_CHANNELS in 32 # for hyper-parameter tuning, sweep [16 32 64]
do
for BATCH_SIZE in 32 # for hyper-parameter tuning, sweep [32 64]
do
for FILTER_SIZE in 9 # for hyper-parameter tuning, sweep [7 9 11]
do
for FOLD_NUM in 1 # for five-fold cross-validation, sweep [1 2 3 4 5]
do
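# stage a fresh copy of the selected pre-trained checkpoint; the run below loads it via --load_model/--model_name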
cp ${MODELS_DIR}/pytorch_model${EPOCH}.bin ${TEMP_DIR}
python supervised_task_learning.py \
--in_domain_data_dir=${DATA_DIR}/${SRC_DOMAIN}/ \
--cross_domain_data_dir=${DATA_DIR}/${TRG_DOMAIN}/ \
--do_train \
--output_dir=${TEMP_DIR}/ \
--load_model \
--model_name=pytorch_model${EPOCH}.bin \
--cnn_window_size=${FILTER_SIZE} \
--cnn_out_channels=${OUT_CHANNELS} \
--learning_rate=5e-5 \
--train_batch_size=${BATCH_SIZE} \
--save_according_to=loss \
--write_log_for_each_epoch
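# the run writes its final eval results into ${TEMP_DIR} next to the checkpoint; stash them under a name that encodes this configuration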
COPY_FROM_PATH=${TEMP_DIR}/pytorch_model${EPOCH}.bin-final_eval_results.txt
COPY_TO_PATH=5-fold-hyper-tune/${MODEL}/ep-${EPOCH}_ch-${OUT_CHANNELS}_batch-${BATCH_SIZE}_filt-${FILTER_SIZE}_fold-${FOLD_NUM}.txt
cp ${COPY_FROM_PATH} ${COPY_TO_PATH}
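# clear the temp dir so the next configuration starts from a fresh checkpoint copy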
rm ${TEMP_DIR}/*
done
done
done
done
done
done
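
# Optional sanity check after the sweep: print each result file next to its
# last line for a quick look at the grid. This is a minimal sketch that
# assumes the metric of interest ends each *-final_eval_results.txt file;
# adjust the parsing to whatever supervised_task_learning.py actually writes.
for RESULTS in 5-fold-hyper-tune/*/*.txt
do
printf '%s\t%s\n' "${RESULTS}" "$(tail -n 1 "${RESULTS}")"
done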