-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathexecute.sh
89 lines (76 loc) · 3.22 KB
/
execute.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
#!/bin/bash
# execute.sh — run benchmark inference with either a closed-source or an
# open-source model, then run the evaluation step.
#
# Usage: edit the variables below (model_type, model_name, optional filters)
# and run the script from the repository root.
set -euo pipefail

# Navigate to inference directory; abort with a clear message if it is missing.
cd "benchmark and datasets"/benchmark/inference \
  || { echo "error: inference directory not found" >&2; exit 1; }

# Set the model type: 'closed' for closed-source model, 'open' for open-source model
model_type="closed" # Change to 'open' if using an open-source model

# Optional filters shared by both model types. An empty string means the
# corresponding flag is not passed to the python script.
top_k=0               # Optional: integer between 0-9; 0 means no RAG
lib_name=""           # Optional: specify the python library name
answer_difficulty=""  # Optional: specify the difficulty level
category=""           # Optional: specify the question category
question_type=""      # Optional: specify the question type

if [[ "$model_type" == "closed" ]]; then
  # For closed-source model
  echo "Please ensure you have filled in your API key and base_url in close_model.py before proceeding."
  # Required / closed-only parameters (modify these as needed)
  model_name="gpt-4o-mini" # Required: specify your model name
  num_threads=10           # Optional: integer between 1-32
  # Build the command as an argv array — no eval, no quoting bugs.
  cmd=(python close_model.py --model_name "$model_name")
  if [[ -n "$num_threads" ]]; then
    cmd+=(--num_threads "$num_threads")
  fi
elif [[ "$model_type" == "open" ]]; then
  # For open-source model
  echo "Please ensure you have modified the model path and model_settings path in open_model.py before proceeding."
  # Required parameters (modify these as needed)
  model_name="your_model_name"       # Required: specify your model name
  model_setting="your_model_setting" # Required: specify your model setting
  cmd=(python open_model.py --model_name "$model_name" --model_setting "$model_setting")
else
  echo "Invalid model_type. Please set model_type to 'closed' or 'open'."
  exit 1
fi

# Append the shared optional flags (only those that are non-empty).
if [[ -n "$top_k" ]]; then
  cmd+=(--top_k "$top_k")
fi
if [[ -n "$lib_name" ]]; then
  cmd+=(--lib_name "$lib_name")
fi
if [[ -n "$answer_difficulty" ]]; then
  cmd+=(--answer_difficulty "$answer_difficulty")
fi
if [[ -n "$category" ]]; then
  cmd+=(--category "$category")
fi
if [[ -n "$question_type" ]]; then
  cmd+=(--question_type "$question_type")
fi

# Run the inference command directly from the array (replaces unsafe eval).
"${cmd[@]}"

# Navigate to evaluation/run directory; abort if it is missing.
cd ../evaluation/run \
  || { echo "error: evaluation/run directory not found" >&2; exit 1; }
# Run 1_run_test.py
python 1_run_test.py