ELYZA-tasks100-ansuwer_calm3.py
from llama_cpp import Llama  # llama-cpp-python, the Python bindings for llama.cpp
import csv

model = "hoge_huga.gguf"  # Path to the target GGUF model.
llm = Llama(
    model_path=model,
    n_gpu_layers=-1,  # Number of layers to offload to the GPU (only effective with a CUDA build of llama-cpp-python); -1 offloads all layers.
    n_ctx=2048,       # Maximum context size, i.e. the input-length limit.
    # last_n_tokens_size=0,  # Maximum number of tokens to keep in the last_n_tokens deque.
)

# ChatML-style system prompt ("You are an honest and excellent assistant.").
role = "<|im_start|>system\nあなたは誠実で優秀なアシスタントです。<|im_end|>"
role += "\n"

csv_path = "test.csv"
answer_csv_path = "answer.csv"

# Create the answer CSV and write its header row.
with open(answer_csv_path, mode='w', encoding='utf-8', newline="") as f:
    writer = csv.writer(f)
    writer.writerow(['answer'])

# Open the question CSV.
with open(csv_path, mode='r', encoding='utf-8', newline="") as file:
    reader = csv.reader(file)
    # Read each row, skipping the header row.
    for row in reader:
        if row[0] != "input":
            prompt = row[0]
            # Build a ChatML prompt (note the "\n" after "user" and "assistant",
            # which the ChatML format expects and the original string omitted).
            prompt_C3 = role + "<|im_start|>user\n" + prompt + "<|im_end|>\n<|im_start|>assistant\n"
            output = llm(
                prompt=prompt_C3,
                max_tokens=1024,
                temperature=0.8,
                top_p=0.95,
                min_p=0.05,
                typical_p=1.0,
                frequency_penalty=0.0,
                presence_penalty=0.0,
                repeat_penalty=1.1,
                top_k=40,
                seed=-1,
                tfs_z=1.0,
                mirostat_mode=0,
                mirostat_tau=5.0,
                mirostat_eta=0.1,
                stop=["<|im_"],  # Stop sequence: generation halts before this string is emitted.
            )
            output = output["choices"][0]["text"]
            # Unescape literal "\n" / "\u3000" sequences and convert half-width "!" / "?"
            # to their full-width forms.
            output = output.replace("\\n", "\n").replace("\\u3000", "\u3000").replace("!", "!").replace("?", "?")
            # Trim leading/trailing newlines (unlike indexing output[0] / output[-1],
            # this is safe even when the output is empty).
            output = output.strip("\n")
            print(prompt_C3 + output + "<|im_end|>")
            # Append the answer to the answer CSV.
            with open(answer_csv_path, mode='a', encoding='utf-8', newline="") as f:
                writer = csv.writer(f)
                writer.writerow([output])

# Record which model produced the answers as the final row.
with open(answer_csv_path, mode='a', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow([model])