forked from binz98/Large_Language_Model_Based_Actor_Critic
-
Notifications
You must be signed in to change notification settings - Fork 0
/
LLM.py
60 lines (54 loc) · 1.82 KB
/
LLM.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import openai
import tiktoken
import time
# Round-trip sanity check that the tokenizer decodes what it encodes.
# NOTE(review): module-level `assert` is stripped when Python runs with -O;
# if this check matters in production, raise explicitly instead.
enc = tiktoken.get_encoding("cl100k_base")
assert enc.decode(enc.encode("hello world")) == "hello world"
# `enc` is immediately rebound here, so the cl100k_base encoder above is
# only used for the sanity check. All token counting in GPT_response uses
# the gpt-4 encoding (which is also cl100k_base, so counts are consistent).
enc = tiktoken.encoding_for_model("gpt-4")
# API key placeholder — must be filled in before GPT_response is called.
openai_api_key_name = ''
# openai_api_key_name = ''
# openai.api_base = ''
def GPT_response(messages, model_name):
    """Send a chat-completion request to the OpenAI API with retries.

    Counts the prompt tokens with the module-level ``enc`` tokenizer, then
    tries the API call up to three times; before the final attempt it waits
    60 seconds (to let rate limits recover).

    Args:
        messages: list of chat messages, each a dict with a "content" key.
        model_name: one of the supported GPT model identifiers.

    Returns:
        tuple[str, int]: (response text, total token count of prompt +
        response), or ('Out of tokens', prompt token count) if every
        attempt failed.

    Raises:
        ValueError: if ``model_name`` is not in the supported list.
    """
    token_num_count = 0
    for item in messages:
        token_num_count += len(enc.encode(item["content"]))

    # Guard clause replaces the original bottom-of-function else branch.
    if model_name not in ['gpt-4', 'gpt-4-32k', 'gpt-3.5-turbo-0301', 'gpt-4-0613', 'gpt-4-32k-0613', 'gpt-3.5-turbo-16k-0613']:
        raise ValueError(f'Invalid model name: {model_name}')

    openai.api_key = openai_api_key_name

    # Retry loop replaces the original triple-nested copy-pasted try/except.
    # `except Exception` (not a bare `except:`) so Ctrl-C / SystemExit still
    # propagate instead of being silently swallowed.
    max_attempts = 3
    result = None
    for attempt in range(max_attempts):
        if attempt == max_attempts - 1:
            # Same back-off as the original: pause only before the last try.
            print(f'{model_name} Waiting 60 seconds for API query')
            time.sleep(60)
        try:
            result = openai.ChatCompletion.create(
                model=model_name,
                messages=messages,
                temperature=0.0,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0
            )
            break
        except Exception:
            continue

    if result is None:
        # All attempts failed; caller treats this sentinel as a soft error.
        return 'Out of tokens', token_num_count

    content = result.choices[0]['message']['content']
    token_num_count += len(enc.encode(content))
    print(f'Token_num_count: {token_num_count}')
    return content, token_num_count