llm_utils.py
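"""Utility helpers for generating chat completions with the OpenAI API."""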
from openai import OpenAI
from modules.config import load_config
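
# Load translation settings from the project config and create a client
# pointed at the configured OpenAI-compatible endpoint.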
config = load_config()

client = OpenAI(
    api_key=config["translation"]["api_key"],
    base_url=config["translation"]["api_base_url"],
)


def get_completion(
    prompt: str,
    system_message: str = "You are a helpful assistant.",
    model: str = "gpt-4o-mini",
    temperature: float = 0.3,
) -> str:
    """
    Generate a chat completion using the OpenAI API.

    Args:
        prompt (str): The user's prompt or query.
        system_message (str, optional): The system message that sets the context
            for the assistant. Defaults to "You are a helpful assistant.".
        model (str, optional): The name of the OpenAI model to use for generating
            the completion. Defaults to "gpt-4o-mini".
        temperature (float, optional): The sampling temperature controlling the
            randomness of the generated text. Defaults to 0.3.

    Returns:
        str: The generated completion text.
    """
    # Send the system and user messages to the chat completions endpoint.
    response = client.chat.completions.create(
        model=model,
        temperature=temperature,
        top_p=1,
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content
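

# Minimal usage sketch (not part of the original module): assumes the config
# file supplies a valid API key and base URL for the client created above.
# The prompt and system message below are purely illustrative.
if __name__ == "__main__":
    reply = get_completion(
        prompt="Summarize the benefits of unit testing in one sentence.",
        system_message="You are a concise technical writer.",
    )
    print(reply)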