
Commit 72bce89

Update llm_api_calls.py
1 parent 7b6e43f commit 72bce89

1 file changed: +68 -32 lines changed


llm_api_calls.py

Lines changed: 68 additions & 32 deletions
@@ -24,6 +24,37 @@
 from selenium.webdriver.chrome.service import Service as ChromeService
 from webdriver_manager.chrome import ChromeDriverManager
 import chromadb
+def test_python_code_on_windows_subprocess(script_raw):
+    """
+    Test Python code on Windows using the subprocess module.
+
+    This function takes a Python script as input, writes it to a temporary file, and runs it using the subprocess module.
+    The output and error messages from the script execution are captured and returned.
+
+    Parameters:
+    - script_raw (str): The Python script to test.
+
+    Returns:
+    - JSON string containing the executed script, its output, and any errors encountered.
+    """
+    try:
+        # Write the script to a temporary file
+        with open("temp_script.py", "w") as file:
+            file.write(script_raw)
+
+        # Run the script using the subprocess module
+        result = subprocess.run(["python", "temp_script.py"], shell=True, capture_output=True, text=True)
+        output = result.stdout
+        error = result.stderr
+        if error:
+            output = json.dumps({"message": f"Script executed with errors: {error}"})
+        else:
+            output = json.dumps({"message": f"Script executed successfully: {output}"})
+
+        return json.dumps({"script": script_raw, "output": output, "error": error})
+    except Exception as e:
+        error_message = json.dumps({"message": f"Error executing script: {str(e)}"})
+        return json.dumps({"script": script_raw, "error": error_message})
 
 documents = [
     "",
@@ -466,7 +497,24 @@ def chat(self, system_prompt,prompt):
                     messages=messages,
                 ) # get a new response from the model where it can see the function response
                 second_response = second_response.choices[0].message.content
-                return second_response
+                third_response = self.client.chat.completions.create(
+                    model=self.model,
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": system_prompt,
+                        },
+                        {
+                            "role": "assistant",
+                            "content": f"I need to fact check the following information:[my_last_response] {second_response}[/my_last_response] with another function call, please wait a moment.",
+                        },
+                        {
+                            "role": "user",
+                            "content": "Sure, take your time.",
+                        }
+                    ]
+                )
+                return third_response.choices[0].message.content
 
         elif self.client == ollama:
             model = OllamaFunctions(model=self.model, format="json")
@@ -567,7 +615,25 @@ def chat(self, system_prompt,prompt):
                     model=self.model,
                     messages=messages
                 )
-                return second_response.choices[0].message.content
+                second_response = second_response.choices[0].message.content
+                third_response = self.client.chat.completions.create(
+                    model=self.model,
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": system_prompt,
+                        },
+                        {
+                            "role": "assistant",
+                            "content": f"I need to fact check the following information:[my_last_response] {second_response}[/my_last_response] with another function call, please wait a moment.",
+                        },
+                        {
+                            "role": "user",
+                            "content": "Sure, take your time.",
+                        }
+                    ]
+                )
+                return third_response.choices[0].message.content
             else:
                 return response_message.content
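Both call sites now finish with the same scripted follow-up exchange: the model is shown its own draft answer and prompted to fact check it before the final value is returned. A standalone sketch of that pattern (the helper name fact_check_followup is hypothetical; an OpenAI-compatible client object is assumed):

def fact_check_followup(client, model, system_prompt, draft):
    # Replay the draft answer inside a scripted assistant/user exchange
    # so the model can revise its previous response before it is returned.
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {
                "role": "assistant",
                "content": f"I need to fact check the following information:[my_last_response] {draft}[/my_last_response] with another function call, please wait a moment.",
            },
            {"role": "user", "content": "Sure, take your time."},
        ],
    )
    return response.choices[0].message.content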

@@ -585,33 +651,3 @@ def chat(self, system_prompt,prompt):
             print(f"Error in chat: {e}")
             return None
 
-# # Example usage: A chatbot loop
-# llm_api = LLM_API_Calls()
-
-# def chat_loop():
-#     print("Welcome to the chatbot. Type 'exit' to end the chat.")
-#     system_prompt = f"You are a multi-use function calling LLM. current time is {time}"
-
-#     while True:
-#         try:
-#             prompt = input("You: ")
-#             if prompt.lower() == 'exit':
-#                 print("Goodbye!")
-#                 break
-
-#             response = llm_api.chat(system_prompt=system_prompt, prompt=prompt)
-
-#             if response:
-#                 print("assistant:", response)
-#                 prompt = response  # Using the response as the next prompt, if applicable
-#             else:
-#                 print("Failed to generate a response.")
-
-#         except KeyboardInterrupt:
-#             print("Goodbye!")
-#             break
-#         except Exception as e:
-#             print(f"An error occurred: {e}")
-#             break
-# # Start the chatbot loop
-# chat_loop()
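With the commented-out demo loop removed, a minimal direct invocation might look like the following (a sketch only: it assumes the module is importable as llm_api_calls and that the LLM_API_Calls constructor takes no arguments, as in the deleted example):

from llm_api_calls import LLM_API_Calls

llm_api = LLM_API_Calls()
system_prompt = "You are a multi-use function calling LLM."
response = llm_api.chat(system_prompt=system_prompt, prompt="What tools can you call?")
print(response if response else "Failed to generate a response.")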
