17 | 17 | import json
18 | 18 |
19 | 19 |
20 | | -@pytest.fixture(scope="module")
21 | | -def api_key():
22 | | -    load_dotenv(dotenv_path="./.env") # Adjust the path as needed
23 | | -    return os.getenv("OPENAI_API_KEY")
24 | | -
25 | | -
26 | | -@pytest.mark.API_test
27 | | -def test_json_output(api_key):
28 | | -    openai.api_key = api_key
29 | | -    prompts = [
30 | | -        "You are a brilliant math professor. Solve the following problem and put your answer after four hashtags like the following example: \nQuestion: What is 4 + 4?\nAnswer: 4 + 4 is ####8\n\n Make your response as short as possible.",
31 | | -        "You are a foolish high-school student. Solve the following problem and put your answer after four hashtags like the following example: \nQuestion: What is 4 + 4?\nAnswer: 4 + 4 is ####8\n\n Make your response as short as possible.",
32 | | -    ]
33 | | -
34 | | -    model = "gpt-3.5-turbo-1106"
35 | | -
36 | | -    response = query_model(
37 | | -        prompts[0],
38 | | -        "What is 4 + 4?",
39 | | -        model_name=model,
40 | | -        output_tokens=150,
41 | | -    )
42 | | -
43 | | -    response_dict = response_to_dict(response)
44 | | -
45 | | -    # Check the main keys
46 | | -    assert "id" in response_dict
47 | | -    assert "model" in response_dict
48 | | -    assert "object" in response_dict
49 | | -    assert "created" in response_dict
50 | | -    assert "system_fingerprint" in response_dict
51 | | -    assert "choices" in response_dict
52 | | -    assert "usage" in response_dict
53 | | -
54 | | -    # Check the types of the main keys
55 | | -    assert isinstance(response_dict["id"], str)
56 | | -    assert isinstance(response_dict["model"], str)
57 | | -    assert isinstance(response_dict["object"], str)
58 | | -    assert isinstance(response_dict["created"], int)
59 | | -    assert isinstance(response_dict["system_fingerprint"], str)
60 | | -    assert isinstance(response_dict["choices"], list)
61 | | -    assert isinstance(response_dict["usage"], dict)
62 | | -
63 | | -    # Check the structure and types of the 'choices' key
64 | | -    assert len(response_dict["choices"]) > 0
65 | | -    for choice in response_dict["choices"]:
66 | | -        assert "finish_reason" in choice
67 | | -        assert "index" in choice
68 | | -        assert "message" in choice
69 | | -        assert isinstance(choice["finish_reason"], str)
70 | | -        assert isinstance(choice["index"], int)
71 | | -        assert isinstance(choice["message"], dict)
72 | | -        assert "content" in choice["message"]
73 | | -        assert "role" in choice["message"]
74 | | -        assert isinstance(choice["message"]["content"], str)
75 | | -        assert isinstance(choice["message"]["role"], str)
76 | | -
77 | | -    # Check the structure and types of the 'usage' key
78 | | -    assert "completion_tokens" in response_dict["usage"]
79 | | -    assert "prompt_tokens" in response_dict["usage"]
80 | | -    assert "total_tokens" in response_dict["usage"]
81 | | -    assert isinstance(response_dict["usage"]["completion_tokens"], int)
82 | | -    assert isinstance(response_dict["usage"]["prompt_tokens"], int)
83 | | -    assert isinstance(response_dict["usage"]["total_tokens"], int)
84 | | -
85 | | -
86 | | -@pytest.mark.API_test
87 | | -def test_query_model(api_key):
88 | | -    openai.api_key = api_key
89 | | -    prompt = "You are a brilliant math professor. Solve the following problem and put your answer after four hashtags like the following example: \nQuestion: What is 4 + 4?\nAnswer: 4 + 4 is ####8\n\n Make your response as short as possible."
90 | | -    question = "What is 4 + 4?"
91 | | -    model_name = "gpt-3.5-turbo-1106"
92 | | -    output_tokens = 150
93 | | -    response = query_model(prompt, question, model_name, output_tokens)
94 | | -    assert isinstance(response.choices[0].message.content, str)
95 | | -    assert len(response.choices[0].message.content) > 0
96 | | -    assert "8" in response.choices[0].message.content
97 | | -
98 | | -    prompt = 'You are a brilliant math professor. Solve the following problem and return a JSON with the first entry being the reasoning behind the choice labeled as "reasoning", and the second entry being the answer to the question containing only the letter "A", "B", "C" or "D", labeled as "answer". Try to keep your reasoning concise.'
99 | | -    question = "What is 4 + 4? A. 8 B. 9 C. 10 D. 11"
100 | | -    model_name = "gpt-3.5-turbo-1106"
101 | | -    output_tokens = 150
102 | | -    json_mode = True
103 | | -    response = query_model(
104 | | -        prompt, question, model_name, output_tokens, return_json=json_mode
105 | | -    )
106 | | -    json_response = json.loads(response.choices[0].message.content)
107 | | -    assert isinstance(json_response, dict)
108 | | -    assert json_response["answer"] == "A"
| 20 | +# @pytest.fixture(scope="module")
| 21 | +# def api_key():
| 22 | +#     load_dotenv(dotenv_path="./.env") # Adjust the path as needed
| 23 | +#     return os.getenv("OPENAI_API_KEY")
| 24 | +
| 25 | +
| 26 | +# @pytest.mark.API_test
| 27 | +# def test_json_output(api_key):
| 28 | +#     openai.api_key = api_key
| 29 | +#     prompts = [
| 30 | +#         "You are a brilliant math professor. Solve the following problem and put your answer after four hashtags like the following example: \nQuestion: What is 4 + 4?\nAnswer: 4 + 4 is ####8\n\n Make your response as short as possible.",
| 31 | +#         "You are a foolish high-school student. Solve the following problem and put your answer after four hashtags like the following example: \nQuestion: What is 4 + 4?\nAnswer: 4 + 4 is ####8\n\n Make your response as short as possible.",
| 32 | +#     ]
| 33 | +
| 34 | +#     model = "gpt-3.5-turbo-1106"
| 35 | +
| 36 | +#     print("Testing the JSON output of the query_model function")
| 37 | +
| 38 | +#     response = query_model(
| 39 | +#         prompts[0],
| 40 | +#         "What is 4 + 4?",
| 41 | +#         model_name=model,
| 42 | +#         output_tokens=150,
| 43 | +#     )
| 44 | +
| 45 | +#     print(response.choices[0].message.content)
| 46 | +
| 47 | +#     response_dict = response_to_dict(response)
| 48 | +
| 49 | +#     # Check the main keys
| 50 | +#     assert "id" in response_dict
| 51 | +#     assert "model" in response_dict
| 52 | +#     assert "object" in response_dict
| 53 | +#     assert "created" in response_dict
| 54 | +#     assert "system_fingerprint" in response_dict
| 55 | +#     assert "choices" in response_dict
| 56 | +#     assert "usage" in response_dict
| 57 | +
| 58 | +#     # Check the types of the main keys
| 59 | +#     assert isinstance(response_dict["id"], str)
| 60 | +#     assert isinstance(response_dict["model"], str)
| 61 | +#     assert isinstance(response_dict["object"], str)
| 62 | +#     assert isinstance(response_dict["created"], int)
| 63 | +#     assert isinstance(response_dict["system_fingerprint"], str)
| 64 | +#     assert isinstance(response_dict["choices"], list)
| 65 | +#     assert isinstance(response_dict["usage"], dict)
| 66 | +
| 67 | +#     # Check the structure and types of the 'choices' key
| 68 | +#     assert len(response_dict["choices"]) > 0
| 69 | +#     for choice in response_dict["choices"]:
| 70 | +#         assert "finish_reason" in choice
| 71 | +#         assert "index" in choice
| 72 | +#         assert "message" in choice
| 73 | +#         assert isinstance(choice["finish_reason"], str)
| 74 | +#         assert isinstance(choice["index"], int)
| 75 | +#         assert isinstance(choice["message"], dict)
| 76 | +#         assert "content" in choice["message"]
| 77 | +#         assert "role" in choice["message"]
| 78 | +#         assert isinstance(choice["message"]["content"], str)
| 79 | +#         assert isinstance(choice["message"]["role"], str)
| 80 | +
| 81 | +#     # Check the structure and types of the 'usage' key
| 82 | +#     assert "completion_tokens" in response_dict["usage"]
| 83 | +#     assert "prompt_tokens" in response_dict["usage"]
| 84 | +#     assert "total_tokens" in response_dict["usage"]
| 85 | +#     assert isinstance(response_dict["usage"]["completion_tokens"], int)
| 86 | +#     assert isinstance(response_dict["usage"]["prompt_tokens"], int)
| 87 | +#     assert isinstance(response_dict["usage"]["total_tokens"], int)
| 88 | +
| 89 | +
| 90 | +# @pytest.mark.API_test
| 91 | +# def test_query_model(api_key):
| 92 | +#     openai.api_key = api_key
| 93 | +#     prompt = "You are a brilliant math professor. Solve the following problem and put your answer after four hashtags like the following example: \nQuestion: What is 4 + 4?\nAnswer: 4 + 4 is ####8\n\n Make your response as short as possible."
| 94 | +#     question = "What is 4 + 4?"
| 95 | +#     model_name = "gpt-3.5-turbo-1106"
| 96 | +#     output_tokens = 150
| 97 | +#     response = query_model(prompt, question, model_name, output_tokens)
| 98 | +#     assert isinstance(response.choices[0].message.content, str)
| 99 | +#     assert len(response.choices[0].message.content) > 0
| 100 | +#     assert "8" in response.choices[0].message.content
| 101 | +
| 102 | +#     prompt = 'You are a brilliant math professor. Solve the following problem and return a JSON with the first entry being the reasoning behind the choice labeled as "reasoning", and the second entry being the answer to the question containing only the letter "A", "B", "C" or "D", labeled as "answer". Try to keep your reasoning concise.'
| 103 | +#     question = "What is 4 + 4? A. 8 B. 9 C. 10 D. 11"
| 104 | +#     model_name = "gpt-3.5-turbo-1106"
| 105 | +#     output_tokens = 150
| 106 | +#     json_mode = True
| 107 | +#     response = query_model(
| 108 | +#         prompt, question, model_name, output_tokens, return_json=json_mode
| 109 | +#     )
| 110 | +#     json_response = json.loads(response.choices[0].message.content)
| 111 | +#     assert isinstance(json_response, dict)
| 112 | +#     assert json_response["answer"] == "A"
109 | 113 |
110 | 114 |
111 | 115 | def test_with_commas_and_dollar_sign():
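The tests commented out above depend on a live OPENAI_API_KEY and real calls to query_model. If they are ever revived, a common alternative to commenting them out is to skip them automatically when no key is configured. The sketch below is not part of this commit: it reuses the .env location, the API_test marker, and the query_model call shape visible in the diff, while the import path for query_model is an assumption.

# Sketch only (not part of this commit): skip live-API tests when no key is set.
import os

import openai
import pytest
from dotenv import load_dotenv

load_dotenv(dotenv_path="./.env")  # same .env location the old api_key fixture used

requires_api_key = pytest.mark.skipif(
    os.getenv("OPENAI_API_KEY") is None,
    reason="OPENAI_API_KEY not set; skipping live API tests",
)


@requires_api_key
@pytest.mark.API_test
def test_query_model_smoke():
    from tests.utils import query_model  # hypothetical module path

    openai.api_key = os.getenv("OPENAI_API_KEY")  # mirrors the old fixture + assignment

    # Same call shape as the removed test_query_model above.
    response = query_model(
        "Answer as briefly as possible.",
        "What is 4 + 4?",
        model_name="gpt-3.5-turbo-1106",
        output_tokens=150,
    )
    assert "8" in response.choices[0].message.content

With this pattern, running pytest with an unset key, or deselecting the marker via -m "not API_test", leaves the rest of the suite unaffected without keeping the tests commented out.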