@@ -1,68 +1,192 @@
 from abc import ABC, abstractmethod
 from typing import Tuple

-
 class AgentCompletionABC(ABC):
-    """Abstract base class for AI-driven prompt handling."""
+    """
+    Abstract base class for AI-driven prompt handling. Each method accepts
+    specific input parameters (e.g. source/test content, logs, coverage data)
+    and returns a tuple containing the AI response, along with additional
+    metadata (e.g. token usage and the generated prompt).
+    """

     @abstractmethod
     def generate_tests(
         self,
-        failed_tests: str,
+        source_file_name: str,
+        max_tests: int,
+        source_file_numbered: str,
+        code_coverage_report: str,
         language: str,
-        test_framework: str,
-        coverage_report: str,
+        test_file: str,
+        test_file_name: str,
+        testing_framework: str,
+        additional_instructions_text: str = None,
+        additional_includes_section: str = None,
+        failed_tests_section: str = None,
     ) -> Tuple[str, int, int, str]:
         """
-        Generates additional unit tests to improve test coverage.
+        Generates additional unit tests to improve coverage or handle edge cases.
+
+        Args:
+            source_file_name (str): Name of the source file under test.
+            max_tests (int): Maximum number of test functions to propose.
+            source_file_numbered (str): The source code with line numbers.
+            code_coverage_report (str): Coverage details highlighting untested lines.
+            language (str): The programming language (e.g. "python", "java").
+            test_file (str): Contents of the existing test file.
+            test_file_name (str): The name/path of the test file.
+            testing_framework (str): The test framework in use (e.g. "pytest", "junit").
+            additional_instructions_text (str, optional): Extra instructions or context.
+            additional_includes_section (str, optional): Additional code or includes.
+            failed_tests_section (str, optional): Details of failed tests to consider.

         Returns:
-            Tuple[str, int, int, str]: AI-generated test cases, input token count, output token count, and generated prompt.
+            Tuple[str, int, int, str]:
+                A 4-element tuple containing:
+                - The AI-generated test suggestions (string),
+                - The input token count (int),
+                - The output token count (int),
+                - The final constructed prompt (string).
         """
         pass

     @abstractmethod
     def analyze_test_failure(
-        self, stderr: str, stdout: str, processed_test_file: str
+        self,
+        source_file_name: str,
+        source_file: str,
+        processed_test_file: str,
+        stdout: str,
+        stderr: str,
+        test_file_name: str,
     ) -> Tuple[str, int, int, str]:
         """
-        Analyzes a test failure and returns insights.
+        Analyzes the output of a failed test to determine possible causes and
+        recommended fixes.
+
+        Args:
+            source_file_name (str): Name of the source file being tested.
+            source_file (str): Raw content of the source file.
+            processed_test_file (str): Content of the failing test file (pre-processed).
+            stdout (str): Captured standard output from the test run.
+            stderr (str): Captured standard error from the test run.
+            test_file_name (str): Name/path of the failing test file.

         Returns:
-            Tuple[str, int, int, str]: AI-generated analysis, input token count, output token count, and generated prompt.
+            Tuple[str, int, int, str]:
+                A 4-element tuple containing:
+                - The AI-generated analysis or explanation (string),
+                - The input token count (int),
+                - The output token count (int),
+                - The final constructed prompt (string).
         """
         pass

     @abstractmethod
-    def analyze_test_insert_line(self, test_file: str) -> Tuple[str, int, int, str]:
+    def analyze_test_insert_line(
+        self,
+        language: str,
+        test_file_numbered: str,
+        test_file_name: str,
+        additional_instructions_text: str = None,
+    ) -> Tuple[str, int, int, str]:
         """
-        Determines where to insert new test cases.
+        Determines the correct placement for inserting new test cases into
+        an existing test file.
+
+        Args:
+            language (str): The programming language of the test file.
+            test_file_numbered (str): The test file content, labeled with line numbers.
+            test_file_name (str): Name/path of the test file.
+            additional_instructions_text (str, optional): Any extra instructions or context.

         Returns:
-            Tuple[str, int, int, str]: Suggested insertion point, input token count, output token count, and generated prompt.
+            Tuple[str, int, int, str]:
+                A 4-element tuple containing:
+                - The AI-generated suggestion or instructions (string),
+                - The input token count (int),
+                - The output token count (int),
+                - The final constructed prompt (string).
         """
         pass

     @abstractmethod
     def analyze_test_against_context(
-        self, test_code: str, context: str
+        self,
+        language: str,
+        test_file_content: str,
+        test_file_name_rel: str,
+        context_files_names_rel: str,
     ) -> Tuple[str, int, int, str]:
         """
-        Validates whether a test is appropriate for its corresponding source code.
+        Evaluates a test file against a set of related context files to identify:
+        1. If it is a unit test,
+        2. Which context file the test is primarily targeting.
+
+        Args:
+            language (str): The programming language of the test file.
+            test_file_content (str): Raw content of the test file under review.
+            test_file_name_rel (str): Relative path/name of the test file.
+            context_files_names_rel (str): One or more file names related to the context.

         Returns:
-            Tuple[str, int, int, str]: AI validation result, input token count, output token count, and generated prompt.
+            Tuple[str, int, int, str]:
+                A 4-element tuple containing:
+                - The AI-generated classification or analysis (string),
+                - The input token count (int),
+                - The output token count (int),
+                - The final constructed prompt (string).
         """
         pass

     @abstractmethod
     def analyze_suite_test_headers_indentation(
-        self, test_file: str
+        self,
+        language: str,
+        test_file_name: str,
+        test_file: str,
+    ) -> Tuple[str, int, int, str]:
+        """
+        Analyzes an existing test suite to determine its indentation style,
+        the number of existing tests, and potentially the testing framework.
+
+        Args:
+            language (str): The programming language of the test file.
+            test_file_name (str): Name/path of the test file.
+            test_file (str): Raw content of the test file.
+
+        Returns:
+            Tuple[str, int, int, str]:
+                A 4-element tuple containing:
+                - The AI-generated suite analysis (string),
+                - The input token count (int),
+                - The output token count (int),
+                - The final constructed prompt (string).
+        """
+        pass
+
+    @abstractmethod
+    def adapt_test_command_for_a_single_test_via_ai(
+        self,
+        test_file_relative_path: str,
+        test_command: str,
+        project_root_dir: str,
     ) -> Tuple[str, int, int, str]:
         """
-        Determines the indentation style used in test suite headers.
+        Adapts an existing test command line to run only a single test file,
+        preserving other relevant flags and arguments where possible.
+
+        Args:
+            test_file_relative_path (str): Path to the specific test file to be isolated.
+            test_command (str): The original command line used for running multiple tests.
+            project_root_dir (str): Root directory of the project.

         Returns:
-            Tuple[str, int, int, str]: Suggested indentation style, input token count, output token count, and generated prompt.
+            Tuple[str, int, int, str]:
+                A 4-element tuple containing:
+                - The AI-generated modified command line (string),
+                - The input token count (int),
+                - The output token count (int),
+                - The final constructed prompt (string).
         """
         pass
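For context, a minimal sketch (not part of this commit) of how a concrete subclass might satisfy the updated interface. The class name, the _complete() helper, and the canned responses below are hypothetical; a real implementation would render prompt templates and call an LLM provider, returning the token usage that provider reports. It assumes AgentCompletionABC, as defined above, is in scope.

from typing import Tuple


class EchoAgentCompletion(AgentCompletionABC):
    """Toy implementation that echoes its prompt instead of calling a model."""

    def _complete(self, prompt: str) -> Tuple[str, int, int, str]:
        # Fake response and token counts; every method funnels through here so
        # the (response, input tokens, output tokens, prompt) contract is met
        # in one place. A real subclass would call an LLM here.
        response = f"[stub response for a {len(prompt)}-character prompt]"
        return response, len(prompt.split()), len(response.split()), prompt

    def generate_tests(
        self,
        source_file_name: str,
        max_tests: int,
        source_file_numbered: str,
        code_coverage_report: str,
        language: str,
        test_file: str,
        test_file_name: str,
        testing_framework: str,
        additional_instructions_text: str = None,
        additional_includes_section: str = None,
        failed_tests_section: str = None,
    ) -> Tuple[str, int, int, str]:
        prompt = (
            f"Write up to {max_tests} new {testing_framework} tests for "
            f"{source_file_name} ({language}), focusing on uncovered lines."
        )
        return self._complete(prompt)

    def analyze_test_failure(
        self, source_file_name: str, source_file: str, processed_test_file: str,
        stdout: str, stderr: str, test_file_name: str,
    ) -> Tuple[str, int, int, str]:
        return self._complete(f"Explain the failure in {test_file_name}:\n{stderr}")

    def analyze_test_insert_line(
        self, language: str, test_file_numbered: str, test_file_name: str,
        additional_instructions_text: str = None,
    ) -> Tuple[str, int, int, str]:
        return self._complete(f"Pick an insertion line in {test_file_name}.")

    def analyze_test_against_context(
        self, language: str, test_file_content: str, test_file_name_rel: str,
        context_files_names_rel: str,
    ) -> Tuple[str, int, int, str]:
        return self._complete(
            f"Is {test_file_name_rel} a unit test of {context_files_names_rel}?"
        )

    def analyze_suite_test_headers_indentation(
        self, language: str, test_file_name: str, test_file: str,
    ) -> Tuple[str, int, int, str]:
        return self._complete(f"Report indentation and test count for {test_file_name}.")

    def adapt_test_command_for_a_single_test_via_ai(
        self, test_file_relative_path: str, test_command: str, project_root_dir: str,
    ) -> Tuple[str, int, int, str]:
        return self._complete(
            f"Rewrite '{test_command}' to run only {test_file_relative_path}."
        )

Because every method returns the same shape, callers can unpack uniformly, e.g. response, prompt_tokens, completion_tokens, prompt = agent.generate_tests(...), regardless of which operation produced the result.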