Skip to content

Commit e475565

Browse files
authored
Merge branch 'main' into traceback_to_llm
2 parents 874ec81 + 4c907f0 commit e475565

File tree

6 files changed

+134
-148
lines changed

6 files changed

+134
-148
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -207,6 +207,7 @@ htmlcov/
207207
.cache
208208
nosetests.xml
209209
coverage.xml
210+
coverage.json
210211
*,cover
211212
.hypothesis/
212213
*.mo

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,10 +39,10 @@ of [LiteLLM](https://docs.litellm.ai/docs/providers):
3939
## 🔧 Requirements
4040

4141
- Python 3.12
42-
- Chasten leverages numerous Python packages, including notable ones such as:
42+
- Execexam leverages numerous Python packages, including notable ones such as:
4343
- [Rich](https://github.com/Textualize/rich): Full-featured formatting and display of text in the terminal
4444
- [Typer](https://github.com/tiangolo/typer): Easy-to-implement and fun-to-use command-line interfaces
45-
- The developers of Chasten use [Poetry](https://github.com/python-poetry/poetry) for packaging and dependency management
45+
- The developers of Execexam use [Poetry](https://github.com/python-poetry/poetry) for packaging and dependency management
4646

4747
## 🔽 Installation
4848

execexam/display.py

Lines changed: 49 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -35,51 +35,55 @@ def display_tldr(console: Console) -> None:
3535
console.print(
3636
"[bold yellow]Too Lazy; Didn't Read: Example Commands[/bold yellow]\n"
3737
)
38-
console.print(
39-
"[bold red]Please ensure you are in the directory with the pyproject.toml file to run these commands.[/bold red]\n"
40-
)
41-
42-
console.print(
43-
"[bold cyan]poetry run execexam <path-to-project> <path-to-tests>[/bold cyan]"
44-
)
45-
console.print(
46-
" Run executable exam for a project with the specified test files."
47-
)
48-
49-
console.print(
50-
"[bold cyan]poetry run execexam <path-to-project> <path-to-tests> --mark <mark>[/bold cyan]"
51-
)
52-
console.print(" Run the tests with the specified mark(s).")
53-
54-
console.print(
55-
"[bold cyan]poetry run execexam <path-to-project> <path-to-tests> --maxfail[/bold cyan]"
56-
)
57-
console.print(" Limit the number of test failures before stopping.")
58-
59-
console.print(
60-
"[bold cyan]poetry run execexam <path-to-project> <path-to-tests> --report <report_type>/<all>[/bold cyan]"
61-
)
62-
console.print(
63-
" Generate the specified type(s) of reports after the exam. Use 'all' to generate all available report types."
64-
)
65-
66-
console.print(
67-
"[bold cyan]poetry run execexam <path-to-project> <path-to-tests> --advice-model <model> --advice-method <method>[/bold cyan]"
68-
)
69-
console.print(
70-
" Use specified LLM model and method for providing advice on test failures."
71-
)
72-
73-
console.print(
74-
"[bold cyan]poetry run execexam <path-to-project> <path-to-tests> <--debug>/<--no-debug>[/bold cyan]"
75-
)
76-
console.print(" Display or disable debugging information.")
77-
78-
console.print(
79-
"[bold cyan]poetry run execexam <path-to-project> <path-to-tests> <--fancy>/<--no-fancy>[/bold cyan]"
80-
)
81-
console.print(" Display or disable fancy output formatting.")
82-
38+
commands = {
39+
"mark": {
40+
"command": "execexam <path-to-project> <path-to-tests> --mark mark_type",
41+
"description": "Run tests with specific markers.",
42+
},
43+
"maxfail": {
44+
"command": "execexam <path-to-project> <path-to-tests> --maxfail number",
45+
"description": "Set maximum number of test failures before stopping test execution (default: 10)",
46+
},
47+
"report": {
48+
"command": "execexam <path-to-project> <path-to-tests> --report report_type/all",
49+
"description": "Generate the specified type(s) of reports after the exam. Use 'all' to generate all available report types.",
50+
},
51+
"advice-method": {
52+
"command": "execexam --advice-method <method> --advice-model <model> --advice-server <server>",
53+
"description": "Specify the LLM model and advice method to use Coding Mentor. Consult documentation for available models and methods.",
54+
},
55+
"debug": {
56+
"command": "execexam <path-to-project> <path-to-tests> --debug/--no-debug",
57+
"description": "Enable or disable debug mode to collect additional debugging information during execution.",
58+
},
59+
"fancy": {
60+
"command": "execexam <path-to-project> <path-to-tests> --fancy/--no-fancy",
61+
"description": "Toggle fancy output formatting. Disable for simpler output in plain-text environments.",
62+
},
63+
"verbose": {
64+
"command": "execexam <path-to-project> <path-to-tests> --verbose/--no-verbose",
65+
"description": "Enable or disable verbose output to see more detailed logs of the program's execution.",
66+
},
67+
"syntax-theme": {
68+
"command": "execexam <path-to-project> <path-to-tests> --syntax-theme theme_name",
69+
"description": "Choose syntax highlighting theme for code output (options: ansi_dark, ansi_light)",
70+
},
71+
}
72+
# display the TLDR information for each of the commands, ensuring
73+
# that the final display of the TLDR summary does not display a newline
74+
command_items = list(commands.items())
75+
for i, (command_name, command_info) in enumerate(command_items):
76+
console.print(f"[bold green]{command_name}[/bold green]")
77+
console.print(
78+
f"[bold white]Command:[/bold white] [bold cyan]{command_info['command']}[/bold cyan]"
79+
)
80+
console.print(
81+
f"[bold white]Description:[/bold white] {command_info['description']}"
82+
)
83+
if i < len(command_items) - 1:
84+
console.print()
85+
# display a helpful message to the user about how they can
86+
# use the --help option to see more options
8387
console.print(
8488
"\n[bold yellow]help:[/bold yellow] Use [bold yellow]--help[/bold yellow] to see more options."
8589
)

execexam/main.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ def run( # noqa: PLR0913, PLR0915
6262
callback=tldr_callback,
6363
help="Display summary of commands",
6464
),
65-
] = None,
65+
] = False,
6666
report: Optional[List[enumerations.ReportType]] = typer.Option(
6767
None,
6868
help="Types of reports to generate",
@@ -103,8 +103,9 @@ def run( # noqa: PLR0913, PLR0915
103103
litellm_thread = threading.Thread(target=advise.load_litellm)
104104
# if --tldr was specified, then display the TLDR summary
105105
# of the commands and then exit the program
106-
if tldr is not None:
107-
return
106+
if tldr:
107+
display.display_tldr(console)
108+
raise typer.Exit()
108109
# if execexam was configured to produce the report for advice
109110
# or if it was configured to produce all of the possible reports,
110111
# then start the litellm thread that provides the advice
@@ -122,7 +123,7 @@ def run( # noqa: PLR0913, PLR0915
122123
# a custom pytest plugin for the executable examination
123124
json_report_plugin = JSONReport()
124125
# display basic diagnostic information about command-line's arguments;
125-
# extract the local parmeters and then make a displayable string of them
126+
# extract the local parameters and then make a displayable string of them
126127
args = locals()
127128
colon_separated_diagnostics = display.make_colon_separated_string(args)
128129
# --> SETUP

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
name = "execexam"
33
version = "0.3.3"
44
description = "ExecExam runs executable examinations, providing feedback and assistance!"
5-
authors = ["Pallas-Athena Cain <cain01@allegheny.edu>", "Gregory M. Kapfhammer <gkapfham@allegheny.edu>"]
5+
authors = ["Pallas-Athena Cain <cain01@allegheny.edu>", "Hemani Alaparthi <alaparthi01@allegheny.edu>","Gregory M. Kapfhammer <gkapfham@allegheny.edu>"]
66
readme = "README.md"
77

88
[tool.poetry.scripts]

tests/test_main.py

Lines changed: 76 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -1,101 +1,81 @@
1-
"""Test cases for the main.py file."""
1+
"""Test cases for the command-line interface provided by main."""
22

3-
import os
4-
import subprocess
5-
import sys
6-
import tempfile
7-
import venv
8-
from pathlib import Path
9-
10-
import pytest
113
from typer.testing import CliRunner
124

5+
from execexam import main
6+
7+
# NOTE: Unless there is a clear reason to do so, only
8+
# write tests for the command-line interface using the
9+
# CliRunner provided by typer.
10+
1311
runner = CliRunner()
1412

15-
EXPECTED_EXIT_CODE_FILE_NOT_FOUND = 4
16-
17-
18-
@pytest.fixture
19-
def poetry_env():
20-
"""Create a temporary virtual environment with poetry installed."""
21-
with tempfile.TemporaryDirectory() as temp_dir:
22-
venv_path = Path(temp_dir) / "venv"
23-
# Create a virtual environment
24-
venv.create(venv_path, with_pip=True)
25-
# Get the path to the Python executable in the virtual environment
26-
if sys.platform == "win32":
27-
python_path = venv_path / "Scripts" / "python.exe"
28-
pip_path = venv_path / "Scripts" / "pip.exe"
29-
else:
30-
python_path = venv_path / "bin" / "python"
31-
pip_path = venv_path / "bin" / "pip"
32-
# Install poetry in the virtual environment
33-
subprocess.run(
34-
[str(pip_path), "install", "poetry"],
35-
check=True,
36-
capture_output=True,
37-
text=True,
38-
)
39-
yield str(python_path)
40-
41-
42-
@pytest.fixture
43-
def cwd():
44-
"""Define a test fixture for the current working directory."""
45-
return os.getcwd()
46-
47-
48-
@pytest.mark.no_print
49-
def test_run_with_missing_test(cwd, poetry_env, capfd):
50-
"""Test the run command with default options."""
51-
with tempfile.TemporaryDirectory() as temp_dir:
52-
test_one = Path(temp_dir) / "test_one"
53-
test_one.mkdir()
54-
test_path = Path(".") / "tests" / "test_question_one.py"
55-
test_path_str = str(test_path)
56-
env = os.environ.copy()
57-
if sys.platform == "win32":
58-
env["PYTHONIOENCODING"] = "utf-8"
59-
env["PYTHONUTF8"] = "1"
60-
try:
61-
# Disable output capture temporarily
62-
with capfd.disabled():
63-
result = subprocess.run(
64-
[
65-
poetry_env,
66-
"-m",
67-
"poetry",
68-
"run",
69-
"execexam",
70-
".",
71-
test_path_str,
72-
"--report",
73-
"trace",
74-
"--report",
75-
"status",
76-
"--report",
77-
"failure",
78-
"--report",
79-
"code",
80-
"--report",
81-
"setup",
82-
],
83-
capture_output=True,
84-
text=True,
85-
encoding="utf-8",
86-
errors="replace",
87-
check=False,
88-
env=env,
89-
cwd=cwd,
90-
)
91-
assert (
92-
result.returncode in [EXPECTED_EXIT_CODE_FILE_NOT_FOUND]
93-
), f"Expected return code {EXPECTED_EXIT_CODE_FILE_NOT_FOUND}, got {result.returncode}"
94-
assert (
95-
"file or directory not found" in result.stdout.lower()
96-
or "no such file or directory" in result.stderr.lower()
97-
), "Expected error message about missing file not found in output"
98-
except UnicodeDecodeError as e:
99-
pytest.fail(f"Unicode decode error: {e!s}")
100-
except Exception as e:
101-
pytest.fail(f"Unexpected error: {e!s}")
13+
# NOTE: tests that run execexam through its CLI
14+
# using the CliRunner can run into dependency issues
15+
# due to the fact that the pytest plugin that
16+
# execexam uses is going to be repeatedly loaded
17+
# and (potentially) not unloaded
18+
19+
# Tests that provide valid arguments {{{
20+
21+
22+
def test_run_use_help():
23+
"""Test the run command with the --help."""
24+
result = runner.invoke(main.cli, ["run", "--help"])
25+
assert result.exit_code == 0
26+
assert "Arguments" in result.output
27+
assert "Options" in result.output
28+
29+
30+
def test_run_use_tldr():
31+
"""Test the run command with the --tldr."""
32+
result = runner.invoke(main.cli, ["run", "--tldr"])
33+
assert result.exit_code == 0
34+
assert "Too" in result.output
35+
assert "Lazy" in result.output
36+
assert "--help" in result.output
37+
38+
39+
def test_run_use_tldr_and_help_defaults_to_help():
40+
"""Test the run command with the --tldr and --help."""
41+
result = runner.invoke(main.cli, ["run", "--tldr", "--help"])
42+
assert result.exit_code == 0
43+
assert "Arguments" in result.output
44+
assert "Options" in result.output
45+
result = runner.invoke(main.cli, ["run", "--help", "--tldr"])
46+
assert result.exit_code == 0
47+
assert "Arguments" in result.output
48+
assert "Options" in result.output
49+
50+
51+
# }}}
52+
53+
54+
# Tests that provide invalid arguments {{{
55+
56+
57+
def test_run_valid_argument_no_action():
58+
"""Test the run command with valid required arguments."""
59+
result = runner.invoke(main.cli, ["run", ". tests/"])
60+
assert result.exit_code != 0
61+
62+
63+
def test_run_invalid_report_argument():
64+
"""Test the run command with invalid report argument."""
65+
result = runner.invoke(main.cli, ["run", ". tests/", "--report invalid"])
66+
assert result.exit_code != 0
67+
68+
69+
def test_invalid_tldr_spelling():
70+
"""Test the run command with invalid tldr command-line argument spelling."""
71+
result = runner.invoke(main.cli, ["run", ". tests/", "--tldear"])
72+
assert result.exit_code != 0
73+
74+
75+
def test_invalid_help_spelling():
76+
"""Test the run command with invalid help command-line argument spelling."""
77+
result = runner.invoke(main.cli, ["run", ". tests/", "--hlp"])
78+
assert result.exit_code != 0
79+
80+
81+
# }}}

0 commit comments

Comments
 (0)