Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -18,3 +18,6 @@ devenv.local.nix

# pre-commit
.pre-commit-config.yaml

# test
unit-text.json
28 changes: 28 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,34 @@ unit-text ideate
unit-text test <path-to-the-draft.md>
```

### API

The package also provides a FastAPI server for programmatic access:

```bash
# Start the development server
uv run fastapi dev ./src/unit_text/api

# Start the production server
uv run fastapi run ./src/unit_text/api

# or for convenience
uv run unit-text-api
```

The server exposes a `/test` endpoint that accepts a POST request with a JSON body containing two fields:

- `draft`: The text of your draft
- `idea`: Your idea configuration (the contents of your `unit-text.json`)

Example using curl:

```bash
curl -X POST http://localhost:8000/test \
  -H "Content-Type: application/json" \
  -d '{"draft": "Your draft text...", "idea": '"$(cat unit-text.json)"'}'
```

## 📝 Process

The ideation phase is where you define your blog idea. It looks something like this:
Expand Down
47 changes: 27 additions & 20 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,17 @@ description = "Unit tests for plain text"
readme = "README.md"
license = "AGPL-3.0-or-later"
license-files = ["LICENSE"]
keywords = ["cli", "ollama", "llm", "language", "unit", "test", "blog", "editor", "ai"]
keywords = [
"cli",
"ollama",
"llm",
"language",
"unit",
"test",
"blog",
"editor",
"ai",
]
classifiers = [
"Development Status :: 4 - Beta",
"Environment :: Console",
Expand All @@ -29,30 +39,33 @@ authors = [
]
requires-python = ">=3.11"
dependencies = [
"dicttoxml>=1.7.16",
"ollama>=0.4.7",
"pydantic>=2.10.6",
"dicttoxml>=1.7.16",

# CLI
"typer>=0.15.1",
"rich",

# TODO should be optional?
# API
"fastapi[standard]>=0.115.11",
]

# https://packaging.python.org/en/latest/specifications/well-known-project-urls/#well-known-labels
[project.urls]
source = "https://github.com/sealambda/unit-text"
issues = "https://github.com/sealambda/unit-text/issues"

[project.scripts]
unit-text = "unit_text:app"
unit-text-api = "unit_text.api:run"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[dependency-groups]
dev = [
"commitizen>=4.1.1",
"pre-commit>=4.1.0",
"ruff>=0.9.4",
]
dev = ["commitizen>=4.1.1", "pre-commit>=4.1.0", "ruff>=0.9.4"]

[tool.commitizen]
name = "cz_conventional_commits"
Expand All @@ -65,16 +78,10 @@ major_version_zero = true

[tool.ruff.lint]
select = [
# pycodestyle
"E",
# Pyflakes
"F",
# pyupgrade
"UP",
# flake8-bugbear
"B",
# flake8-simplify
"SIM",
# isort
"I",
"E", # pycodestyle
"F", # Pyflakes
"UP", # pyupgrade
"B", # flake8-bugbear
"SIM", # flake8-simplify
"I", # isort
]
12 changes: 11 additions & 1 deletion src/unit_text/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,11 @@
from .cli import app as app
"""Unit tests for plain text."""

from unit_text.cli import app
from unit_text.core import Evaluation, IdeaModel, run_tests

__all__ = [
"Evaluation",
"IdeaModel",
"app",
"run_tests",
]
7 changes: 5 additions & 2 deletions src/unit_text/__main__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
from .cli import app
"""Main entry point for unit-text."""

app(prog_name="unit-text")
from unit_text.cli.app import app

if __name__ == "__main__":
app(prog_name="unit-text")
4 changes: 4 additions & 0 deletions src/unit_text/api/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from .app import app
from .server import run

__all__ = ["app", "run"]
6 changes: 6 additions & 0 deletions src/unit_text/api/__main__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
"""Main entry point for unit-text."""

from .app import app

if __name__ == "__main__":
app(prog_name="unit-text-api")
32 changes: 32 additions & 0 deletions src/unit_text/api/app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
"""FastAPI application for the unit-text API."""

import logging

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

from unit_text.core import IdeaModel, TestResult, run_tests

app = FastAPI()


class TestRequest(BaseModel):
    """Request body for the ``/test`` endpoint.

    Sent as JSON: the raw draft text plus the idea configuration
    the draft is evaluated against.
    """

    # Full text of the draft to evaluate.
    draft: str
    # Parsed idea configuration (see unit_text.core.IdeaModel).
    idea: IdeaModel


@app.get("/")
async def root():
"""Root endpoint returning a welcome message."""
return {"message": "Welcome to the unit-text API!"}


@app.post("/test")
async def test(request: TestRequest) -> TestResult:
"""Run tests on the input draft against the idea configuration."""
print("Running tests...")

try:
return run_tests(request.draft, request.idea)
except Exception as e:
raise HTTPException(status_code=400, detail=str(e)) from e
8 changes: 8 additions & 0 deletions src/unit_text/api/server.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
import uvicorn

from .app import app


# Convenience entry point to run the server from the command line
# (exposed as the `unit-text-api` script in pyproject.toml).
def run(host: str = "0.0.0.0", port: int = 8000) -> None:
    """Serve the API with uvicorn.

    Args:
        host: Interface to bind; the default "0.0.0.0" listens on all
            interfaces, preserving the previous hard-coded behavior.
        port: TCP port to listen on; defaults to the previous 8000.
    """
    uvicorn.run(app, host=host, port=port)
3 changes: 3 additions & 0 deletions src/unit_text/cli/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
from .app import app

__all__ = ["app"]
74 changes: 3 additions & 71 deletions src/unit_text/cli.py → src/unit_text/cli/app.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,14 @@
from pathlib import Path
from typing import Annotated

import ollama
import typer
from dicttoxml import dicttoxml
from rich import print
from rich.console import Group
from rich.markdown import Markdown
from rich.panel import Panel
from rich.prompt import Prompt

from .models import Evaluation, IdeaModel, TestResult
from unit_text.core import Evaluation, IdeaModel, run_tests

app = typer.Typer(
short_help="Unit tests for prose",
Expand Down Expand Up @@ -94,77 +92,11 @@ def ideate(config: OptConfig = default_config):

@app.command()
def test(file: Path, config: OptConfig = default_config):
"""Test the input file."""
"""Run tests on the input file against the config."""
print("Running tests...")

idea = IdeaModel.model_validate_json(config.read_text())

xml_idea = dicttoxml(idea.model_dump(), attr_type=False, custom_root="idea")

body = {"draft": file.read_text()}
xml_body = dicttoxml(body, attr_type=False, root=False)

prompt = f"""
{xml_idea}

{xml_body}
"""

response = ollama.chat(
model="deepseek-r1:7b",
messages=[
{
"role": "system",
"content": """
You are an experienced technical writer and editor
with expertise in developer-focused content.
Your role is to provide detailed, actionable feedback on blog posts,
focusing on both technical accuracy and engaging writing style.

When reviewing a blog post, analyze it against the following criteria:

1. Clarity:
- Is the main message clear and well-articulated?
- Are technical concepts explained appropriately for the target audience?
- Is the writing style engaging and accessible?
- Are there any confusing or ambiguous sections?

2. Alignment with Objectives:
- Does the content match the stated goals and target audience?
- Is the technical depth appropriate for the intended readers?
- Are the examples and analogies relevant and helpful?
- Does the post deliver on its promises?

3. Completeness:
- Are all key points fully developed?
- Is there a clear introduction and conclusion?
- Are code examples (if any) complete and well-explained?
- Are there any missing or unnecessary sections?

4. Overall Suggestions:
- Specific improvements for structure and flow
- Recommendations for enhancing engagement
- Suggestions for technical accuracy or depth
- Ideas for better examples or analogies

For each evaluation, return a `test_passed` boolean
to indicate if the content was good enough for that specific aspect.

Keep your feedback constructive but honest.
Focus on specific, actionable improvements rather than general observations.
Reference specific parts of the text when making suggestions.
""",
},
{"role": "user", "content": prompt},
],
format=TestResult.model_json_schema(),
options=ollama.Options(
temperature=0, # to ensure consistent results
num_ctx=8192, # to ensure the entire text is processed
),
)

out = TestResult.model_validate_json(response.message.content)
out = run_tests(file.read_text(), idea)

def evaluation_panel(evaluation: Evaluation, title: str) -> Panel:
return Panel(
Expand Down
9 changes: 9 additions & 0 deletions src/unit_text/core/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
from .lib import run_tests
from .models import Evaluation, IdeaModel, TestResult

__all__ = [
"Evaluation",
"TestResult",
"run_tests",
"IdeaModel",
]
74 changes: 74 additions & 0 deletions src/unit_text/core/lib.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import ollama
from dicttoxml import dicttoxml

from .models import IdeaModel, TestResult


def run_tests(
    draft: str,
    idea: IdeaModel,
    model: str = "deepseek-r1:7b",
) -> TestResult:
    """Run tests on the draft content against the idea.

    Args:
        draft: Full text of the draft to evaluate.
        idea: Idea configuration the draft is checked against.
        model: Ollama model to query. Defaults to the previously
            hard-coded "deepseek-r1:7b", so existing callers are
            unaffected.

    Returns:
        The structured ``TestResult`` parsed from the model's reply.

    Raises:
        pydantic.ValidationError: if the model response does not match
            the ``TestResult`` schema.
    """
    # Serialize the idea and the draft as XML fragments for the prompt;
    # XML tags give the model unambiguous boundaries between the two.
    xml_idea = dicttoxml(idea.model_dump(), attr_type=False, custom_root="idea")

    body = {"draft": draft}
    xml_body = dicttoxml(body, attr_type=False, root=False)

    prompt = f"""
    {xml_idea}

    {xml_body}
    """

    response = ollama.chat(
        model=model,
        messages=[
            {
                "role": "system",
                "content": """
                You are an experienced technical writer and editor
                with expertise in developer-focused content.
                Your role is to provide detailed, actionable feedback on blog posts,
                focusing on both technical accuracy and engaging writing style.

                When reviewing a blog post, analyze it against the following criteria:

                1. Clarity:
                - Is the main message clear and well-articulated?
                - Are technical concepts explained appropriately for the target audience?
                - Is the writing style engaging and accessible?
                - Are there any confusing or ambiguous sections?

                2. Alignment with Objectives:
                - Does the content match the stated goals and target audience?
                - Is the technical depth appropriate for the intended readers?
                - Are the examples and analogies relevant and helpful?
                - Does the post deliver on its promises?

                3. Completeness:
                - Are all key points fully developed?
                - Is there a clear introduction and conclusion?
                - Are code examples (if any) complete and well-explained?
                - Are there any missing or unnecessary sections?

                4. Overall Suggestions:
                - Specific improvements for structure and flow
                - Recommendations for enhancing engagement
                - Suggestions for technical accuracy or depth
                - Ideas for better examples or analogies

                For each evaluation, return a `test_passed` boolean
                to indicate if the content was good enough for that specific aspect.

                Keep your feedback constructive but honest.
                Focus on specific, actionable improvements rather than general observations.
                Reference specific parts of the text when making suggestions.
                """,
            },
            {"role": "user", "content": prompt},
        ],
        # Constrain the reply to the TestResult JSON schema so it can be
        # parsed with model_validate_json below.
        format=TestResult.model_json_schema(),
        options=ollama.Options(
            temperature=0,  # to ensure consistent results
            num_ctx=8192,  # to ensure the entire text is processed
        ),
    )

    return TestResult.model_validate_json(response.message.content)
File renamed without changes.
Loading