Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
from alembic import op
import sqlalchemy as sa
from typing import Union, Sequence
import rhesis
from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "1776e6dd47d3"
Expand Down
43 changes: 34 additions & 9 deletions apps/backend/src/rhesis/backend/app/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,39 @@ def get_value(cls, entity_type):
return entity_type


# TestType Enum - DB-level test classification aligned with initial_data.json type_lookup
class TestType(str, Enum):
    """
    Enum for test types.

    These are reserved test type values in the TypeLookup table:
    - SINGLE_TURN: Traditional single request-response tests
    - MULTI_TURN: Agentic multi-turn conversation tests using Penelope
    """

    SINGLE_TURN = "Single-Turn"
    MULTI_TURN = "Multi-Turn"

    @classmethod
    def get_value(cls, test_type):
        """Get the string value of a test type"""
        return test_type.value if isinstance(test_type, cls) else test_type

    @classmethod
    def from_string(cls, value: str):
        """Get enum from string value (case-sensitive, accepts only allowed values)."""
        if not value:
            return None
        try:
            # Enum call syntax performs an exact lookup by member value.
            return cls(value)
        except ValueError:
            return None


# TestSetType Enum - DB-level test set classification aligned with initial_data.json type_lookup
class TestSetType(Enum):
class TestSetType(str, Enum):
SINGLE_TURN = "Single-Turn"
MULTI_TURN = "Multi-Turn"

Expand All @@ -48,17 +79,11 @@ def get_value(cls, test_set_type):

@classmethod
def from_string(cls, value: str):
"""Get enum from string value (case-insensitive, accepts snake_case and hyphenated).

Maps both 'single_turn' and 'Single-Turn' to TestSetType.SINGLE_TURN, so API
clients that use snake_case conventions are handled transparently.
"""
"""Get enum from string value (case-sensitive, accepts only allowed values)."""
if not value:
return None
# Normalize underscores to hyphens so snake_case aliases work (single_turn → single-turn)
normalized = value.lower().replace("_", "-")
for test_set_type in cls:
if test_set_type.value.lower() == normalized:
if test_set_type.value == value:
return test_set_type
return None

Expand Down
2 changes: 1 addition & 1 deletion apps/backend/src/rhesis/backend/app/models/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def to_searchable_text(self) -> str:
- Single-turn: Uses prompt.content and expected_response
- Multi-turn: Uses test_configuration (goal, instructions, scenario, etc.)
"""
from rhesis.backend.tasks.enums import TestType
from rhesis.backend.app.constants import TestType
from rhesis.backend.tasks.execution.modes import get_test_type

test_type = get_test_type(self)
Expand Down
127 changes: 46 additions & 81 deletions apps/backend/src/rhesis/backend/app/schemas/test.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
from datetime import datetime
from typing import Any, Dict, List, Optional, Union

from pydantic import UUID4, BaseModel, ConfigDict, ValidationError, field_validator
from pydantic import UUID4, BaseModel, ConfigDict, field_validator

from rhesis.backend.app.schemas import Base
from rhesis.backend.app.schemas.multi_turn_test_config import validate_multi_turn_config
from rhesis.backend.app.schemas.user import UserReference


Expand Down Expand Up @@ -113,36 +112,19 @@ class TestCreate(TestBase):
prompt: Optional[TestPromptCreate] = None
test_type: Optional[str] = None

@field_validator("test_type")
@classmethod
def validate_test_type(cls, v: Optional[str]) -> Optional[str]:
from rhesis.backend.app.schemas.validators import format_test_type

return format_test_type(v)

@field_validator("test_configuration")
@classmethod
def validate_test_configuration(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
"""
Validate test_configuration JSON based on content.

For multi-turn tests (when goal is present), validates against MultiTurnTestConfig schema.
"""
if v is None:
return None
from rhesis.backend.app.schemas.validators import validate_test_config_content

# If 'goal' is present, this is a multi-turn test configuration
if "goal" in v:
try:
# Validate using multi-turn config schema
validated_config = validate_multi_turn_config(v)
# Return as dict for storage
return validated_config.model_dump(exclude_none=True)
except ValidationError as e:
# Re-raise with more context
error_messages = []
for error in e.errors():
field = " -> ".join(str(loc) for loc in error["loc"])
error_messages.append(f"{field}: {error['msg']}")
raise ValueError(
f"Invalid multi-turn test configuration: {'; '.join(error_messages)}"
)

# For other configurations, allow any valid JSON
return v
return validate_test_config_content(v)


class TestUpdate(TestBase):
Expand All @@ -153,36 +135,19 @@ class TestUpdate(TestBase):
prompt: Optional[TestPromptCreate] = None
test_type: Optional[str] = None

@field_validator("test_type")
@classmethod
def validate_test_type(cls, v: Optional[str]) -> Optional[str]:
from rhesis.backend.app.schemas.validators import format_test_type

return format_test_type(v)

@field_validator("test_configuration")
@classmethod
def validate_test_configuration(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
"""
Validate test_configuration JSON based on content.
from rhesis.backend.app.schemas.validators import validate_test_config_content

For multi-turn tests (when goal is present), validates against MultiTurnTestConfig schema.
"""
if v is None:
return None

# If 'goal' is present, this is a multi-turn test configuration
if "goal" in v:
try:
# Validate using multi-turn config schema
validated_config = validate_multi_turn_config(v)
# Return as dict for storage
return validated_config.model_dump(exclude_none=True)
except ValidationError as e:
# Re-raise with more context
error_messages = []
for error in e.errors():
field = " -> ".join(str(loc) for loc in error["loc"])
error_messages.append(f"{field}: {error['msg']}")
raise ValueError(
f"Invalid multi-turn test configuration: {'; '.join(error_messages)}"
)

# For other configurations, allow any valid JSON
return v
return validate_test_config_content(v)


class Test(TestBase):
Expand Down Expand Up @@ -221,6 +186,14 @@ class TestBulkCreate(BaseModel):
owner_id: Optional[UUID4] = None
status: Optional[str] = None
priority: Optional[int] = None
test_type: Optional[str] = None

@field_validator("test_type")
@classmethod
def validate_test_type(cls, v: Optional[str]) -> Optional[str]:
from rhesis.backend.app.schemas.validators import format_test_type

return format_test_type(v)

@field_validator("assignee_id", "owner_id")
@classmethod
Expand Down Expand Up @@ -259,21 +232,9 @@ def validate_multi_turn_or_prompt(cls, v, info):
"or 'test_configuration' with 'goal' must be provided (for multi-turn tests)"
)

# If 'goal' is present, validate as multi-turn config
if "goal" in v:
try:
validated_config = validate_multi_turn_config(v)
return validated_config.model_dump(exclude_none=True)
except ValidationError as e:
error_messages = []
for error in e.errors():
field = " -> ".join(str(loc) for loc in error["loc"])
error_messages.append(f"{field}: {error['msg']}")
raise ValueError(
f"Invalid multi-turn test configuration: {'; '.join(error_messages)}"
)
from rhesis.backend.app.schemas.validators import validate_test_config_content

return v
return validate_test_config_content(v)


class TestBulkCreateRequest(BaseModel):
Expand Down Expand Up @@ -339,23 +300,20 @@ class TestExecuteRequest(BaseModel):
# Optional: Explicitly specify test type (otherwise auto-detected)
test_type: Optional[str] = None # "Single-Turn" or "Multi-Turn"

@field_validator("test_type")
@classmethod
def validate_test_type(cls, v: Optional[str]) -> Optional[str]:
from rhesis.backend.app.schemas.validators import format_test_type

return format_test_type(v)

@field_validator("test_configuration")
@classmethod
def validate_test_configuration(cls, v, info):
"""Validate multi-turn test configuration if provided."""
if v and "goal" in v:
try:
validated_config = validate_multi_turn_config(v)
return validated_config.model_dump(exclude_none=True)
except ValidationError as e:
error_messages = []
for error in e.errors():
field = " -> ".join(str(loc) for loc in error["loc"])
error_messages.append(f"{field}: {error['msg']}")
raise ValueError(
f"Invalid multi-turn test configuration: {'; '.join(error_messages)}"
)
return v
from rhesis.backend.app.schemas.validators import validate_test_config_content

return validate_test_config_content(v)

def model_post_init(self, __context):
"""Validate that either test_id or test definition is provided."""
Expand Down Expand Up @@ -421,6 +379,13 @@ class ConversationToTestRequest(BaseModel):
endpoint_id: Optional[UUID4] = None
test_type: Optional[str] = "Multi-Turn" # "Single-Turn" or "Multi-Turn"

@field_validator("test_type")
@classmethod
def validate_test_type(cls, v: Optional[str]) -> Optional[str]:
from rhesis.backend.app.schemas.validators import format_test_type

return format_test_type(v)


class SingleTurnTestExtraction(BaseModel):
"""LLM-extracted metadata for a single-turn test."""
Expand Down
33 changes: 16 additions & 17 deletions apps/backend/src/rhesis/backend/app/schemas/test_set.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,13 @@ class TestData(BaseModel):
priority: Optional[int] = None
metadata: Optional[Dict[str, Any]] = {}

@field_validator("test_type")
@classmethod
def validate_test_type(cls, v: Optional[str]) -> Optional[str]:
from rhesis.backend.app.schemas.validators import format_test_type

return format_test_type(v)

@field_validator("assignee_id", "owner_id")
@classmethod
def validate_uuid(cls, v):
Expand All @@ -105,9 +112,6 @@ def validate_multi_turn_or_prompt(cls, v, info):
- prompt is provided (single-turn test), OR
- test_configuration with goal is provided (multi-turn test)
"""
from pydantic import ValidationError

from rhesis.backend.app.schemas.multi_turn_test_config import validate_multi_turn_config

prompt = info.data.get("prompt")

Expand All @@ -122,21 +126,9 @@ def validate_multi_turn_or_prompt(cls, v, info):
"or 'test_configuration' with 'goal' must be provided (for multi-turn tests)"
)

# If 'goal' is present, validate as multi-turn config
if "goal" in v:
try:
validated_config = validate_multi_turn_config(v)
return validated_config.model_dump(exclude_none=True)
except ValidationError as e:
error_messages = []
for error in e.errors():
field = " -> ".join(str(loc) for loc in error["loc"])
error_messages.append(f"{field}: {error['msg']}")
raise ValueError(
f"Invalid multi-turn test configuration: {'; '.join(error_messages)}"
)
from rhesis.backend.app.schemas.validators import validate_test_config_content

return v
return validate_test_config_content(v)


class TestSetBulkCreate(BaseModel):
Expand All @@ -150,6 +142,13 @@ class TestSetBulkCreate(BaseModel):
tests: List[TestData]
metadata: Optional[Dict[str, Any]] = None

@field_validator("test_set_type")
@classmethod
def validate_test_set_type(cls, v: str) -> str:
from rhesis.backend.app.schemas.validators import format_test_set_type

return format_test_set_type(v)

@field_validator("owner_id", "assignee_id")
@classmethod
def validate_uuid_fields(cls, v):
Expand Down
64 changes: 64 additions & 0 deletions apps/backend/src/rhesis/backend/app/schemas/validators.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
from typing import Any, Dict, Optional

from pydantic import ValidationError

from rhesis.backend.app.constants import TestSetType, TestType
from rhesis.backend.app.schemas.multi_turn_test_config import validate_multi_turn_config


def format_test_type(v: Optional[str]) -> Optional[str]:
    """Format test type to title case and validate against allowed types.

    Returns None unchanged; otherwise title-cases the input (e.g. "multi-turn"
    -> "Multi-Turn") and raises ValueError when the result is not one of the
    reserved TestType values. Note that snake_case input ("single_turn")
    title-cases to "Single_Turn" and is therefore rejected by design.
    """
    if v is None:
        return None

    candidate = v.title()
    permitted = [member.value for member in TestType]

    if candidate in permitted:
        return candidate

    raise ValueError(f"Invalid test type '{v}'. Allowed values are: {', '.join(permitted)}")


def format_test_set_type(v: Optional[str]) -> Optional[str]:
    """Format test set type to title case and validate against allowed types.

    Mirrors format_test_type but validates against TestSetType; None passes
    through, unrecognised values raise ValueError.
    """
    if v is None:
        return None

    candidate = v.title()
    permitted = [member.value for member in TestSetType]

    if candidate in permitted:
        return candidate

    raise ValueError(
        f"Invalid test set type '{v}'. Allowed values are: {', '.join(permitted)}"
    )


def validate_test_config_content(v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """
    Validate test_configuration JSON based on content.

    For multi-turn tests (when goal is present), validates against MultiTurnTestConfig schema.
    Other configurations are returned unchanged; None passes through.
    """
    if v is None:
        return None

    # Only configurations carrying a 'goal' key are multi-turn; everything
    # else is accepted as arbitrary valid JSON.
    if "goal" not in v:
        return v

    try:
        config = validate_multi_turn_config(v)
    except ValidationError as exc:
        # Re-raise as ValueError with a flattened, human-readable summary of
        # every failing field so Pydantic surfaces it as a field error.
        details = "; ".join(
            f"{' -> '.join(str(part) for part in err['loc'])}: {err['msg']}"
            for err in exc.errors()
        )
        raise ValueError(f"Invalid multi-turn test configuration: {details}")

    # Store the validated config as a plain dict.
    return config.model_dump(exclude_none=True)
Loading
Loading