Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/coverage.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name: Post coverage comment
on:
workflow_run:
workflows: ["UnitTests"]
workflows: ["Testing"]
types:
- completed

Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: UnitTests
name: Testing

on:
push:
Expand Down Expand Up @@ -34,3 +34,4 @@ jobs:
name: python-coverage-comment-action
# If you use a different name, update COMMENT_FILENAME accordingly
path: python-coverage-comment-action.txt

5 changes: 3 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -30,14 +30,15 @@ dev = [
"ruff",
"pre-commit",
"pytest",
"pytest-coverage"
"pytest-coverage",
"pytest-asyncio"
]

[tool.setuptools.packages.find]
where = ["src"]

[tool.coverage.run]
omit = ["src/libkernelbot/backend.py", "src/libkernelbot/run_eval.py", "src/libkernelbot/launchers/*.py"]
omit = ["src/libkernelbot/run_eval.py", "src/libkernelbot/launchers/*.py"]
relative_files = true

[tool.coverage.report]
Expand Down
37 changes: 2 additions & 35 deletions src/kernelbot/cogs/leaderboard_cog.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,9 @@
from libkernelbot.leaderboard_db import (
LeaderboardItem,
LeaderboardRankedEntry,
RunItem,
SubmissionItem,
)
from libkernelbot.submission import SubmissionRequest, prepare_submission
from libkernelbot.submission import SubmissionRequest, generate_run_verdict, prepare_submission
from libkernelbot.utils import format_time, setup_logging

if TYPE_CHECKING:
Expand Down Expand Up @@ -57,38 +56,6 @@ async def select_gpu_view(
await view.wait()
return view

def generate_run_verdict(self, run: RunItem, sub_data: SubmissionItem):
    """Build a one-line markdown verdict for a passed leaderboard run.

    The run is compared against every entry on the same leaderboard and
    runner: top-3 get a medal line, top-10 a placement line, otherwise the
    line says personal best, plain success, or first-ever success depending
    on whether the user has other (faster) entries.
    """
    medals = {1: "🥇 First", 2: "🥈 Second", 3: "🥉 Third"}

    # get the competition
    with self.bot.leaderboard_db as db:
        competition = db.get_leaderboard_submissions(
            sub_data["leaderboard_name"], run["runner"]
        )
    # compare against the competition
    other_by_user = False
    run_time = float(run["score"])
    # NOTE(review): the * 1e9 suggests run["score"] is in seconds and
    # format_time expects nanoseconds — confirm against format_time.
    score_text = format_time(run_time * 1e9)

    for entry in competition:
        # can we find our own run? Only if it is the fastest submission by this user
        if entry["submission_id"] == sub_data["submission_id"]:
            rank = entry["rank"]
            if 1 <= rank <= 3:
                return f"> {medals[rank]} place on {run['runner']}: {score_text}"
            elif rank <= 10:
                return f"> {rank}th place on {run['runner']}: {score_text}"
            else:
                return f"> Personal best on {run['runner']}: {score_text}"
        elif entry["user_id"] == sub_data["user_id"]:
            other_by_user = True
    if other_by_user:
        # User already has a submission that is faster
        return f"> Successful on {run['runner']}: {score_text}"
    else:
        # no submission by the user exists
        return f"> 🍾 First successful submission on {run['runner']}: {score_text}"

async def post_submit_hook(self, interaction: discord.Interaction, sub_id: int):
with self.bot.leaderboard_db as db:
sub_data: SubmissionItem = db.get_submission_by_id(sub_id)
Expand All @@ -100,7 +67,7 @@ async def post_submit_hook(self, interaction: discord.Interaction, sub_id: int):
and run["mode"] == SubmissionMode.LEADERBOARD.value
and run["passed"]
):
result_lines.append(self.generate_run_verdict(run, sub_data))
result_lines.append(generate_run_verdict(self.bot.backend, run, sub_data))

if len(result_lines) > 0:
await send_discord_message(
Expand Down
5 changes: 0 additions & 5 deletions src/kernelbot/env.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,10 +45,5 @@ def init_environment():
env.PROBLEM_DEV_DIR = os.getenv("PROBLEM_DEV_DIR", "examples")

# PostgreSQL-specific constants
env.POSTGRES_HOST = os.getenv("POSTGRES_HOST")
env.POSTGRES_DATABASE = os.getenv("POSTGRES_DATABASE")
env.POSTGRES_USER = os.getenv("POSTGRES_USER")
env.POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD")
env.POSTGRES_PORT = os.getenv("POSTGRES_PORT")
env.DATABASE_URL = os.getenv("DATABASE_URL")
env.DISABLE_SSL = os.getenv("DISABLE_SSL")
7 changes: 1 addition & 6 deletions src/libkernelbot/backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,13 +29,8 @@ def __init__(
):
self.debug_mode = debug_mode
self.db = LeaderboardDB(
env.POSTGRES_HOST,
env.POSTGRES_DATABASE,
env.POSTGRES_USER,
env.POSTGRES_PASSWORD,
env.POSTGRES_PORT,
url=env.DATABASE_URL,
ssl_mode="require" if not env.DISABLE_SSL else "disable",
ssl_mode="require" if not getattr(env, "DISABLE_SSL", "") else "disable",
)

try:
Expand Down
17 changes: 2 additions & 15 deletions src/libkernelbot/leaderboard_db.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,17 +18,8 @@


class LeaderboardDB:
def __init__(
self, host: str, database: str, user: str, password: str, port: str, url: str, ssl_mode: str
):
def __init__(self, url: str, ssl_mode: str):
"""Initialize database connection parameters"""
self.connection_params = {
"host": host,
"database": database,
"user": user,
"password": password,
"port": port,
}
self.url = url
self.ssl_mode = ssl_mode
self.connection: Optional[psycopg2.extensions.connection] = None
Expand All @@ -39,11 +30,7 @@ def __init__(
def connect(self) -> bool:
"""Establish connection to the database"""
try:
self.connection = (
psycopg2.connect(self.url, sslmode=self.ssl_mode)
if self.url
else psycopg2.connect(**self.connection_params)
)
self.connection = psycopg2.connect(self.url, sslmode=self.ssl_mode)
self.cursor = self.connection.cursor()
return True
except psycopg2.Error as e:
Expand Down
7 changes: 5 additions & 2 deletions src/libkernelbot/report.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,15 +33,18 @@ class Log:


class RunResultReport:
    """Ordered sequence of Text and Log sections that make up a run report."""

    def __init__(self, data=None):
        # Use an explicit None check (not `data or []`): a caller-supplied
        # empty list must still be aliased, not silently replaced by a new
        # list — `or` would discard any falsy argument.
        self.data: List["Text | Log"] = data if data is not None else []

    def add_text(self, section: str):
        """Append a plain text section to the report."""
        self.data.append(Text(section))

    def add_log(self, header: str, log: str):
        """Append a titled log section to the report."""
        self.data.append(Log(header, log))

    def __repr__(self):
        return f"RunResultReport(data={self.data})"


def _generate_compile_report(reporter: "RunResultReport", comp: CompileResult):
message = ""
Expand Down
34 changes: 33 additions & 1 deletion src/libkernelbot/submission.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,11 @@
from better_profanity import profanity

from libkernelbot.consts import RankCriterion
from libkernelbot.db_types import RunItem, SubmissionItem
from libkernelbot.leaderboard_db import LeaderboardDB, LeaderboardItem
from libkernelbot.run_eval import FullResult
from libkernelbot.task import LeaderboardTask
from libkernelbot.utils import KernelBotError, setup_logging
from libkernelbot.utils import KernelBotError, format_time, setup_logging

if typing.TYPE_CHECKING:
from backend import KernelBackend
Expand Down Expand Up @@ -194,3 +195,34 @@ def compute_score(result: FullResult, task: LeaderboardTask, submission_id: int)
raise KernelBotError(f"Invalid ranking criterion {task.ranking_by}")

return score


def generate_run_verdict(backend: "KernelBackend", run: RunItem, sub_data: SubmissionItem):
    """Produce a one-line markdown verdict for a passed leaderboard run.

    The run is ranked against every entry on the same leaderboard and runner:
    top-3 earns a medal line, top-10 a placement line, and otherwise the line
    reflects whether this is the user's personal best, a repeat success, or
    the user's first successful submission.
    """
    podium = {1: "🥇 First", 2: "🥈 Second", 3: "🥉 Third"}

    # Pull the full standings for this leaderboard/runner combination.
    with backend.db as db:
        standings = db.get_leaderboard_submissions(sub_data["leaderboard_name"], run["runner"])

    score_text = format_time(float(run["score"]) * 1e9)
    runner = run["runner"]

    slower_entry_exists = False
    for entry in standings:
        # Our own run only shows up here if it is this user's fastest submission.
        if entry["submission_id"] == sub_data["submission_id"]:
            rank = entry["rank"]
            if 1 <= rank <= 3:
                return f"> {podium[rank]} place on {runner}: {score_text}"
            if rank <= 10:
                return f"> {rank}th place on {runner}: {score_text}"
            return f"> Personal best on {runner}: {score_text}"
        if entry["user_id"] == sub_data["user_id"]:
            slower_entry_exists = True

    if slower_entry_exists:
        # The user already holds a faster submission on this leaderboard.
        return f"> Successful on {runner}: {score_text}"
    # No other submission by this user exists yet.
    return f"> 🍾 First successful submission on {runner}: {score_text}"
124 changes: 124 additions & 0 deletions unit-tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
import subprocess
import time
from pathlib import Path

import pytest

# Connection string for the dockerized test Postgres — presumably on the
# non-default port 5433 to avoid clashing with a locally running Postgres;
# confirm against docker-compose.test.yml.
DATABASE_URL = "postgresql://postgres:postgres@localhost:5433/clusterdev"


@pytest.fixture(scope="module")
def docker_compose():
tgt_path = Path.cwd()
if tgt_path.name == "unit-tests":
tgt_path = tgt_path.parent

"""Start a test database and run migrations"""
subprocess.check_call(
["docker", "compose", "-f", "docker-compose.test.yml", "up", "-d"], cwd=tgt_path
)

try:
# Wait for migrations to finish
while True:
result = subprocess.run(
["docker", "compose", "-f", "docker-compose.test.yml", "ps", "-q", "migrate-test"],
capture_output=True,
text=True,
cwd=tgt_path,
)

if not result.stdout.strip(): # Container no longer exists
break
time.sleep(1)

# Check if migrations succeeded
logs = subprocess.run(
["docker", "compose", "-f", "docker-compose.test.yml", "logs", "migrate-test"],
capture_output=True,
text=True,
cwd=tgt_path,
)

if "error" in logs.stdout.lower():
raise Exception(f"Migrations failed: {logs.stdout}")

yield
finally:
subprocess.run(
["docker", "compose", "-f", "docker-compose.test.yml", "down", "-v"], cwd=tgt_path
)


def _nuke_contents(db):
    """Wipe every leaderboard table and reset identity sequences so each
    test starts from a clean database."""
    truncate_sql = (
        "TRUNCATE leaderboard.code_files, leaderboard.submission, leaderboard.runs, "
        "leaderboard.leaderboard, leaderboard.user_info, leaderboard.templates, "
        "leaderboard.gpu_type RESTART IDENTITY CASCADE"
    )
    db.cursor.execute(truncate_sql)
    db.connection.commit()


@pytest.fixture()
def database(docker_compose):
    """Yield a LeaderboardDB connected to the dockerized test database,
    truncating all tables both before and after the test."""
    from libkernelbot import leaderboard_db

    db = leaderboard_db.LeaderboardDB(url=DATABASE_URL, ssl_mode="disable")

    # Clean slate going in …
    with db:
        _nuke_contents(db)
    yield db
    # … and leave nothing behind for the next test.
    with db:
        _nuke_contents(db)


@pytest.fixture()
def bot(docker_compose, database):
    """Construct a KernelBackend wired to the test database (SSL disabled,
    debug mode off)."""
    from types import SimpleNamespace

    from libkernelbot import backend

    # Minimal stand-in for the real environment object: only the attributes
    # the backend reads are provided.
    fake_env = SimpleNamespace(DATABASE_URL=DATABASE_URL, DISABLE_SSL="1")
    yield backend.KernelBackend(fake_env, False)


TASK_YAML = """
lang: py
description: "Test task description"
ranking_by: geom
test_timeout: 120
files:
- name: "kernel.py"
source: "kernel.py"
- name: "submission.py"
source: "@SUBMISSION@"
config:
main: "kernel.py"
tests:
- input_size: 1000
dtype: "float32"
benchmarks:
- input_size: 10000
dtype: "float32"
templates:
Python: "template.py"
CUDA: "template.cu"
"""


@pytest.fixture
def task_directory(tmp_path):
    """Create a temporary directory with task sources and a task.yml for
    task-definition tests, returning the directory path."""
    # Idiomatic bound calls instead of `Path.write_text(tmp_path / ..., ...)`,
    # which invoked the method unbound with the path as its first argument.
    (tmp_path / "kernel.py").write_text("def kernel(): pass")
    (tmp_path / "template.py").write_text("# Python template")
    (tmp_path / "template.cu").write_text("// CUDA template")

    # The task definition itself, referencing the files created above.
    (tmp_path / "task.yml").write_text(TASK_YAML)
    return tmp_path
Loading
Loading