Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Problem Page 'RUN' 기능 동작 #199

Open
wants to merge 22 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
171 changes: 116 additions & 55 deletions backend/judge/dispatcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,16 @@ def process_pending_task():
judge_task.send(**data)


def process_pending_task_run():
    """Pop one queued code-run task from Redis and hand it to a worker.

    Mirrors ``process_pending_task`` but drains the run-specific queue that
    fills up when no judge server was available at submit time.
    """
    if not cache.llen(CacheKey.run_waiting_queue):
        return
    # Imported lazily to avoid a circular import between dispatcher and tasks.
    from judge.tasks import coderun_task
    raw = cache.rpop(CacheKey.run_waiting_queue)
    if raw:
        coderun_task.send(**json.loads(raw.decode("utf-8")))


class ChooseJudgeServer:
def __init__(self):
self.server = None
Expand Down Expand Up @@ -90,12 +100,20 @@ def compile_spj(self):


class JudgeDispatcher(DispatcherBase):
def __init__(self, submission_id, problem_id):
def __init__(self, data):
super().__init__()
self.submission = Submission.objects.get(id=submission_id)
self.contest_id = self.submission.contest_id
self.last_result = self.submission.result if self.submission.info else None
self.submission_id = data.get("submission_id")
if self.submission_id:
self.submission = Submission.objects.get(id=self.submission_id)
self.contest_id = self.submission.contest_id
self.last_result = self.submission.result if self.submission.info else None
self.language = self.submission.language
else:
self.run_data = data
self.contest_id = data.get("contest_id")
self.language = self.run_data["language"]

problem_id = data.get("problem_id")
if self.contest_id:
self.problem = Problem.objects.select_related("contest").get(id=problem_id, contest_id=self.contest_id)
self.contest = self.problem.contest
Expand Down Expand Up @@ -124,20 +142,21 @@ def _compute_statistic_info(self, resp_data):
self.submission.statistic_info["score"] = score

def judge(self):
language = self.submission.language
sub_config = list(filter(lambda item: language == item["name"], SysOptions.languages))[0]
sub_config = list(filter(lambda item: self.language == item["name"], SysOptions.languages))[0]
spj_config = {}
if self.problem.spj_code:
for lang in SysOptions.spj_languages:
if lang["name"] == self.problem.spj_language:
spj_config = lang["spj"]
break

if language in self.problem.template:
template = parse_problem_template(self.problem.template[language])
if self.language in self.problem.template:
template = parse_problem_template(self.problem.template[self.language])
code = f"{template['prepend']}\n{self.submission.code}\n{template['append']}"
else:
elif self.submission_id:
code = self.submission.code
else:
code = self.run_data["code"]

data = {
"language_config": sub_config["config"],
Expand All @@ -153,57 +172,99 @@ def judge(self):
"io_mode": self.problem.io_mode
}

with ChooseJudgeServer() as server:
if not server:
data = {"submission_id": self.submission.id, "problem_id": self.problem.id}
cache.lpush(CacheKey.waiting_queue, json.dumps(data))
return
Submission.objects.filter(id=self.submission.id).update(result=JudgeStatus.JUDGING)
resp = self._request(urljoin(server.service_url, "/judge"), data=data)
if self.submission_id:
with ChooseJudgeServer() as server:
if not server:
data = {"submission_id": self.submission.id, "problem_id": self.problem.id}
cache.lpush(CacheKey.waiting_queue, json.dumps(data))
return
Submission.objects.filter(id=self.submission.id).update(result=JudgeStatus.JUDGING)
resp = self._request(urljoin(server.service_url, "/judge"), data=data)

if not resp:
Submission.objects.filter(id=self.submission.id).update(result=JudgeStatus.SYSTEM_ERROR)
return
if not resp:
Submission.objects.filter(id=self.submission.id).update(result=JudgeStatus.SYSTEM_ERROR)
return

if resp["err"]:
self.submission.result = JudgeStatus.COMPILE_ERROR
self.submission.statistic_info["err_info"] = resp["data"]
self.submission.statistic_info["score"] = 0
else:
resp["data"].sort(key=lambda x: int(x["test_case"]))
self.submission.info = resp
self._compute_statistic_info(resp["data"])
error_test_case = list(filter(lambda case: case["result"] != 0, resp["data"]))
# In ACM mode, if multiple test points are all correct, then AC,
# otherwise, take the status of the first wrong test point
# In OI mode, if multiple test points are all correct, AC is used,
# if all test points are wrong, the first test point state is taken as the first error,
# otherwise it is partially correct
if not error_test_case:
self.submission.result = JudgeStatus.ACCEPTED
elif self.problem.rule_type == ProblemRuleType.ACM or len(error_test_case) == len(resp["data"]):
self.submission.result = error_test_case[0]["result"]
if resp["err"]:
self.submission.result = JudgeStatus.COMPILE_ERROR
self.submission.statistic_info["err_info"] = resp["data"]
self.submission.statistic_info["score"] = 0
else:
self.submission.result = JudgeStatus.PARTIALLY_ACCEPTED
self.submission.save()

if self.contest_id:
if self.contest.status != ContestStatus.CONTEST_UNDERWAY or \
User.objects.get(id=self.submission.user_id).is_contest_admin(self.contest):
logger.info(
"Contest debug mode, id: " + str(self.contest_id) + ", submission id: " + self.submission.id)
return
with transaction.atomic():
self.update_contest_problem_status()
self.update_contest_rank()
else:
if self.last_result:
self.update_problem_status_rejudge()
resp["data"].sort(key=lambda x: int(x["test_case"]))
self.submission.info = resp
self._compute_statistic_info(resp["data"])
error_test_case = list(filter(lambda case: case["result"] != 0, resp["data"]))
# In ACM mode, if multiple test points are all correct, then AC,
# otherwise, take the status of the first wrong test point
# In OI mode, if multiple test points are all correct, AC is used,
# if all test points are wrong, the first test point state is taken as the first error,
# otherwise it is partially correct
if not error_test_case:
self.submission.result = JudgeStatus.ACCEPTED
elif self.problem.rule_type == ProblemRuleType.ACM or len(error_test_case) == len(resp["data"]):
self.submission.result = error_test_case[0]["result"]
else:
self.submission.result = JudgeStatus.PARTIALLY_ACCEPTED
self.submission.save()

if self.contest_id:
if self.contest.status != ContestStatus.CONTEST_UNDERWAY or \
User.objects.get(id=self.submission.user_id).is_contest_admin(self.contest):
logger.info(
"Contest debug mode, id: " + str(self.contest_id) + ", submission id: " + self.submission.id)
return
with transaction.atomic():
self.update_contest_problem_status()
self.update_contest_rank()
else:
self.update_problem_status()
if self.last_result:
self.update_problem_status_rejudge()
else:
self.update_problem_status()

# At this point, the judgment is over, try to process the remaining tasks in the task queue
process_pending_task()
# At this point, the judgment is over, try to process the remaining tasks in the task queue
process_pending_task()
else:
data["test_case_id"] = None
data["output"] = True
data["test_case"] = []
for testcases in self.run_data["new_testcase"]:
data["test_case"].append({"input": testcases, "output": ""})
run_id = self.run_data["run_id"]

with ChooseJudgeServer() as server:
if not server:
cache.lpush(CacheKey.run_waiting_queue, json.dumps(self.run_data))
return
resp = self._request(urljoin(server.service_url, "/judge"), data=data)

if not resp: # System error
cache.hset("run", run_id, json.dumps([{"err": "System Error", "data": "System Error"}]))

elif resp["err"]: # Compile error
cache.hset("run", run_id, json.dumps(resp))

else: # Other errors or normal operation
resp["data"].sort(key=lambda x: int(x["test_case"]))
resp_data = resp["data"]
testcase_num = len(resp_data)
run_result = []
for i in range(testcase_num):
result = {}
result["output"] = {}
result["input"] = data["test_case"][i]["input"]
if resp_data[i]["result"] in (-1, 0):
result["output"]["err"] = None
result["output"]["data"] = resp_data[i]["output"]
else:
err_code = resp_data[i]["result"]
result["output"]["err"] = err_code
result["output"]["data"] = None
run_result.append(result)
cache.hset("run", run_id, json.dumps(run_result))

# At this point, the judgment is over, try to process the remaining tasks in the task queue
process_pending_task_run()

def update_problem_status_rejudge(self):
result = str(self.submission.result)
Expand Down
11 changes: 8 additions & 3 deletions backend/judge/tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,13 @@


@dramatiq.actor(**DRAMATIQ_WORKER_ARGS())
def judge_task(submission_id, problem_id):
uid = Submission.objects.get(id=submission_id).user_id
def judge_task(data):
    """Judge a regular submission, skipping it when the submitter is disabled.

    ``data`` carries at least ``submission_id`` and ``problem_id``.
    """
    submitter_id = Submission.objects.get(id=data["submission_id"]).user_id
    if User.objects.get(id=submitter_id).is_disabled:
        return
    JudgeDispatcher(data).judge()


@dramatiq.actor(**DRAMATIQ_WORKER_ARGS())
def coderun_task(data):
    """Run ad-hoc user code (no Submission row) through the judge dispatcher."""
    dispatcher = JudgeDispatcher(data)
    dispatcher.judge()
3 changes: 2 additions & 1 deletion backend/submission/urls.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
from django.conf.urls import url

from .views import SubmissionAPI, SubmissionListAPI, ContestSubmissionListAPI, SubmissionExistsAPI
from .views import SubmissionAPI, SubmissionListAPI, ContestSubmissionListAPI, SubmissionExistsAPI, CodeRunAPI

# Submission-related endpoints; coderun backs the problem page "RUN" button.
urlpatterns = [
url(r"^submission/?$", SubmissionAPI.as_view(), name="submission_api"),
url(r"^submissions/?$", SubmissionListAPI.as_view(), name="submission_list_api"),
url(r"^submission_exists/?$", SubmissionExistsAPI.as_view(), name="submission_exists"),
url(r"^contest_submissions/?$", ContestSubmissionListAPI.as_view(), name="contest_submission_list_api"),
url(r"^coderun/?$", CodeRunAPI.as_view(), name="coderun_api"),
]
76 changes: 65 additions & 11 deletions backend/submission/views.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,18 @@
import ipaddress
import json

from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi

from account.decorators import login_required, check_contest_permission
from contest.models import ContestStatus, ContestRuleType
from judge.tasks import judge_task
from judge.tasks import judge_task, coderun_task
from options.options import SysOptions
# from judge.dispatcher import JudgeDispatcher
from problem.models import Problem, ProblemRuleType
from utils.api import APIView, validate_serializer
from utils.cache import cache
from utils.shortcuts import rand_str
from utils.captcha import Captcha
from utils.throttling import TokenBucket
from .models import Submission
Expand All @@ -19,14 +21,66 @@
from .serializers import SubmissionSafeModelSerializer, SubmissionListSerializer


class SubmissionAPI(APIView):
def throttling(self, request):
user_bucket = TokenBucket(key=str(request.user.id),
redis_conn=cache, **SysOptions.throttling["user"])
can_consume, wait = user_bucket.consume()
if not can_consume:
return "Please wait %d seconds" % (int(wait))
def throttling(request):
    """Rate-limit the requesting user with a per-user token bucket.

    Returns an error string when the user must wait, otherwise ``None``.
    """
    bucket = TokenBucket(key=str(request.user.id),
                         redis_conn=cache, **SysOptions.throttling["user"])
    ok, wait = bucket.consume()
    if not ok:
        return "Please wait %d seconds" % (int(wait))


class CodeRunAPI(APIView):
    """Run user code against ad-hoc test cases without creating a Submission.

    POST enqueues a run and returns a ``run_id``; GET polls for the result.
    Worker and API exchange results via the Redis hash ``"run"`` keyed by
    ``run_id`` (value is ``"Judging"`` until the worker stores the verdict).
    """

    @login_required
    def post(self, request):
        data = request.data

        if data.get("contest_id"):
            # NOTE(review): check_contest_permission is defined on
            # SubmissionAPI, not on this class — verify this resolves here.
            error = self.check_contest_permission(request)
            if error:
                return error

        if data.get("captcha"):
            if not Captcha(request).check(data["captcha"]):
                return self.error("Invalid captcha")
        error = throttling(request)
        if error:
            return self.error(error)

        try:
            problem = Problem.objects.get(id=data["problem_id"], contest_id=data.get("contest_id"), visible=True)
        except Problem.DoesNotExist:
            return self.error("Problem not exist")
        if data["language"] not in problem.languages:
            # Fixed typo: "is now allowed" -> "is not allowed".
            return self.error(f"{data['language']} is not allowed in the problem")

        # The dispatcher iterates new_testcase; reject a missing/empty list
        # early instead of failing with a TypeError in the worker.
        if not data.get("new_testcase"):
            return self.error("No test case provided")

        run_id = rand_str()
        run_data = {
            "language": data["language"],
            "code": data["code"],
            "contest_id": data.get("contest_id"),
            "new_testcase": data["new_testcase"],
            "problem_id": problem.id,
            "run_id": run_id,
        }
        # Placeholder so GET can tell "still judging" from "unknown run_id".
        cache.hset("run", run_id, json.dumps("Judging"))
        coderun_task.send(run_data)
        return self.success(run_id)

    @login_required
    def get(self, request):
        run_id = request.GET.get("run_id")
        if not cache.hexists("run", run_id):
            return self.error("run_id does not exist")

        res = json.loads(cache.hget("run", run_id).decode("utf-8"))
        # Results are one-shot: delete the entry once a final verdict is read.
        if res != "Judging":
            cache.hdel("run", run_id)

        return self.success(res)


class SubmissionAPI(APIView):
@check_contest_permission(check_type="problems")
def check_contest_permission(self, request):
contest = self.contest
Expand Down Expand Up @@ -55,7 +109,7 @@ def post(self, request):
if data.get("captcha"):
if not Captcha(request).check(data["captcha"]):
return self.error("Invalid captcha")
error = self.throttling(request)
error = throttling(request)
if error:
return self.error(error)

Expand All @@ -73,8 +127,8 @@ def post(self, request):
ip=request.session["ip"],
contest_id=data.get("contest_id"))
# use this for debug
# JudgeDispatcher(submission.id, problem.id).judge()
judge_task.send(submission.id, problem.id)
submission_info = {"submission_id": submission.id, "problem_id": problem.id}
judge_task.send(submission_info)
if hide_id:
return self.success()
else:
Expand Down
1 change: 1 addition & 0 deletions backend/utils/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ class CacheKey:
waiting_queue = "waiting_queue"
contest_rank_cache = "contest_rank_cache"
website_config = "website_config"
run_waiting_queue = "run_waiting_queue"


class Difficulty(Choices):
Expand Down
12 changes: 12 additions & 0 deletions frontend/src/pages/oj/api.js
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,18 @@ export default {
data
})
},
// POST a code-run request; the server responds with a run_id used for polling.
runCode (data) {
return ajax('coderun', 'post', {
data
})
},
// Poll the run result by run_id; resolves with "Judging" until the verdict is ready.
getRunResult (runID) {
return ajax('coderun', 'get', {
params: {
run_id: runID
}
})
},
getSubmissionList (offset, limit, params) {
params.limit = limit
params.offset = offset
Expand Down
Loading