author | 2021-06-20 21:12:47 +0300
committer | 2021-06-20 21:12:47 +0300
commit | af65bd5584e4a02057374b344e5d055bc799ba60 (patch)
tree | 2b3658d1dd491d6cc2c967c59a3fe237cac4281e /backend/routes/forms
parent | Merge pull request #90 from python-discord/auth-form (diff)
parent | Updates Unittest Filter To Match New Model (diff)
Merge pull request #92 from python-discord/unittest_failures
Allow Unittest Failures
Diffstat (limited to 'backend/routes/forms')
-rw-r--r-- | backend/routes/forms/submit.py | 57
-rw-r--r-- | backend/routes/forms/unittesting.py | 31
2 files changed, 73 insertions, 15 deletions
diff --git a/backend/routes/forms/submit.py b/backend/routes/forms/submit.py
index c0a50f3..7229ee1 100644
--- a/backend/routes/forms/submit.py
+++ b/backend/routes/forms/submit.py
@@ -45,6 +45,18 @@ class PartialSubmission(BaseModel):
     captcha: Optional[str]
 
 
+class UnittestError(BaseModel):
+    question_id: str
+    question_index: int
+    return_code: int
+    passed: bool
+    result: str
+
+
+class UnittestErrorMessage(ErrorMessage):
+    test_results: list[UnittestError]
+
+
 class SubmitForm(Route):
     """
     Submit a form with the provided form ID.
@@ -58,7 +70,8 @@ class SubmitForm(Route):
         resp=Response(
             HTTP_200=SubmissionResponse,
             HTTP_404=ErrorMessage,
-            HTTP_400=ErrorMessage
+            HTTP_400=ErrorMessage,
+            HTTP_422=UnittestErrorMessage
         ),
         tags=["forms", "responses"]
     )
@@ -168,16 +181,46 @@ class SubmitForm(Route):
         if any("unittests" in question.data for question in form.questions):
             unittest_results = await execute_unittest(response_obj, form)
 
-            if not all(test.passed for test in unittest_results):
-                # Return 500 if we encountered an internal error (code 99).
-                status_code = 500 if any(
-                    test.return_code == 99 for test in unittest_results
-                ) else 403
+            failures = []
+            status_code = 422
+
+            for test in unittest_results:
+                response_obj.response[test.question_id] = {
+                    "value": response_obj.response[test.question_id],
+                    "passed": test.passed
+                }
+
+                if test.return_code == 0:
+                    failure_names = [] if test.passed else test.result.split(";")
+                elif test.return_code == 5:
+                    failure_names = ["Could not parse user code."]
+                elif test.return_code == 6:
+                    failure_names = ["Could not load user code."]
+                else:
+                    failure_names = ["Internal error."]
+
+                response_obj.response[test.question_id]["failures"] = failure_names
+
+                # Report a failure on internal errors,
+                # or if the test suite doesn't allow failures
+                if not test.passed:
+                    allow_failure = (
+                        form.questions[test.question_index].data["unittests"]["allow_failure"]
+                    )
+
+                    # An error while communicating with the test runner
+                    if test.return_code == 99:
+                        failures.append(test)
+                        status_code = 500
+
+                    elif not allow_failure:
+                        failures.append(test)
+
+            if len(failures):
                 return JSONResponse({
                     "error": "failed_tests",
                     "test_results": [
-                        test._asdict() for test in unittest_results if not test.passed
+                        test._asdict() for test in failures
                     ]
                 }, status_code=status_code)
diff --git a/backend/routes/forms/unittesting.py b/backend/routes/forms/unittesting.py
index 3854314..a830775 100644
--- a/backend/routes/forms/unittesting.py
+++ b/backend/routes/forms/unittesting.py
@@ -7,13 +7,15 @@ import httpx
 from httpx import HTTPStatusError
 
 from backend.constants import SNEKBOX_URL
-from backend.models import FormResponse, Form
+from backend.models import Form, FormResponse
 
 with open("resources/unittest_template.py") as file:
     TEST_TEMPLATE = file.read()
 
-UnittestResult = namedtuple("UnittestResult", "question_id return_code passed result")
+UnittestResult = namedtuple(
+    "UnittestResult", "question_id question_index return_code passed result"
+)
 
 
 def filter_unittests(form: Form) -> Form:
@@ -23,8 +25,8 @@ def filter_unittests(form: Form) -> Form:
 
     This is used to redact the exact tests when sending the form back to the frontend.
     """
     for question in form.questions:
-        if question.type == "code" and "unittests" in question.data:
-            question.data["unittests"] = len(question.data["unittests"])
+        if question.type == "code" and question.data["unittests"] is not None:
+            question.data["unittests"]["tests"] = len(question.data["unittests"]["tests"])
 
     return form
@@ -62,20 +64,32 @@ async def execute_unittest(form_response: FormResponse, form: Form) -> list[UnittestResult]:
     """Execute all the unittests in this form and return the results."""
     unittest_results = []
 
-    for question in form.questions:
-        if question.type == "code" and "unittests" in question.data:
+    for index, question in enumerate(form.questions):
+        if question.type == "code":
+
+            # Exit early if the suite doesn't have any tests
+            if question.data["unittests"] is None:
+                unittest_results.append(UnittestResult(
+                    question_id=question.id,
+                    question_index=index,
+                    return_code=0,
+                    passed=True,
+                    result=""
+                ))
+                continue
+
             passed = False
 
             # Tests starting with an hashtag should have censored names.
             hidden_test_counter = count(1)
             hidden_tests = {
                 test.lstrip("#").lstrip("test_"): next(hidden_test_counter)
-                for test in question.data["unittests"].keys()
+                for test in question.data["unittests"]["tests"].keys()
                 if test.startswith("#")
             }
 
             # Compose runner code
-            unit_code = _make_unit_code(question.data["unittests"])
+            unit_code = _make_unit_code(question.data["unittests"]["tests"])
             user_code = _make_user_code(form_response.response[question.id])
 
             code = TEST_TEMPLATE.replace("### USER CODE", user_code)
@@ -119,6 +133,7 @@ async def execute_unittest(form_response: FormResponse, form: Form) -> list[UnittestResult]:
 
         unittest_results.append(UnittestResult(
             question_id=question.id,
+            question_index=index,
            return_code=return_code,
             passed=passed,
             result=result