author      Hassan Abouelela <[email protected]>  2021-06-20 21:12:47 +0300
committer   GitHub <[email protected]>            2021-06-20 21:12:47 +0300
commit      af65bd5584e4a02057374b344e5d055bc799ba60
tree        2b3658d1dd491d6cc2c967c59a3fe237cac4281e
parent      Merge pull request #90 from python-discord/auth-form
parent      Updates Unittest Filter To Match New Model
Merge pull request #92 from python-discord/unittest_failures
Allow Unittest Failures
-rw-r--r--  SCHEMA.md                            26
-rw-r--r--  backend/models/__init__.py            3
-rw-r--r--  backend/models/question.py           26
-rw-r--r--  backend/routes/forms/submit.py       57
-rw-r--r--  backend/routes/forms/unittesting.py  31
5 files changed, 124 insertions, 19 deletions
diff --git a/SCHEMA.md b/SCHEMA.md
index 9d89188..93be830 100644
--- a/SCHEMA.md
+++ b/SCHEMA.md
@@ -128,8 +128,13 @@ Textareas require no additional configuration.
"language": "python",
// An optional mapping of unit tests
"unittests": {
- "unit_1": "unit_code()",
- ...
+ // Record a submission, even if the tests don't pass
+ // Default: false
+ "allow_failure": false,
+ "tests": {
+ "unit_1": "unit_code()",
+ ...
+ }
}
}
```
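For reference, a complete `code` question `data` payload under the new layout might look like the sketch below (the test names and bodies reuse the schema's placeholders; a leading `#` marks a test whose name is censored in results, per the unittesting changes further down):

```python
# Sketch of a full `code` question "data" payload under the new layout.
# Test names/bodies are placeholders; a leading "#" marks a hidden test.
code_question_data = {
    "language": "python",
    "unittests": {
        "allow_failure": True,  # record the submission even if tests fail
        "tests": {
            "unit_1": "unit_code()",
            "#unit_2": "hidden_unit_code()",
        },
    },
}
```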
@@ -164,10 +169,25 @@ Textareas require no additional configuration.
| `_id`/`id` | MongoDB ObjectID | Random identifier used for the response |
| `user` | Optional [user details object](#user-details-object) | An object describing the user that submitted if the form is not anonymous |
| `antispam` | Optional [anti spam object](#anti-spam-object) | An object containing information about the anti-spam on the form submission |
-| `response` | Object | Object containing question IDs mapping to the users answer |
+| `response` | Object | Object containing question IDs mapping to the user's answer* |
| `form_id` | String | ID of the form that the user is submitting to |
| `timestamp` | String | ISO formatted string of submission time. |
+
+&nbsp;* If the question is of type `code`, the response has the following structure:
+```json
+"response": {
+ "<QUESTION ID>": {
+ "value": "<USER CODE>",
+ "passed": bool,
+ "failures": ["<TEST NAME 1>", "<TEST NAME 4>", "<HIDDEN TEST 1>", ...]
+ },
+ ...
+}
+```
+* Values in `<>` are placeholders, while the rest are actual keys
+* `passed` is True only if all tests in the suite passed.
+
### User details object
The user details object contains the information returned by Discord alongside an `admin` boolean key indicating whether the user has admin privileges. The information returned from Discord can be found in the [Discord Developer portal](https://discord.com/developers/docs/resources/user#user-object).
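Putting the response table and the `code`-question note together, a full stored submission might look like the following sketch (all identifiers and values are placeholders):

```python
# Sketch of a complete stored submission document; every value is a placeholder.
submission = {
    "_id": "60cf7d2e9f1b2a0001abcd12",        # MongoDB ObjectID
    "form_id": "example-form",
    "timestamp": "2021-06-20T18:12:47+00:00",
    # `user` and `antispam` are optional and omitted from this sketch.
    "response": {
        "question-1": "a plain text answer",
        "question-2": {                        # a `code` question uses the nested shape
            "value": "def add(a, b):\n    return a + b",
            "passed": True,
            "failures": [],
        },
    },
}
```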
diff --git a/backend/models/__init__.py b/backend/models/__init__.py
index 29ccb24..8ad7f7f 100644
--- a/backend/models/__init__.py
+++ b/backend/models/__init__.py
@@ -2,13 +2,14 @@ from .antispam import AntiSpam
from .discord_user import DiscordUser
from .form import Form, FormList
from .form_response import FormResponse, ResponseList
-from .question import Question
+from .question import CodeQuestion, Question
__all__ = [
"AntiSpam",
"DiscordUser",
"Form",
"FormResponse",
+ "CodeQuestion",
"Question",
"FormList",
"ResponseList"
diff --git a/backend/models/question.py b/backend/models/question.py
index 7daeb5a..5a1334a 100644
--- a/backend/models/question.py
+++ b/backend/models/question.py
@@ -4,6 +4,28 @@ from pydantic import BaseModel, Field, root_validator, validator
from backend.constants import QUESTION_TYPES, REQUIRED_QUESTION_TYPE_DATA
+_TESTS_TYPE = t.Union[t.Dict[str, str], int]
+
+
+class Unittests(BaseModel):
+ """Schema model for unittest suites in code questions."""
+ allow_failure: bool = False
+ tests: _TESTS_TYPE
+
+ @validator("tests")
+ def validate_tests(cls, value: _TESTS_TYPE) -> _TESTS_TYPE:
+ """Confirm that at least one test exists in a test suite."""
+ if isinstance(value, dict) and len(value.keys()) == 0:
+ raise ValueError("Must have at least one test in a test suite.")
+
+ return value
+
+
+class CodeQuestion(BaseModel):
+ """Schema model for questions of type `code`."""
+ language: str
+ unittests: t.Optional[Unittests]
+
class Question(BaseModel):
"""Schema model for form question."""
@@ -49,4 +71,8 @@ class Question(BaseModel):
f"got {type(value['data'][key]).__name__} instead."
)
+ # Validate unittest options
+ if value.get("type").lower() == "code":
+ value["data"] = CodeQuestion(**value.get("data")).dict()
+
return value
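A minimal sketch of how the new models behave (assumes pydantic v1-style validators as used above and that `backend.models.question` is importable; not an excerpt from the repository's tests):

```python
# Minimal sketch of the new validation behaviour; illustrative only.
from pydantic import ValidationError

from backend.models.question import CodeQuestion, Unittests

# A valid code question needs at least one test when a suite is given.
CodeQuestion(
    language="python",
    unittests=Unittests(allow_failure=False, tests={"unit_1": "unit_code()"}),
)

# `unittests` is optional, so a code question without a suite still validates.
CodeQuestion(language="python")

# An empty test suite is rejected by the `tests` validator.
try:
    Unittests(tests={})
except ValidationError as exc:
    print(exc)  # includes "Must have at least one test in a test suite."
```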
diff --git a/backend/routes/forms/submit.py b/backend/routes/forms/submit.py
index c0a50f3..7229ee1 100644
--- a/backend/routes/forms/submit.py
+++ b/backend/routes/forms/submit.py
@@ -45,6 +45,18 @@ class PartialSubmission(BaseModel):
captcha: Optional[str]
+class UnittestError(BaseModel):
+ question_id: str
+ question_index: int
+ return_code: int
+ passed: bool
+ result: str
+
+
+class UnittestErrorMessage(ErrorMessage):
+ test_results: list[UnittestError]
+
+
class SubmitForm(Route):
"""
Submit a form with the provided form ID.
@@ -58,7 +70,8 @@ class SubmitForm(Route):
resp=Response(
HTTP_200=SubmissionResponse,
HTTP_404=ErrorMessage,
- HTTP_400=ErrorMessage
+ HTTP_400=ErrorMessage,
+ HTTP_422=UnittestErrorMessage
),
tags=["forms", "responses"]
)
@@ -168,16 +181,46 @@ class SubmitForm(Route):
if any("unittests" in question.data for question in form.questions):
unittest_results = await execute_unittest(response_obj, form)
- if not all(test.passed for test in unittest_results):
- # Return 500 if we encountered an internal error (code 99).
- status_code = 500 if any(
- test.return_code == 99 for test in unittest_results
- ) else 403
+ failures = []
+ status_code = 422
+
+ for test in unittest_results:
+ response_obj.response[test.question_id] = {
+ "value": response_obj.response[test.question_id],
+ "passed": test.passed
+ }
+
+ if test.return_code == 0:
+ failure_names = [] if test.passed else test.result.split(";")
+ elif test.return_code == 5:
+ failure_names = ["Could not parse user code."]
+ elif test.return_code == 6:
+ failure_names = ["Could not load user code."]
+ else:
+ failure_names = ["Internal error."]
+
+ response_obj.response[test.question_id]["failures"] = failure_names
+
+ # Report a failure on internal errors,
+ # or if the test suite doesn't allow failures
+ if not test.passed:
+ allow_failure = (
+ form.questions[test.question_index].data["unittests"]["allow_failure"]
+ )
+
+ # An error while communicating with the test runner
+ if test.return_code == 99:
+ failures.append(test)
+ status_code = 500
+
+ elif not allow_failure:
+ failures.append(test)
+ if len(failures):
return JSONResponse({
"error": "failed_tests",
"test_results": [
- test._asdict() for test in unittest_results if not test.passed
+ test._asdict() for test in failures
]
}, status_code=status_code)
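The branch above maps snekbox return codes to user-facing failure names. A standalone sketch of that mapping (the helper name is hypothetical; the return-code meanings are taken from this hunk):

```python
# Standalone sketch of the failure-name mapping used above; the helper name is hypothetical.
def failure_names_for(return_code: int, passed: bool, result: str) -> list[str]:
    if return_code == 0:
        # Clean run: failed test names arrive as a ";"-separated string.
        return [] if passed else result.split(";")
    if return_code == 5:
        return ["Could not parse user code."]
    if return_code == 6:
        return ["Could not load user code."]
    # Anything else (e.g. 99, a runner communication error) is internal.
    return ["Internal error."]


print(failure_names_for(0, False, "unit_1;unit_2"))  # ['unit_1', 'unit_2']
```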
diff --git a/backend/routes/forms/unittesting.py b/backend/routes/forms/unittesting.py
index 3854314..a830775 100644
--- a/backend/routes/forms/unittesting.py
+++ b/backend/routes/forms/unittesting.py
@@ -7,13 +7,15 @@ import httpx
from httpx import HTTPStatusError
from backend.constants import SNEKBOX_URL
-from backend.models import FormResponse, Form
+from backend.models import Form, FormResponse
with open("resources/unittest_template.py") as file:
TEST_TEMPLATE = file.read()
-UnittestResult = namedtuple("UnittestResult", "question_id return_code passed result")
+UnittestResult = namedtuple(
+ "UnittestResult", "question_id question_index return_code passed result"
+)
def filter_unittests(form: Form) -> Form:
@@ -23,8 +25,8 @@ def filter_unittests(form: Form) -> Form:
This is used to redact the exact tests when sending the form back to the frontend.
"""
for question in form.questions:
- if question.type == "code" and "unittests" in question.data:
- question.data["unittests"] = len(question.data["unittests"])
+ if question.type == "code" and question.data["unittests"] is not None:
+ question.data["unittests"]["tests"] = len(question.data["unittests"]["tests"])
return form
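Under the nested layout, the redaction above replaces only the inner `tests` mapping with its length, so the frontend still sees `allow_failure` but never the test code. A small before/after sketch (placeholder values):

```python
# Before/after sketch of the redaction performed by filter_unittests; placeholder values.
data_before = {
    "language": "python",
    "unittests": {
        "allow_failure": True,
        "tests": {"unit_1": "unit_code()", "#unit_2": "hidden_unit_code()"},
    },
}
data_after = {
    "language": "python",
    "unittests": {
        "allow_failure": True,
        "tests": 2,  # only the number of tests is exposed
    },
}
```

This is also why `_TESTS_TYPE` in question.py accepts an `int` as well as a mapping.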
@@ -62,20 +64,32 @@ async def execute_unittest(form_response: FormResponse, form: Form) -> list[Unit
"""Execute all the unittests in this form and return the results."""
unittest_results = []
- for question in form.questions:
- if question.type == "code" and "unittests" in question.data:
+ for index, question in enumerate(form.questions):
+ if question.type == "code":
+
+ # Exit early if the suite doesn't have any tests
+ if question.data["unittests"] is None:
+ unittest_results.append(UnittestResult(
+ question_id=question.id,
+ question_index=index,
+ return_code=0,
+ passed=True,
+ result=""
+ ))
+ continue
+
passed = False
# Tests starting with a hashtag should have censored names.
hidden_test_counter = count(1)
hidden_tests = {
test.lstrip("#").lstrip("test_"): next(hidden_test_counter)
- for test in question.data["unittests"].keys()
+ for test in question.data["unittests"]["tests"].keys()
if test.startswith("#")
}
# Compose runner code
- unit_code = _make_unit_code(question.data["unittests"])
+ unit_code = _make_unit_code(question.data["unittests"]["tests"])
user_code = _make_user_code(form_response.response[question.id])
code = TEST_TEMPLATE.replace("### USER CODE", user_code)
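The loop above now tracks each question's index and records an automatic pass for code questions without a suite. A sketch of the two result shapes it can produce (values are illustrative):

```python
# Sketch of the two kinds of results execute_unittest can append; values are illustrative.
from collections import namedtuple

UnittestResult = namedtuple(
    "UnittestResult", "question_id question_index return_code passed result"
)

# A code question with no test suite short-circuits to an automatic pass.
no_suite = UnittestResult("question-3", 2, 0, True, "")

# A completed run whose tests failed carries the ";"-separated failed names.
failed_run = UnittestResult("question-2", 1, 0, False, "unit_1;unit_2")

print(no_suite._asdict())
print(failed_run.result.split(";"))  # ['unit_1', 'unit_2']
```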
@@ -119,6 +133,7 @@ async def execute_unittest(form_response: FormResponse, form: Form) -> list[Unit
unittest_results.append(UnittestResult(
question_id=question.id,
+ question_index=index,
return_code=return_code,
passed=passed,
result=result