From 18b8b6527cc156c982b883c95c9f2937cf938481 Mon Sep 17 00:00:00 2001 From: <> Date: Mon, 13 Nov 2023 23:01:29 +0000 Subject: [PATCH] Deployed 682496c with MkDocs version: 1.5.3 --- .nojekyll | 0 404.html | 1 + API/errors/index.html | 206 + API/gradescope_data_types/index.html | 509 ++ API/pipeline_support/index.html | 195 + API/problem/index.html | 460 ++ API/problem_extras/index.html | 99 + API/result_synthesizer/index.html | 497 ++ API/test_case_wrapper/index.html | 678 ++ API/test_cases/index.html | 1439 ++++ API/test_result/index.html | 408 + API/tester/index.html | 945 +++ API/utils/index.html | 309 + CNAME | 1 + Contributing/how_to_contribute/index.html | 1 + Tutorials/Detailed-Usage/index.html | 316 + Tutorials/Examples/index.html | 1 + Tutorials/Getting-Started/index.html | 53 + Tutorials/Test-Result-(Proxy)/index.html | 32 + Tutorials/Upload-Autograder/index.html | 13 + .../Various-Function-Protocols/index.html | 309 + Tutorials/gap_-Keywords/index.html | 202 + assets/_mkdocstrings.css | 64 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.81fa17fe.min.js | 29 + assets/javascripts/bundle.81fa17fe.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + 
assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.f886a092.min.js | 42 + .../workers/search.f886a092.min.js.map | 7 + assets/stylesheets/main.4b4a2bd9.min.css | 1 + assets/stylesheets/main.4b4a2bd9.min.css.map | 1 + assets/stylesheets/palette.356b1318.min.css | 1 + .../stylesheets/palette.356b1318.min.css.map | 1 + css/api-styling.css | 51 + css/doc-icon-dark.svg | 4 + css/doc-icon.svg | 4 + index.html | 1 + objects.inv | Bin 0 -> 1748 bytes search/search_index.json | 1 + sitemap.xml | 3 + sitemap.xml.gz | Bin 0 -> 127 bytes 74 files changed, 14092 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 API/errors/index.html create mode 100644 API/gradescope_data_types/index.html create mode 100644 API/pipeline_support/index.html create mode 100644 API/problem/index.html create mode 100644 API/problem_extras/index.html create mode 100644 API/result_synthesizer/index.html create mode 100644 API/test_case_wrapper/index.html create mode 100644 API/test_cases/index.html create mode 100644 API/test_result/index.html create mode 100644 API/tester/index.html create mode 100644 
API/utils/index.html create mode 100644 CNAME create mode 100644 Contributing/how_to_contribute/index.html create mode 100644 Tutorials/Detailed-Usage/index.html create mode 100644 Tutorials/Examples/index.html create mode 100644 Tutorials/Getting-Started/index.html create mode 100644 Tutorials/Test-Result-(Proxy)/index.html create mode 100644 Tutorials/Upload-Autograder/index.html create mode 100644 Tutorials/Various-Function-Protocols/index.html create mode 100644 Tutorials/gap_-Keywords/index.html create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.81fa17fe.min.js create mode 100644 assets/javascripts/bundle.81fa17fe.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 
assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.f886a092.min.js create mode 100644 assets/javascripts/workers/search.f886a092.min.js.map create mode 100644 assets/stylesheets/main.4b4a2bd9.min.css create mode 100644 assets/stylesheets/main.4b4a2bd9.min.css.map create mode 100644 assets/stylesheets/palette.356b1318.min.css create mode 100644 assets/stylesheets/palette.356b1318.min.css.map create mode 100644 css/api-styling.css create mode 100644 css/doc-icon-dark.svg create mode 100644 css/doc-icon.svg create mode 100644 index.html create mode 100644 objects.inv create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..44d8319 --- /dev/null +++ b/404.html @@ -0,0 +1 @@ +
This module contains the error classes used in the framework.
Bases: Exception
src/gapper/core/errors.py
Extract the traceback from the exception as a string.
Extract the user traceback from the exception.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
grader_path | str | None | The path to the grader file. | None |
src/gapper/core/errors.py
Extract the user traceback from the exception as a string.
src/gapper/core/errors.py
Format the arguments of the error message.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
indent_num | int | The number of spaces to indent the message. | 0 |
src/gapper/core/errors.py
Bases: ErrorFormatter
Raised when an internal error occurs in the framework.
src/gapper/core/errors.py
Bases: StudentError
Raised when no submission is loaded.
src/gapper/core/errors.py
Bases: StudentError
Raised when a submission has syntax errors.
src/gapper/core/errors.py
The JSON schemas for Gradescope's metadata.
dataclass
The JSON schema for Gradescope's assignment settings.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
due_date | datetime | The assignment's due date. | field(metadata=config(encoder=isoformat, decoder=fromisoformat, mm_field=DateTime(format='iso'))) |
group_size | Optional[int] | The maximum group size on a group assignment. | required |
group_submission | bool | Whether group submission is allowed. | required |
id | int | The assignment's ID. | required |
course_id | int | The course's ID. | required |
late_due_date | Optional[datetime] | The late due date, or None if late submission disallowed. | field(metadata=config(encoder=lambda s: isoformat(s) if s else None, decoder=lambda s: fromisoformat(s) if s else None, mm_field=DateTime(format='iso'))) |
release_date | datetime | The assignment's release date. | field(metadata=config(encoder=isoformat, decoder=fromisoformat, mm_field=DateTime(format='iso'))) |
title | str | The assignment's title. | required |
total_points | float | The total point value of the assignment. | field(metadata=config(encoder=str, decoder=float)) |
src/gapper/gradescope/datatypes/gradescope_meta.py
dataclass
The JSON schema for a 'user' (submitter) of a Gradescope assignment.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
email | str | The submitter's email. | required |
id | int | The submitter's ID. | required |
name | str | The submitter's name. | required |
src/gapper/gradescope/datatypes/gradescope_meta.py
dataclass
The JSON schema for a previous submission record.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
submission_time | datetime | The time of the previous submission. | field(metadata=config(encoder=isoformat, decoder=fromisoformat, mm_field=DateTime(format='iso'))) |
score | float | The previous submission's score. | required |
results | GradescopeJson | The results.json file from the previous submission. | required |
src/gapper/gradescope/datatypes/gradescope_meta.py
dataclass
The JSON schema for Gradescope's submission metadata.
See Also https://gradescope-autograders.readthedocs.io/en/latest/submission_metadata/
Parameters:
Name | Type | Description | Default |
---|---|---|---|
id | int | The submission's ID. | required |
created_at | datetime | The time the submission was created. | field(metadata=config(encoder=isoformat, decoder=fromisoformat, mm_field=DateTime(format='iso'))) |
assignment | GradescopeAssignmentMetadata | The assignment's metadata. | required |
submission_method | Literal['upload', 'GitHub', 'Bitbucket'] | The submission method. | required |
users | List[GradescopeAssignmentUser] | The submitters' metadata. | required |
previous_submissions | List[GradescopePreviousSubmission] | The previous submissions' metadata. | required |
src/gapper/gradescope/datatypes/gradescope_meta.py
classmethod
Load the submission metadata from a file.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
path | Path | The path to load the submission metadata from. | required |
src/gapper/gradescope/datatypes/gradescope_meta.py
The Gradescope grading output JSON schema.
dataclass
The JSON schema for Gradescope.
We currently don't support the leaderboard and extra_data features of the gradescope schema. Those are documented on the autograder documentation, here: https://gradescope-autograders.readthedocs.io/en/latest/specs/.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
tests | list[GradescopeTestJson] | The tests for the problem. Required if no global score provided. | field(default_factory=list) |
score | Optional[float] | The overall score. Required if any test has no set score. | None |
execution_time | Optional[int] | The execution time of all the tests, in seconds. | None |
output | Optional[str] | The top-level, human-readable text output for all the problems. | None |
visibility | VisibilityType | The default visibility for each test. Overridden by test-specific settings. | 'visible' |
stdout_visibility | Optional[str] | Whether to show stdout for the tests. Same options as for visibility. | None |
src/gapper/gradescope/datatypes/gradescope_output.py
62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 |
|
classmethod
from_test_results(results: List[TestResult], score: float, save_path: Path | None = None, **kwargs) -> GradescopeJson
+
Convert a list of test results to Gradescope JSON.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
results | List[TestResult] | The test results. | required |
score | float | The score obtained from the submission. | required |
save_path | Path | None | The path to save the Gradescope JSON to. | None |
kwargs | The keyword arguments to pass to the constructor. | {} |
src/gapper/gradescope/datatypes/gradescope_output.py
dataclass
The JSON schema for a single Test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
score | Optional[float] | The test's score. Required if no top-level score is set. | field(default=None) |
max_score | Optional[float] | The max score for the test. | field(default=None) |
name | Optional[str] | The test's name. | field(default=None) |
output | Optional[str] | Human-readable text output of the test. | field(default=None) |
tags | Optional[str] | Tags for the test. | field(default=None) |
visibility | VisibilityType | The test's visibility. "hidden", "visible", "after_due_date", "after_published" | field(default='visible') |
src/gapper/gradescope/datatypes/gradescope_output.py
Support for pipeline actions.
Bases: PipelineFactory
A pipeline action factory generating init calls.
src/gapper/core/pipeline_support.py
Bases: PipelineFactory
A pipeline action factory generating function calls.
src/gapper/core/pipeline_support.py
A pipeline action base class. All pipeline actions should inherit from this class.
src/gapper/core/pipeline_support.py
property
Whether to replace the piped object with the result of the pipeline action.
A pipeline base class.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
name | str | The name of the attribute to be extracted from the piped object. | required |
_pipeline_replace | bool | Whether to replace the piped object with the result of the pipeline. | False |
args | The arguments to pass to the attribute. | () |
src/gapper/core/pipeline_support.py
A pipeline action factory base class. All pipeline action factories should inherit from this class.
The semantics of a pipeline action factory is to generate a pipeline action when called. That is
pipeline_factory = PipelineFactory(name)
+pipeline_action = pipeline_factory(*args, **kwargs)
+
+@test_case(
+ pipeline_action,
+ pipeline_factory(*args, **kwargs),
+)
+...
+
Attributes:
Name | Type | Description |
---|---|---|
ValueType | Type[PipelineBase] | The type of the pipeline action generated by the factory. |
src/gapper/core/pipeline_support.py
Bases: PipelineBase
A pipeline action factory generating property lookups.
src/gapper/core/pipeline_support.py
@problem
-- Problem
Problem
And problem
APIThis module defines the Problem class, which is the abstract representation of an assignment problem.
Bases: ModuleLoader
, Generic[ProbInputType, ProbOutputType]
An abstract representation of an assignment problem.
src/gapper/core/problem/problem_def.py
34 + 35 + 36 + 37 + 38 + 39 + 40 + 41 + 42 + 43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 |
|
property
The expected name of the submission.
Create a problem object.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
solution | Callable[ProbInputType, ProbOutputType] | The solution to the problem. | required |
config | ProblemConfig | The configuration of the problem. | required |
src/gapper/core/problem/problem_def.py
Add a post test to the problem.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
post_test | PostTest | The post test to add. | required |
src/gapper/core/problem/problem_def.py
Add a test parameter to the problem.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
test_param | TestParam | The test parameter to add. | required |
src/gapper/core/problem/problem_def.py
classmethod
Load a problem from a path.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
path | Path | The path to load the problem from. If the path is a directory, it will be searched recursively. But only one problem can be defined in a directory. | required |
src/gapper/core/problem/problem_def.py
problem(*, is_script: bool = False, check_stdout: Optional[bool] = None, mock_input: Optional[bool] = None, context: Iterable[str] = (), easy_context: bool = False) -> Callable[[Callable[ProbInputType, ProbOutputType]], Problem[ProbInputType, ProbOutputType]]
+
Create a problem object.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
is_script | bool | Whether this problem is a script. This cannot coexist with check_stdout or mock_input. | False |
check_stdout | Optional[bool] | Whether to check the stdout of the solution. | None |
mock_input | Optional[bool] | Whether to mock the input of the solution. | None |
context | Iterable[str] | The context to capture from the submission. | () |
easy_context | bool | Whether to use context directly in gap override tests. | False |
src/gapper/core/problem/problem_def.py
Problem
Configs APIdataclass
Problem configuration.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
check_stdout | bool | Whether to check the stdout of the solution. | False |
mock_input | bool | Whether to mock the input of the solution. | False |
captured_context | Iterable[str] | The context to capture from the submission. | () |
is_script | bool | Whether this problem is a script. | False |
easy_context | bool | Whether to use context directly in gap override tests. | False |
src/gapper/core/problem/problem_config.py
gs_connect
Build the connect arguments.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
url_or_cid | str | The url when aid is not specified, or the course id of the Gradescope assignment. The format of the url should be https://www.gradescope.com/courses/ | required |
aid | str | None | The assignment id of the Gradescope assignment. It should be specified when url_or_cid is a cid. | None |
src/gapper/core/problem/extras/gradescope_connect.py
Connect a problem to a Gradescope assignment.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
url_or_cid | str | The course id of the Gradescope assignment, or the url when aid is not specified. The format of the url should be https://www.gradescope.com/courses/ | required |
aid | str | None | The assignment id of the Gradescope assignment. | None |
src/gapper/core/problem/extras/gradescope_connect.py
ResultSynthesizer
This module contains a class to synthesize the results from a tester.
src/gapper/core/result_synthesizer.py
18 + 19 + 20 + 21 + 22 + 23 + 24 + 25 + 26 + 27 + 28 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 37 + 38 + 39 + 40 + 41 + 42 + 43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 |
|
__init__(*, results: List[TestResult] | None = None, metadata: GradescopeSubmissionMetadata | None = None, total_score: float | None = None) -> None
+
A class to synthesize the results from a tester.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
results | List[TestResult] | None | The results of the tester. | None |
metadata | GradescopeSubmissionMetadata | None | The metadata of the submission. | None |
total_score | float | None | The total score of the assignment. | None |
src/gapper/core/result_synthesizer.py
Synthesize the score from the results.
staticmethod
Synthesize the score from the results.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
results | List[TestResult] | The results to synthesize the score from. | required |
total_score | float | The total score of the assignment. | required |
src/gapper/core/result_synthesizer.py
64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 |
|
Convert the results to Gradescope JSON.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
save_path | Path | None | The path to save the Gradescope JSON to. | None |
kwargs | The keyword arguments to pass to the GradescopeJson constructor. | {} |
src/gapper/core/result_synthesizer.py
TestCaseWrapper
This module contains the TestCaseWrapper class, and related help definitions.
Bases: TestCase
A wrapper for the unittest.TestCase class.
This serves as a proxy for the testing process to get useful information about the test and functions for testing
src/gapper/core/unittest_wrapper.py
52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 |
|
Create a test case wrapper.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
test_param | TestParam | The test parameter to be used in testing. | required |
problem | Problem | The problem definition to be used in testing. | required |
src/gapper/core/unittest_wrapper.py
Check if the test passes against the gap_expect and gap_expect_stdout.
Returns:
Type | Description |
---|---|
Tuple[bool, Any, str] | None | True if the test passes, False if the test fails, None if the test is skipped. |
src/gapper/core/unittest_wrapper.py
Load the submission context into the test case.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
context | ContextManager | The context to load. | required |
src/gapper/core/unittest_wrapper.py
Load the submission metadata into the test case.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
metadata | GradescopeSubmissionMetadata | None | The metadata to load. The metadata could be None. | required |
src/gapper/core/unittest_wrapper.py
Run the test on the submission.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
submission | Any | The submission to be tested. | required |
result | TestResult | The result object to be used and written to. | required |
Returns:
Type | Description |
---|---|
TestResult | The result object passed to this method. |
src/gapper/core/unittest_wrapper.py
TestParam
and TestParamBundle
test_case
and param
are aliases of TestParam
, and test_cases
is an alias of TestParamBundle
respectively.
from gapper import test_case, param
+from gapper.core.test_parameter import TestParam
+
+assert param is TestParam
+assert test_case is TestParam
+
from gapper import test_cases
+from gapper.core.test_parameter import TestParamBundle
+
+assert test_cases is TestParamBundle
+
This module contains the test case (parameter) support classes and functions.
Bases: Enum
Reserved keywords for gap.
src/gapper/core/test_parameter.py
A class to extract the gap test parameter.
src/gapper/core/test_parameter.py
Initialize the gap test parameter.
staticmethod
Check if there are any residue gap kwargs.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
kwargs | Dict[str, Any] | The keyword arguments to check. | required |
src/gapper/core/test_parameter.py
Update the gap kwargs with a set of kwargs.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
kwargs | Any | The keyword arguments to be pushed into the param_info. | {} |
Bases: ParamExtractor
A class to represent a test case (parameter). Will be used as @test_case() decorator.
Attributes:
Name | Type | Description |
---|---|---|
pipeline | partial[TestParam] | specify test case using pipeline actions. |
src/gapper/core/test_parameter.py
134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 |
|
Initialize the gap test parameter (test_case).
Parameters:
Name | Type | Description | Default |
---|---|---|---|
args | Any | The arguments for the test parameter. | () |
kwargs | Any | The keyword arguments for the test parameter, including kwargs. | {} |
src/gapper/core/test_parameter.py
classmethod
Bind the gap kwargs to the test case.
Format the test parameter.
src/gapper/core/test_parameter.py
A class to represent a test parameter bundle (test_cases). Will be used as @test_cases() decorator.
Attributes:
Name | Type | Description |
---|---|---|
params | partial[TestParamBundle] | specify test cases using parameters. |
param_iter | partial[TestParamBundle] | specify test cases using parameter iterators. |
singular_params | partial[TestParamBundle] | specify test cases using singular parameters. |
singular_param_iter | partial[TestParamBundle] | specify test cases using singular parameter iterators. |
zip | partial[TestParamBundle] | specify test cases using zip. |
product | partial[TestParamBundle] | specify test cases using product. |
src/gapper/core/test_parameter.py
288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 +386 +387 +388 +389 +390 +391 +392 +393 +394 +395 +396 +397 +398 +399 +400 +401 +402 +403 +404 +405 +406 +407 +408 +409 +410 +411 +412 +413 +414 +415 +416 +417 +418 +419 +420 +421 +422 +423 +424 +425 +426 +427 +428 +429 +430 +431 +432 +433 +434 +435 +436 +437 +438 +439 +440 +441 +442 +443 +444 +445 +446 +447 +448 +449 +450 +451 +452 +453 +454 +455 +456 +457 +458 +459 +460 +461 +462 +463 +464 +465 +466 +467 +468 +469 +470 +471 +472 +473 +474 +475 +476 +477 +478 +479 +480 +481 +482 +483 +484 +485 +486 +487 +488 +489 +490 +491 +492 +493 +494 +495 +496 +497 +498 +499 +500 +501 +502 +503 +504 +505 +506 +507 +508 +509 +510 +511 +512 +513 +514 +515 +516 +517 +518 +519 +520 +521 +522 +523 +524 +525 +526 +527 +528 +529 +530 +531 +532 +533 +534 +535 +536 +537 +538 +539 +540 +541 +542 +543 +544 +545 +546 +547 +548 +549 +550 +551 |
|
Generate the test cases as a decorator.
src/gapper/core/test_parameter.py
__init__(*args: Iterable[Any] | Any, gap_product: bool = False, gap_zip: bool = False, gap_params: bool = False, gap_param_iter: bool = False, gap_singular_params: bool = False, gap_singular_param_iter: bool = False, **kwargs: Any) -> None
+
Initialize the test parameter bundle (test_cases).
Parameters:
Name | Type | Description | Default |
---|---|---|---|
args | Iterable[Any] | Any | The arguments for the test parameter bundle. | () |
gap_product | bool | Whether to take the cartesian product of the arguments. .. deprecated:: Use params, param_iter, singular_params, singular_param_iter instead. | False |
gap_zip | bool | Whether to zip the arguments. .. deprecated:: Use params, param_iter, singular_params, singular_param_iter instead. | False |
gap_params | bool | Whether to parse the arguments as parameters. | False |
gap_param_iter | bool | Whether to parse the argument as parameter iterators. | False |
gap_singular_params | bool | Whether to parse the arguments as singular parameters. | False |
gap_singular_param_iter | bool | Whether to parse the arguments as singular parameter iterators. | False |
kwargs | Any | The keyword arguments for the test parameter bundle. .. seealso:: :class: | {} |
src/gapper/core/test_parameter.py
354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 +386 +387 +388 +389 +390 +391 +392 +393 +394 +395 +396 +397 +398 +399 +400 +401 +402 +403 +404 +405 +406 +407 +408 +409 +410 +411 +412 +413 +414 +415 +416 |
|
staticmethod
Add gap_kwargs to the finalized parameters.
src/gapper/core/test_parameter.py
classmethod
Bind the gap kwargs to the test cases.
staticmethod
Parse the parameters for param sequence.
src/gapper/core/test_parameter.py
staticmethod
Parse the parameters for param sequence.
src/gapper/core/test_parameter.py
staticmethod
parse_zip_or_product(*args: Iterable[Any], gap_product: bool = False, gap_zip: bool = False, **kwargs: Any) -> List[TestParam]
+
Parse parameters for zip or product.
:deprecated: Use params, param_iter, singular_params, singular_param_iter instead.
src/gapper/core/test_parameter.py
TestResult
-- Test Result ProxyThis module contains the test result class.
dataclass
src/gapper/core/test_result.py
13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 + 21 + 22 + 23 + 24 + 25 + 26 + 27 + 28 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 37 + 38 + 39 + 40 + 41 + 42 + 43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 |
|
property
The name of the test, with the default name prepended if the name is unset.
property
The description output of the test, with the score and max score appended if set.
Add a description to the test.
New descriptions are added as newlines to the end of the existing descriptions.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
detail | str | The description to add. | () |
src/gapper/core/test_result.py
Add an error to the test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
error | ErrorFormatter | The error to add. | required |
set_failed | bool | Whether to set the pass status to failed. | True |
src/gapper/core/test_result.py
Set the description of the test.
This overrides all the existing descriptions.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
detail | Iterable[str] | The description to set. | required |
src/gapper/core/test_result.py
Set the extra points of the test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
score | float | None | The extra points of this test case to set. | required |
Set the hidden status of the test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
hidden | bool | Whether the test is hidden. | required |
Set the max score of the test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
max_score | float | None | The max score of this test case to set. | required |
Set the pass status of the test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
status | PassStateType | The pass status to set. | required |
Set the score of the test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
score | float | The score to set. | required |
Set the weight of the test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
weight | int | None | The weight of this test case to set. | required |
Tester
and post_test
post_test
is an alias of PostTest
. That is
from gapper import post_test
+from gapper.core.tester import PostTest
+
+assert post_test is PostTest
+
This module contains the definition of the tester class.
Bases: ModuleLoader
, Generic[ProbInputType, ProbOutputType]
src/gapper/core/tester/tester_def.py
43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 |
|
property
writable
The problem to be tested.
property
The context of captured from the submission.
Create a tester object.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
problem | Problem[ProbInputType, ProbOutputType] | The problem to be tested. | required |
src/gapper/core/tester/tester_def.py
Check if the context is complete against what's required in the problem.
src/gapper/core/tester/tester_def.py
Dump the tester to a file.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
path | Path | str | The path to dump the tester to. | required |
src/gapper/core/tester/tester_def.py
classmethod
Load a tester from a file.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
path | Path | The path to load the tester from. | required |
src/gapper/core/tester/tester_def.py
Load the context from a module.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
md | ModuleType | The module to load the context from. | required |
Raises:
Type | Description |
---|---|
MultipleContextValueError | If multiple context values are found. |
src/gapper/core/tester/tester_def.py
Load the submission from a path.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
path | Path | The path to load the submission from. If the path is a directory, it will be searched recursively. | required |
Raises:
Type | Description |
---|---|
NoSubmissionError | If no submission is found. |
MultipleSubmissionError | If multiple submissions are found. |
src/gapper/core/tester/tester_def.py
Run the tests.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
metadata | GradescopeSubmissionMetadata | None | The metadata of the submission, which could be None. | None |
src/gapper/core/tester/tester_def.py
run_post_tests(*, results: List[TestResult], metadata: GradescopeSubmissionMetadata | None) -> List[TestResult]
+
Run the post tests.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
results | List[TestResult] | The results of the tests. | required |
metadata | GradescopeSubmissionMetadata | None | The metadata of the submission, which could be None. | required |
src/gapper/core/tester/tester_def.py
PostTest
and post_test
A decorator for post tests. Will be used as the @post_test() decorator.
src/gapper/core/tester/post_test_hook.py
Add the post test to the problem.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
problem | Problem | The problem to add the post test to. | required |
A decorator for specifying post tests. Will be used as @post_test().
from gapper import post_test, problem
+
+@post_test()
+@problem()
+...
+
Parameters:
Name | Type | Description | Default |
---|---|---|---|
post_test_fn | PostTestFn | The function to be called after all tests are run. | required |
as_test_case | bool | Whether to treat the post test as a test case. If this is set to True, the post test will incur a TestResult instance to be created and be added to the pool of all test results after the post testing phase is completed. The test result will then be used to synthesize the score. If this is set to False, the post test will not incur a TestResult instance. | True |
src/gapper/core/tester/post_test_hook.py
run(test_results: List[TestResult], result_proxy: TestResult | None, metadata: GradescopeSubmissionMetadata | None) -> TestResult | None
+
Run the post test.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
test_results | List[TestResult] | The results of the tests. | required |
result_proxy | TestResult | None | The proxy of the post test result. | required |
metadata | GradescopeSubmissionMetadata | None | The metadata of the submission, which could be None. | required |
src/gapper/core/tester/post_test_hook.py
Utility functions and classes for the core module.
Bases: Protocol
The custom test function protocol.
src/gapper/core/utils.py
The function type to be called for custom tests.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
param | TestCaseWrapper | The TestCaseWrapper instance. It contains the test case information, including the test case name, the test case parameters, etc. | required |
result_proxy | TestResult | The TestResult instance of this custom test to be used as a proxy. You can use this proxy to affect the test result of this test case. See .. seealso:: :class: | required |
solution | T | The expected result, which will be the solution under the @problem decorator | required |
submission | T | The actual result, which will be the submission from the student | required |
Raises:
Type | Description |
---|---|
AssertionError | It should raise assertion error if test fails. |
src/gapper/core/utils.py
Bases: Protocol
The custom equality check function protocol.
src/gapper/core/utils.py
The function type to be called for custom equality checks.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
expected | T | The expected result, which will be executed result of the solution | required |
actual | T | The actual result, which will be the executed result of the submission | required |
msg | str | None | The message to be printed if the equality check fails. | None |
Raises:
Type | Description |
---|---|
AssertionError | It should raise assertion error if the equality check fails |
src/gapper/core/utils.py
Bases: Protocol
The post check function protocol.
src/gapper/core/utils.py
__call__(param: TestCaseWrapper, result_proxy: TestResult, solution: T, submission: T, expected_results: Tuple[Any, str | None], actual_results: Tuple[Any, str | None]) -> None
+
The function type to be called for post checks all the equality check of a test case.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
param | TestCaseWrapper | The TestCaseWrapper instance. It contains the test case information, including the test case name, the test case parameters, etc. | required |
result_proxy | TestResult | The TestResult instance of this custom test to be used as a proxy. You can use this proxy to affect the test result of this test case. See .. seealso:: :class: | required |
solution | T | The expected result, which will be the solution under the @problem decorator | required |
submission | T | The actual result, which will be the submission from the student | required |
expected_results | Tuple[Any, str | None] | The expected results of the test case. It is a tuple of expected execution result and expected stdout result. | required |
actual_results | Tuple[Any, str | None] | The actual results of the test case. It is a tuple of actual execution result and actual stdout result. | required |
Raises:
Type | Description |
---|---|
AssertionError | It should raise assertion error if the post check fails. |
src/gapper/core/utils.py
Bases: Protocol
src/gapper/core/utils.py
__call__(test_results: List[TestResult], test_proxy: TestResult | None, metadata: GradescopeSubmissionMetadata | None) -> None
+
The function type to be called after all tests are run.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
test_results | List[TestResult] | A list of test results from tested test cases. Note that, the number of results will remain the same through the post testing phase, even though you have post tests with as_test_case set to True. The results from post tests will not be added until the post testing phase is completed. | required |
test_proxy | TestResult | None | The TestResult instance of this post test to be used as a proxy. If the post_test's as_test_case is set to False, this will be None. | required |
metadata | GradescopeSubmissionMetadata | None | The metadata of the submission. | required |
src/gapper/core/utils.py
gapper
welcomes everyone to contribute to the project. There are many ways to contribute. You can open up issues when you find bugs or have feature requests. You can also contribute code to the project. This document contains the guidelines for contributing to the project.
We assume you have Python>=3.12
and poetry
is installed on your computer.
git clone https://github.com/<your_user_name>/gapper.git
.poetry install
.pre-commit install --hook-type pre-commit --hook-type pre-push
. See Code Style.pytest tests
. See Testing.--ignore-glob=*integration_test.py
flag to your pytest
command. If you have pre-commit hooks installed, you're good to go. The hooks will automatically check your code style and notify you if something went wrong.
If you're using any kind of IDEs, please install the corresponding plugins from the instructions above.
We are currently not checking document strings and this will be enforced in the future.
gapper
uses ruff
as the formatter and linter. The configuration is encoded in the pyproject.toml
located under the root of this project. You should run ruff src tests
to lint the style of your code, and use ruff format src tests
to format your code. You might be prompted to use the --fix
flag to auto-fix some of the problems; please do so when you find it appropriate.
Testing is done using pytest
and several extensions including pytest-mock
, pytest-asyncio
, and pytest-cov
.
As mentioned above, some integrations can be slow to test, including CLI testing and GUI testing. You can find those integrations by searching *integration_test.py
in the project directory.
The pre-commit hook ignores integration tests when you commit, but checks them when you push. This helps us do quick commits without waiting for testing while ensuring code qualities. This implies two things:
We will discuss the detailed usage of gapper
, including installation, CLI commands, how to create a problem, and how to construct test cases.
The python version required is >=3.12.0
.
You can either install from PyPI
or install from source
You need a solution to the assignment for which you'd like to create an autograder, whether it is a function or a class.
If you want a brief of the whole process, please refer to the workflow brief in the home page.
Once gapper
is installed, you can invoke it through either gap
, gapper
, or gradescope-autograder-packer
.
❯ gapper --help
+
+Usage: gapper [OPTIONS] COMMAND [ARGS]...
+
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --install-completion [bash|zsh|fish|powershell|pwsh] Install completion for the specified shell. │
+│ [default: None] │
+│ --show-completion [bash|zsh|fish|powershell|pwsh] Show completion for the specified shell, to copy it or │
+│ customize the installation. │
+│ [default: None] │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Commands ──────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ check Check if the problem is defined correctly again the gap_check fields. │
+│ gen Generate the autograder for a problem. │
+│ login Login to Gradescope. │
+│ run Run the autograder on an example submission. │
+│ run-in-prod Run the autograder in production mode. │
+│ upload │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper check --help
+
+ Usage: gapper check [OPTIONS] PATH
+
+ Check if the problem is defined correctly again the gap_check fields.
+
+╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ * path PATH The path to the problem python file. [default: None] [required] │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --auto-inject -a Whether to auto inject the tester file. [default: (dynamic)] │
+│ --inject -i PATH The path to the tester file to inject. [default: (dynamic)] │
+│ --verbose -v Whether to run in verbose mode. │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper gen --help
+
+ Usage: gapper gen [OPTIONS] PATH
+
+ Generate the autograder for a problem.
+
+╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ * path PATH The path to the problem python file. [default: None] [required] │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --save-path -s PATH The directory to save the generated tester file. [default: (dynamic)] │
+│ --auto-inject -a Whether to auto inject the tester file. [default: (dynamic)] │
+│ --inject -i PATH The path to the tester file to inject. [default: (dynamic)] │
+│ --confirm-overwrite -y Confirm overwrite files. │
+│ --verbose -v Whether to run in verbose mode. │
+│ --upload -u Whether to upload the autograder. │
+│ --gui -g Whether to use the GUI to upload. │
+│ --login-save-path -l PATH The path to save the login info. │
+│ [default: /Users/flicker_soul/.config/gapper/gs_account.yaml] │
+│ --ui-debug -d Whether to run in verbose mode. │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper login --help
+
+ Usage: gapper login [OPTIONS]
+
+ Login to Gradescope.
+
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --confirm-store -s Confirm storing your login info. │
+│ --confirm-overwrite -y Confirm overwrite files. │
+│ --login-save-path -l PATH The path to save the login info. │
+│ [default: /Users/flicker_soul/.config/gapper/gs_account.yaml] │
+│ --verbose -v Whether to run in verbose mode. │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper run --help
+
+ Usage: gapper run [OPTIONS] PATH SUBMISSION
+
+ Run the autograder on an example submission.
+
+╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ * path PATH The path to the problem python file. [default: None] [required] │
+│ * submission PATH The path to the submission file. [default: None] [required] │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --metadata -m FILE The path to the submission metadata file. [default: (dynamic)] │
+│ --auto-inject -a Whether to auto inject the tester file. [default: (dynamic)] │
+│ --inject -i PATH The path to the tester file to inject. [default: (dynamic)] │
+│ --verbose -v Whether to run in verbose mode. │
+│ --total-score FLOAT [default: 20] │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper run-in-prod --help
+
+ Usage: gapper run-in-prod [OPTIONS] [TESTER_PATH] [SUBMISSION_DIR]
+ [METADATA_FILE] [OUTPUT_FILE]
+
+ Run the autograder in production mode.
+
+╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ tester_path [TESTER_PATH] The path to the tester pickle file. [default: /autograder/source/tester.pckl] │
+│ submission_dir [SUBMISSION_DIR] The path to the submission directory. [default: /autograder/submission] │
+│ metadata_file [METADATA_FILE] The path to the submission metadata file. │
+│ [default: /autograder/submission_metadata.json] │
+│ output_file [OUTPUT_FILE] The path to the output file. [default: /autograder/results/results.json] │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --verbose -v Whether to run in verbose mode. [default: True] │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper upload --help
+
+ Usage: gapper upload [OPTIONS] COMMAND [ARGS]...
+
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Commands ──────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ gui Upload an autograder to Gradescope with GUI. │
+│ ids Upload an autograder to Gradescope using the cid and aid. │
+│ url Upload an autograder to Gradescope using the assignment url. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper upload gui --help
+
+ Usage: gapper upload gui [OPTIONS] AUTOGRADER_PATH
+
+ Upload an autograder to Gradescope with GUI.
+
+╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ * autograder_path PATH The path to the autograder zip file. [default: None] [required] │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --login-save-path -l PATH The path to save the login info. │
+│ [default: /Users/flicker_soul/.config/gapper/gs_account.yaml] │
+│ --ui-debug -d Whether to run in verbose mode. │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper upload ids --help
+
+ Usage: gapper upload ids [OPTIONS] AUTOGRADER_PATH [CID] [AID]
+
+ Upload an autograder to Gradescope using the cid and aid.
+
+╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ * autograder_path PATH The path to the autograder zip file. [default: None] [required] │
+│ cid [CID] The course id. [default: None] │
+│ aid [AID] The assignment id. [default: None] │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --login-save-path -l PATH The path to save the login info. │
+│ [default: /Users/flicker_soul/.config/gapper/gs_account.yaml] │
+│ --ui-debug -d Whether to run in verbose mode. │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
❯ gapper upload url --help
+
+ Usage: gapper upload url [OPTIONS] AUTOGRADER_PATH ASSIGNMENT_URL
+
+ Upload an autograder to Gradescope using the assignment url.
+
+╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ * autograder_path PATH The path to the autograder zip file. [default: None] [required] │
+│ * assignment_url TEXT The url to the autograder. [default: None] [required] │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --login-save-path -l PATH The path to save the login info. │
+│ [default: /Users/flicker_soul/.config/gapper/gs_account.yaml] │
+│ --ui-debug -d Whether to run in verbose mode. │
+│ --help Show this message and exit. │
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+
assignment
refers to the assignment issued to students, and is often created as one GradeScope assignment entry. solution
refers to the code that solves the assignment. problem
refers to the definition of an assignment in the autograder. It can be created with the @problem
decorator. test case
refers to one test entry in the GradeScope assignment entry. It can be created with the @test_case
decorator. test cases
refer to a group of test case
s. They can be created in batch with the @test_cases
decorator. We first import problem
from gapper
and apply it as a decorator. Examples are provided at the end of this section.
The problem
has two overloads:
check_stdout: Optional[bool] = None
+mock_input: Optional[bool] = None
+context: Iterable[str] = ()
+
is_script
is used to indicate if the assignment is a script, which is something like the following
is_script
to False
, the @problem()
decorator treats the decorated entity without extra interpretation. check_stdout
asks the autograder to compare stdout output (e.g. from the print
function)
mock_input
feeds test case arguments into input
call when the submission is run.
context
is used to capture variables in submissions. In the following example, the GasStation
in student's submission will become available to the autograder.
class GasStation:
+ """A gas station implementation."""
+
+@problem(context=['GasStation'])
+class Car:
+ """The car uses GasStation."""
+
Problem:
Submission:@problem(is_script=True) +def distance(): + x1 = float(input("Location x-coordinate? ")) + y1 = float(input("Location y-coordinate? ")) + x2 = float(input("Classroom x-coordinate? ")) + y2 = float(input("Classroom y-coordinate? ")) + dx = x2-x1 + dy = y2-y1 + d = (dx*dx+dy*dy)**0.5 + print("Distance:") + print(d) +
Problem:
Submission:
Problem:
Submission:
You can import test_case
and test_cases
to help the generation of tests. The two helpers are treated as decorators and should be applied after the @problem()
decorator. For example,
from gapper import problem, test_case, test_cases, param
+
+@test_cases.param_iter((i, i + 1) for i in range(10))
+@test_cases.params([1, 2], param(1, b=1), param(1, b=3))
+@test_case(1, b=4)
+@problem()
+def fn(a, b):
+ ...
+
Given a function def fn()
, arguments specified in @test_case()
will be unfolded to parameters of fn when testing. That is, for instance, the input of a
, args
, kw=1
, and kwargs
in @test_case(a, *args, kw=1, **kwargs)
will result in fn(a, *args, kw=1, **kwargs)
when testing.
When using @test_cases()
one has to choose a flavor of test_cases
before proceeding. The options currently are params
, param_iter
, singular_params
, and singular_param_iter
. To choose an option, one specifies it by using @test_cases.<option>()
. For example, @test_cases.params()
. Depending on the option, you can usually pass either Iterable
s or param
s as arguments to the decorator @test_cases()
. For example, @test_cases.params([1, 2], param(3, b=4))
. Note that param
is the preferred way to define test cases since it is equivalent to @test_case
semantically.
The following is the explanation of the effect of each option.
params
takes in any number of Sequence
or param
. Each Sequence
or param
is equivalent to specifying a @test_case()
. For example, @test_cases.params([1, 2], param(1, b=1))
is equivalent to specifying two tests cases, @test_case(1, 2)
and @test_case(1, b=1)
. param_iter
takes in a Iterable
object of Sequence
or param
. @test_cases.param_iter(iter)
is equivalent to @test_cases.params(*iter)
. For example, singular_params
is similar to params
except it does not unfold Sequence
like params
. That is, @test_cases.singular_params([1, 2], param(1, b=1))
is equivalent to specifying two tests cases, @test_case([1, 2])
and @test_case(1, b=1)
. singular_param_iter
is similar to param_iter
. @test_cases.singular_param_iter(iter)
is equivalent to @test_cases.singular_params(*iter)
. You can configure test cases' properties by using keyword arguments start with gap_
. For each test case, the supported options are
gap_expect: The expected output of the test case.
+gap_expect_stdout: The expected stdout of the test case.
+gap_hidden: Whether the test case is hidden.
+gap_name: The name of the test case.
+gap_extra_points: The extra credit of the test case.
+gap_override_check: The custom equality check function.
+gap_override_test: The custom test function.
+gap_description: The description of the test case.
+gap_is_pipeline: Whether the test case is a pipeline.
+gap_max_score: The max score of the test case.
+gap_weight: The weight of the test case.
+
We will dedicate a page to discuss their usages. gap_ Keywords
You can notice that the @test_case
and @test_cases
decorators take in parameters that should be passed into the function under test.
from gapper import problem, test_case, test_cases
+
+@test_cases.params([5, 6], [7, 8]) # test_cases is a decorator that takes in a list of test cases
+@test_case(3, 4) # test_case is a decorator that takes in a single test case
+@test_case(1, 2) # they together generate 4 tests, where the parameters are
+@problem() # x=1,y=2; x=3,y=4; x=5,y=6; x=7,y=8
+def add(x: int, y: int) -> int:
+ return x + y
+
The following are several ways to specify test cases.
This is how you can specify a test cases with one iterable parameter.
from gapper import problem, test_cases, param
+from typing import Iterable, Generator
+import random
+
+def randomly_generate_numbers(times: int) -> Generator[param, None, None]:
+ for _ in range(times):
+ yield param([random.randint(0, 100) for _ in range(random.randint(0, 100))])
+
+@test_cases.param_iter(randomly_generate_numbers(10), gap_max_score=1) # the first two lines have the same semantics, which is creating
+@test_cases.params(*randomly_generate_numbers(10), gap_max_score=1) # 10 random generated numbers, each worth 1 point
+@test_cases.params(param([1, 2]), param([3, 4], gap_max_score=2)) # `param` is a helper that allows you to specify parameters, in a more
+@test_cases.params([[5, 6]], [[7, 8]], gap_hidden=[True, False]) # readable way. This problem has 6 test cases, where the parameters
+@problem() # are [1,2]; [3,4]; [5,6]; [7,8]. The three ways of specifying
+def sum_many(args: Iterable[int]) -> int: # parameters are equivalent. Note that @test_cases.params([5, 6], [7, 8])
+ return sum(args) # doesn't work because will treat [x, y] as two parameters instead of a list.
+
This is how you can specify a test cases with keyword arguments.
from gapper import problem, test_cases, test_case, param
+
+@test_cases(param(0, x = 1, y = 2), param(3, x = 4, y = 5)) # You can also specify kwargs in the param or test_case
+@test_case(6, x = 7, y = 8) # decorator. Note that using param is the only way to
+@test_case(9, x = 10) # specify kwargs in test_cases.
+@problem()
+def add(a: int, x: int, y: int = 20) -> int:
+ return a * x + y
+
This is how you can override the equality check between the solution and the submission.
from gapper import problem, test_cases, test_case
+from typing import Iterable
+
+def override_check(solution_ans, submission_ans) -> bool:
+ return set(solution_ans) == set(submission_ans)
+
+@test_cases(11, 12, 13, gap_override_check=override_check)
+@test_case(10, gap_override_check=override_check)
+@problem()
+def generate_numbers(x: int) -> Iterable[int]:
+ return range(x)
+
This is how you can override how the submission should be tested.
from gapper import problem, test_case, test_cases
+from gapper.core.unittest_wrapper import TestCaseWrapper
+from gapper.core.test_result import TestResult
+
+def override_test(tc: TestCaseWrapper, result: TestResult, solution, submission):
+ solution_answer = solution(*tc.test_param.args)
+ student_answer = submission(*tc.test_param.args)
+ tc.assertEqual(solution_answer, student_answer)
+
+ result.set_pass_status("failed")
+
+
+@test_cases([3, 4], [5, 6], gap_override_test=override_test)
+@test_case(1, 2, gap_override_test=override_test)
+@problem()
+def add(x: int, y: int) -> int:
+ if x < 0 or y < 0:
+ raise ValueError("x and y must be positive")
+ return x + y
+
Welcome to gapper
wiki. Here you can find how to use gapper and how to contribute to gapper. This project is inspired by aga and some core code is taken from my contribution to the aga
project.
If you're looking for API references, please visit this page
gapper
And Who Might Find It Helpful.gapper
is created to facilitate creating autograders for the GradeScope platform. The official tool recommended by GradeScope is gradescope-utils
. However, this tool requires users to write cumbersome unittest
test cases like the following:
class TestSimpleArithmetic(unittest.TestCase):
+ def setUp(self):
+ self.calc = Calculator()
+
+ @weight(1)
+ def test_eval_add(self):
+ """Evaluate 1 + 1"""
+ val = self.calc.eval("1 + 1")
+ self.assertEqual(val, 2)
+
Considering professors and teaching assistants usually provide their solutions to students, we created gapper
to help them create graders directly and easily from the solutions, without writing boilerplates and test cases from the ground up. For example, the code above can be expressed as the following with the help from gapper
:
from gapper import problem, test_case
+from gapper.core.pipeline_support import Constructor, Function, Property
+
+init = Constructor()
+eval = Function("eval")
+
+@test_case.pipeline(init(), eval("1 + 1"))
+@problem()
+class Calculator:
+ """The calculator solution."""
+
Note that we designed gapper
to handle simple workflow, such as testing a single function, a single class, etc. You can adapt gapper
to more complicated workflow by overriding test processes using gap_override_test
which will be covered in a separate post.
If you're interested, please check out the following workflow brief.
Suppose you are creating an autograder for the following Python solution:
# print_digits.py
+def print_digits(n: int) -> None:
+ print(n % 10)
+ if n >= 10:
+ print_digits(n // 10)
+
First, you need to install gapper
by running pip install gapper
in your terminal (the minimal Python version is 3.12.0). Once it's installed, you can import problem
from gapper
and invoke it as a decorator, like the following. This will transform the solution into a problem operated by the autograder. The check_stdout
flag instructs the autograder to check stdout
output from the print
function.
# print_digits.py
+from gapper import problem
+
+@problem(check_stdout=True)
+def print_digits(n: int) -> None:
+ print(n % 10)
+ if n >= 10:
+ print_digits(n // 10)
+
Suppose you want to create 10 tests, each worth 1 point. In addition, 4 of them are hand written and others are randomly generated. You can import test_case
and test_cases
from gapper
, and invoke them as you do with the problem
directive. The following is an example.
# print_digits.py
+from gapper import problem, test_case, test_cases
+import random
+
+
+@test_cases.param_iter(
+ ([random.randint(100, 10000)] for _ in range(3)), gap_max_score=1, gap_hidden=True
+)
+@test_cases.param_iter(
+ ([random.randint(100, 10000)] for _ in range(3)), gap_max_score=1
+)
+@test_case(1, gap_max_score=1)
+@test_case(1234, gap_max_score=1)
+@test_case(3731, gap_max_score=1)
+@test_case(7, gap_max_score=1)
+@problem(check_stdout=True)
+def print_digits(n: int) -> None:
+ print(n % 10)
+ if n >= 10:
+ print_digits(n // 10)
+
You can then in the command line invoke gapper gen print_digits.py
, which will generate a print_digits.zip
file. Note that the random numbers are generated at creation time instead of at judging time, meaning once the autograder is created, the random numbers are chosen and fixed for all student submissions.
On gradescope, when creating a new assignment, choose Programming Assignment
and fill in the required details.
Then, on the configure autograder
page, click Select Autograder (.zip)
button, choose the print_digits.zip
file from your filesystem, and the click Update Autograder
.
After the autograder is built, you can click on Test Autograder
next to the Update Autograder
button, and upload the print_digits.py
solution to see the grading result.
TestResult
(Proxy)
We discuss how to operate the result proxy when overriding a test or writing custom post check functions. We recommend reading the gap_override_test
and gap_post_checks
sections first.
TestResult
(Proxy)
A TestResult
contains information of the execution result of a test. Thus, when creating your own testing functions or post checking functions, you might want to modify the test result object accordingly to store information such as score obtained, pass status, descriptions of the test, etc. In the text below, we will use test result proxy and test result interchangeably.
@dataclass
+class TestResult:
+ default_name: str
+ name: str | None = None
+ score: float | None = field(default=None)
+ max_score: float | None = field(default=None)
+ weight: int | None = field(default=None)
+ extra_points: float | None = field(default=None)
+ errors: List[ErrorFormatter] = field(default_factory=list)
+ pass_status: PassStateType | None = field(default=None)
+ hidden: bool = False
+ descriptions: List[str] = field(default_factory=list)
+
The signature of a gap_override_test
function and a gap_post_check
function are the following, in which the second positional argument is the TestResult
proxy.
class CustomTestFn(Protocol):
+ def __call__[T](self, param: TestCaseWrapper, result_proxy: TestResult, expected: T, actual: T) -> None:
+ ...
+
class PostChecksFn(Protocol):
+ def __call__[T](
+ self,
+ param: TestCaseWrapper,
+ result_proxy: TestResult,
+ solution: T,
+ submission: T,
+ expected_results: Tuple[Any, str | None],
+ actual_results: Tuple[Any, str | None],
+ ) -> None:
+ ...
+
You can set the result attributes using set_<attribute>()
function. For example,
result_proxy.set_score(result_proxy.max_score // 2)
+result_proxy.set_pass_status("failed")
+result_proxy.add_description(
+ "Failed because recursive call not found in submission."
+)
+
Please refer to the API reference here for detailed usages.
There are two ways to upload your autograder directly to gradescope: through GUI, or through either a url to the assignment or the specific course id and assignment id.
You need to understand what a course is and what an assignment is in gradescope, and the difference between the two.
Before uploading, you are assumed to have your gradescope email and password, and have created the assignment you're uploading the autograder to.
To upload an autograder, you will need to log into your account. You can do that using the gapper login
command. You can always check the commands' help messages by running gapper <command> --help
.
Note that, we don't store your password anywhere. However, we do remember your session cookie for upload to happen. If you find your cookies expired, simply log in again.
Once you are logged in, you can use either of the following two ways to upload your autograder.
gapper gen
When you're generating an autograder using gapper gen
, simply by attaching --upload
flag or --upload --gui
to the command.
--upload
flag with gs_connect
If you're using --upload
flag only, you have to have gs_connect
decorator on your problem definition.
When specifying gs_connect
argument, you can either pass in a url to the assignment, or the course id and assignment id.
An example using the url to the assignment:
from gapper import problem, gs_connect
+
+@gs_connect('https://www.gradescope.com/courses/<cid>/assignments/<aid>')
+@problem()
+def add(a: int, b: int) -> int:
+ ...
+
Note that the url has to at least contain https://www.gradescope.com/courses/<cid>/assignments/<aid>
. It doesn't matter if there are any other things after the url. For example, https://www.gradescope.com/courses/<cid>/assignments/<aid>/review_grades
is also acceptable.
An example using the course id and assignment id:
from gapper import problem, gs_connect
+
+@gs_connect('<cid>', '<aid>')
+@problem()
+def add(a: int, b: int) -> int:
+ ...
+
The <cid>
and <aid>
must be strings of digits. You can reference the API for more information.
--upload --gui
flag with GUI
If you don't specify gs_connect
, you can add an additional --gui
flag to use graphical interface to upload to an assignment. Simply type gapper gen <script> --upload --gui
and follow the instruction.
If you have your autograder zip file already, you can use gapper upload
command to upload your autograder.
There are three subcommands under gapper upload
: gapper upload gui
, gapper upload url
, and gapper upload id
.
gapper upload gui
This command will open a graphical interface for you to upload your autograder. Simply type gapper upload gui
and follow the instruction.
gapper upload url
This command will upload your autograder to an assignment using the url to the assignment. Simply type gapper upload url
and follow the instruction.
gapper upload id
This command will upload your autograder to an assignment using the course id and assignment id. Simply type gapper upload id
and follow the instruction.
It can be confusing to remember all function protocols used in gapper. Below, we list the function signatures and their docstrings for each use case.
gap_override_test
Bases: Protocol
The custom test function protocol.
src/gapper/core/utils.py
The function type to be called for custom tests.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
param | TestCaseWrapper | The TestCaseWrapper instance. It contains the test case information, including the test case name, the test case parameters, etc. | required |
result_proxy | TestResult | The TestResult instance of this custom test to be used as a proxy. You can use this proxy to affect the test result of this test case. See .. seealso:: :class: | required |
solution | T | The expected result, which will be the solution under the @problem decorator | required |
submission | T | The actual result, which will be the submission from the student | required |
Raises:
Type | Description |
---|---|
AssertionError | It should raise assertion error if test fails. |
src/gapper/core/utils.py
gap_override_check
Bases: Protocol
The custom equality check function protocol.
src/gapper/core/utils.py
The function type to be called for custom equality checks.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
expected | T | The expected result, which will be executed result of the solution | required |
actual | T | The actual result, which will be the executed result of the submission | required |
msg | str | None | The message to be printed if the equality check fails. | None |
Raises:
Type | Description |
---|---|
AssertionError | It should raise assertion error if the equality check fails |
src/gapper/core/utils.py
gap_post_check
Bases: Protocol
The post check function protocol.
src/gapper/core/utils.py
__call__(param: TestCaseWrapper, result_proxy: TestResult, solution: T, submission: T, expected_results: Tuple[Any, str | None], actual_results: Tuple[Any, str | None]) -> None
+
The function type to be called for post checks after all the equality checks of a test case.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
param | TestCaseWrapper | The TestCaseWrapper instance. It contains the test case information, including the test case name, the test case parameters, etc. | required |
result_proxy | TestResult | The TestResult instance of this custom test to be used as a proxy. You can use this proxy to affect the test result of this test case. See .. seealso:: :class: | required |
solution | T | The expected result, which will be the solution under the @problem decorator | required |
submission | T | The actual result, which will be the submission from the student | required |
expected_results | Tuple[Any, str | None] | The expected results of the test case. It is a tuple of expected execution result and expected stdout result. | required |
actual_results | Tuple[Any, str | None] | The actual results of the test case. It is a tuple of actual execution result and actual stdout result. | required |
Raises:
Type | Description |
---|---|
AssertionError | It should raise assertion error if the post check fails. |
src/gapper/core/utils.py
post_test
Bases: Protocol
src/gapper/core/utils.py
__call__(test_results: List[TestResult], test_proxy: TestResult | None, metadata: GradescopeSubmissionMetadata | None) -> None
+
The function type to be called after all tests are run.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
test_results | List[TestResult] | A list of test results from tested test cases. Note that, the number of results will remain the same through the post testing phase, even though you have post tests with as_test_case set to True. The results from post tests will not be added until the post testing phase is completed. | required |
test_proxy | TestResult | None | The TestResult instance of this post test to be used as a proxy. If the post_test's as_test_case is set to False, this will be None. | required |
metadata | GradescopeSubmissionMetadata | None | The metadata of the submission. | required |
src/gapper/core/utils.py
gap_
Keywords
This post discusses the effect of each gap_
keyword and how they can be used.
gap_
Keyword Listing
We list the possible gap_
keywords below.
gap_expect: The expected output of the test case.
+gap_expect_stdout: The expected stdout of the test case.
+gap_hidden: Whether the test case is hidden.
+gap_name: The name of the test case.
+gap_extra_points: The extra credit of the test case.
+gap_override_check: The custom equality check function.
+gap_override_test: The custom test function.
+gap_post_checks: The custom post check functions.
+gap_description: The description of the test case.
+gap_is_pipeline: Whether the test case is a pipeline.
+gap_max_score: The max score of the test case. This and gap_weight cannot be specified at the same time.
+gap_weight: The weight of the test case. This and gap_max_score cannot be specified at the same time.
+
@test_case()
And @test_cases
@test_case()
, gap_
keywords are specified as ordinary keyword arguments. For example, @test_case(1, 2, gap_expect=3, gap_name="secret test", gap_max_score=5)
. @test_cases()
, gap_
keywords are also specified as keyword arguments but accept one single value or a Sequence
of values. For example, @test_cases.params([1,2], [3,4], gap_max_score=2, gap_name=["test 1", "test 2"])
. When a single value is passed, it will be duplicated to every test case. When a sequence is passed, the length of the sequence has to be the same as the number of test cases, and the values will be applied to each test case in order. gap_expect
This serves as a sanity check of the correctness of the defined problem. When specified in a test case, its value equals the expected outcome of executing the test case. For example,
# add_num.py
+@test_case(2, 2, gap_expect=3)
+@test_case(1, 2, gap_expect=3)
+@problem()
+def add(a: int, b: int) -> int:
+ return a + b
+
Using the command line, we invoke gapper check add_num.py
and we will see
❯ gapper check add_num.py
+Test (1, 2) passed: True
+Test (2, 2) passed: False
+ result: 4
+ expected result: 3
+ output: None
+ expected output: None
+
gap_expect_stdout
It is similar to gap_expect
except it expects output from the stdout
.
gap_hidden
It accepts True
or False
and indicates if the test case can be seen by students. For example,
@test_cases.param_iter(
+ ([random.randint(100, 10000)] for _ in range(3)),
+ gap_max_score=2,
+ gap_hidden=True,
+ gap_name="random hidden tests",
+)
+
gap_name
A custom name of the test case, shown beside the arguments passed to the test. For example,
@test_cases.param_iter(
+ ([random.randint(100, 10000)] for _ in range(3)),
+ gap_max_score=2,
+ gap_hidden=True,
+ gap_name="random hidden tests",
+)
+@test_cases.param_iter(
+ ([random.randint(100, 10000)] for _ in range(3)),
+ gap_max_score=0,
+ gap_name="random tests",
+)
+
will produce
gap_description
Description of the test that will be shown to the students. For example, the following code will produce the result below.
@test_case(
+ 1, gap_max_score=1, gap_description="this is the test in the assignment handout"
+)
+@test_case(1234, gap_max_score=1, gap_description=["this test is ", "slightly longer"])
+
gap_max_score
The max score this test is worth. This cannot coexist with gap_weight
.
gap_weight
The weight of the max score this test is worth. This cannot coexist with gap_max_score
. The calculation of the max score is
max_score_of_the_test = gap_weight * (total_score_of_the_assignment - sum(gap_max_score in all tests)) / sum(gap_weight in all tests)
+
If both gap_weight
and gap_max_score
are not set, the test case will be assigned with a default weight of 1
.
For example, suppose we create 4 test cases worth total 4 points.
@test_case(1, gap_description="this is the test in the assignment handout")
+@test_case(1234, gap_weight=2, gap_description=["this test is ", "slightly longer"])
+@test_case(3731, gap_weight=4)
+@test_case(7)
+
will produce the following max score assignment
because, for the test case of (1,)
and case of (7,)
, the score is calculated as
and similarly, the score assigned for the case of (1234,)
is from
gap_extra_points
Extra points specified in a test is a number and will be granted to students if they pass the test. For example,
will result in 5 extra points when the student passes the test, shown as follows:
gap_override_check
You can override tests' equality checks by passing a comparator function to gap_override_check
keyword.
For example, suppose you want to compare answers from students' submissions with the solution but do not care about ordering, you can pass gap_override_check=set_equality
to @test_case()
where set_equality
is pre-defined in your script as
def set_equality(solution_answer: Any, submission_answer: Any) -> bool:
+ return set(solution_answer) == set(submission_answer)
+
gap_override_test
You can override entire test by passing a custom function to gap_override_test
parameter, similar to override equality checks. For example, you not only want to check the answers, but also ensure the function is recursive. You can define custom_test
as the following and pass it as gap_override_test=custom_test
in your @test_case()
. Note that you have to run the test and equality check by yourself, for the entire test process is overridden.
from gapper import problem, test_case
+from gapper.core.test_result import TestResult
+from gapper.core.unittest_wrapper import TestCaseWrapper
+
+
+def check_recursive_ast(fn):
+ tree = ast.parse(inspect.getsource(fn))
+ for node in ast.walk(tree):
+ if isinstance(node, ast.Call):
+ if isinstance(node.func, ast.Name):
+ if node.func.id == fn.__name__:
+ return True
+ return False
+
+def custom_test(param: TestCaseWrapper, result_proxy: TestResult, solution, submission) -> None:
+ soln_ans = solution(*param.args, **param.kwargs)
+ subm_ans = submission(*param.args, **param.kwargs)
+
+ param.assertEqual(soln_ans, subm_ans) # equivalent to `assert soln_ans == subm_ans`
+
+ # param.assertTrue(check_recursive_ast(submission))
+ # equivalent to `assert check_recursive_ast(submission)`
+
+ if not check_recursive_ast(submission):
+ result_proxy.set_score(result_proxy.max_score // 2)
+
+@test_case(10, gap_override_test=custom_test)
+@problem()
+def fib(n: int) -> int:
+ ...
+
An overriding function should have the following positional parameter signature
class CustomTestFn(Protocol):
+ def __call__[T](self, param: TestCaseWrapper, result_proxy: TestResult, expected: T, actual: T) -> None:
+ ...
+
gap_post_checks
Consider the situation in which you'd like to provide extra checks but not override the whole test. You can write custom check functions and pass it into gap_post_checks
. For example, you'd like to check if the students' solutions are recursive, you can write
from gapper import problem, test_case
+from gapper.core.test_result import TestResult
+from gapper.core.unittest_wrapper import TestCaseWrapper
+
+
+def check_recursive_ast(fn):
+ tree = ast.parse(inspect.getsource(fn))
+ for node in ast.walk(tree):
+ if isinstance(node, ast.Call):
+ if isinstance(node.func, ast.Name):
+ if node.func.id == fn.__name__:
+ return True
+ return False
+
+def recursive_check(param: TestCaseWrapper, result_proxy: TestResult, solution, submission, sln_results: Tuple[Any, str | None], sub_results: Tuple[Any, str | None]) -> None:
+ if not check_recursive_ast(submission):
+ result_proxy.set_score(result_proxy.max_score // 2)
+ result_proxy.set_pass_status("failed")
+ result_proxy.add_description(
+ "Failed because recursive call not found in submission."
+ )
+
+@test_case(10, gap_post_checks=recursive_check)
+@problem()
+def fib(n: int) -> int:
+ ...
+
A post check function has to follow the following positional parameter signature
class PostChecksFn(Protocol):
+ def __call__[T](
+ self,
+ param: TestCaseWrapper,
+ result_proxy: TestResult,
+ solution: T,
+ submission: T,
+ expected_results: Tuple[Any, str | None],
+ actual_results: Tuple[Any, str | None],
+ ) -> None:
+ ...
+
gap_is_pipeline
The gap_is_pipeline
keyword is invented to simulate a sequence of actions acting on an object. The object going into the pipeline might remain the same, be modified, or be swapped. It comes in handy when testing classes and their instances' behaviors. For example, given a Car
class,
class Car:
+ def __init__(self, x: int, y: int, tank_size: float) -> None:
+ self.x = x
+ self.y = y
+ self.tank_size = tank_size
+ self.fuel = tank_size
+
+ def drive_to(self, x: int, y: int) -> bool:
+ dis = abs(x - self.x) + abs(y - self.y)
+ if dis > self.fuel:
+ return False
+ else:
+ self.fuel -= dis
+ self.x = x
+ self.y = y
+ return True
+
+ def refill(self) -> None:
+ self.fuel = self.tank_size
+
+ def get_fuel(self) -> float:
+ return self.fuel
+
Using pipeline, we can (1) test creating instances with different parameters, (2) running some functions of the instances and check if their outputs match, (3) and checking if attributes and states match (but we recommend requiring student to create uniform interfaces (functions) and not check the properties directly).
from gapper import problem, test_case
+from gapper.core.pipeline_support import Constructor, Function, Property
+
+init = Constructor()
+drive_to = Function("drive_to")
+refill = Function("refill")
+get_fuel = Function("get_fuel")
+x = Property("x")
+y = Property("y")
+tank_size = Property("tank_size")
+
+
+@test_case.pipeline( # using `@test_case.pipeline` is equivalent to `@test_case(gap_is_pipeline=True)`
+ init(0, 0, 100),
+ tank_size, # we recommend creating a uniform interface such as `get_tank_size()`
+ x, # and not to check the attributes directly
+ y,
+ drive_to(10, 10),
+ get_fuel(),
+ drive_to(100, 0),
+ get_fuel(),
+ refill(),
+ drive_to(100, 0),
+ get_fuel(),
+)
+@problem()
+class Car:
+ ...
+
from gapper import problem, test_case, test_cases
+import random
+
+
+@test_case(random.randint(100000, 1000000), gap_max_score=0, gap_extra_points=5)
+@test_cases.param_iter(
+ ([random.randint(100, 10000)] for _ in range(3)),
+ gap_max_score=2,
+ gap_hidden=True,
+ gap_name="random hidden tests",
+)
+@test_cases.param_iter(
+ ([random.randint(100, 10000)] for _ in range(3)),
+ gap_max_score=0,
+ gap_name="random tests",
+)
+@test_case(1, gap_description="this is the test in the assignment handout")
+@test_case(1234, gap_weight=2, gap_description=["this test is ", "slightly longer"])
+@test_case(3731, gap_weight=4)
+@test_case(7)
+@problem(check_stdout=True)
+def print_digits(n: int) -> None:
+ print(n % 10)
+ if n >= 10:
+ print_digits(n // 10)
+
{"use strict";/*!
+ * escape-html
+ * Copyright(c) 2012-2013 TJ Holowaychuk
+ * Copyright(c) 2015 Andreas Lubbe
+ * Copyright(c) 2015 Tiancheng "Timothy" Gu
+ * MIT Licensed
+ */var Ha=/["'&<>]/;Un.exports=$a;function $a(e){var t=""+e,r=Ha.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i