diff --git a/evalai/submissions.py b/evalai/submissions.py index ac8bccf62..86a2f6b86 100644 --- a/evalai/submissions.py +++ b/evalai/submissions.py @@ -19,6 +19,7 @@ from evalai.utils.submissions import ( display_submission_details, display_submission_result, + display_submission_stderr, convert_bytes_to, ) from evalai.utils.urls import URLS @@ -63,6 +64,18 @@ def result(ctx): display_submission_result(ctx.submission_id) +@submission.command() +@click.pass_obj +def stderr(ctx): + """ + Display the submission stderr in Terminal output + + Invoked by `evalai submission SUBMISSION_ID stderr`. + """ + + display_submission_stderr(ctx.submission_id) + + @click.command() @click.argument("IMAGE", nargs=1) @click.option( diff --git a/evalai/utils/submissions.py b/evalai/utils/submissions.py index 19d9fc3e4..0430a798d 100644 --- a/evalai/utils/submissions.py +++ b/evalai/utils/submissions.py @@ -281,6 +281,14 @@ def display_submission_result(submission_id): ) + +def display_submission_stderr(submission_id): + """ + Function to display stderr file of a particular submission in Terminal output + """ + response = submission_details_request(submission_id).json() + echo(requests.get(response['stderr_file']).text) + + def convert_bytes_to(byte, to, bsize=1024): """ Convert bytes to KB, MB, GB etc. 
diff --git a/tests/data/submission_response.py b/tests/data/submission_response.py index e9ee30474..9227f0f0f 100644 --- a/tests/data/submission_response.py +++ b/tests/data/submission_response.py @@ -1,9 +1,29 @@ submission = """ { - "count": 4, + "count": 5, "next": null, "previous": null, "results": [ + { + "challenge_phase": 251, + "created_by": 5672, + "execution_time": 0.085137, + "id": 48728, + "input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-ac19-409d-a97d-7240ea336a0c.txt", + "is_public": false, + "method_description": null, + "method_name": null, + "participant_team": 3519, + "participant_team_name": "test", + "project_url": null, + "publication_url": null, + "status": "failed", + "stderr_file": null, + "stdout_file": null, + "submission_result_file": null, + "submitted_at": "2018-06-03T09:24:09.866590Z", + "when_made_public": null + }, { "challenge_phase": 7, "created_by": 4, @@ -123,6 +143,30 @@ "when_made_public": null }""" + + submission_stderr_details = """ + { + "challenge_phase": 251, + "created_by": 5672, + "execution_time": 0.085137, + "id": 48728, + "input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-ac19-409d-a97d-7240ea336a0c.txt", + "is_public": false, + "method_description": null, + "method_name": null, + "participant_team": 3519, + "participant_team_name": "test", + "project_url": null, + "publication_url": null, + "status": "submitted", + "stderr_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/39f3b087-8f86-4757-9c93-bf0b26c1a3c2.txt", + "stdout_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/0b2c4396-e078-4b95-b041-83801a430874.txt", + "submission_result_file": null, + "submitted_at": "2018-06-03T09:24:09.866590Z", + "when_made_public": null + }""" + + aws_credentials = """ { "success": { diff --git a/tests/test_submissions.py b/tests/test_submissions.py index 02f65fc76..ae8ebdd45 
100644 --- a/tests/test_submissions.py +++ b/tests/test_submissions.py @@ -290,3 +290,54 @@ def test_make_submission_for_docker_based_challenge( ], ) assert result.exit_code == 0 + + +class TestDisplaySubmissionStderr(BaseTestClass): + def setup(self): + self.submission = json.loads(submission_response.submission_stderr_details) + + url = "{}{}" + responses.add( + responses.GET, + url.format(API_HOST_URL, URLS.get_submission.value).format("48728"), + json=self.submission, + status=200, + ) + + responses.add( + responses.GET, + self.submission["stderr_file"], + json=json.loads(submission_response.submission_stderr_details), + status=200, + ) + + @responses.activate + def test_display_submission_stderr_with_a_string_argument(self): + expected = ( + "Usage: submission [OPTIONS] SUBMISSION_ID COMMAND [ARGS]...\n" + '\nError: Invalid value for "SUBMISSION_ID": four is not a valid integer\n' + ) + runner = CliRunner() + result = runner.invoke(submission, ["four"]) + response = result.output + assert response == expected + + @responses.activate + def test_display_submission_stderr_with_no_argument(self): + expected = ( + "Usage: submission [OPTIONS] SUBMISSION_ID COMMAND [ARGS]...\n" + '\nError: Missing argument "SUBMISSION_ID".\n' + ) + runner = CliRunner() + result = runner.invoke(submission) + response = result.output + assert response == expected + + @responses.activate + def test_display_submission_stderr_details(self): + expected = json.dumps(self.submission) + runner = CliRunner() + result = runner.invoke(submission, ["48728", "stderr"]) + response = result.output.strip() + + assert response == expected