diff --git a/remote_challenge_evaluation/eval_ai_interface.py b/remote_challenge_evaluation/eval_ai_interface.py
index 0c7e90bb2..4f48f91b8 100644
--- a/remote_challenge_evaluation/eval_ai_interface.py
+++ b/remote_challenge_evaluation/eval_ai_interface.py
@@ -7,6 +7,7 @@
 URLS = {
     "get_message_from_sqs_queue": "/api/jobs/challenge/queues/{}/",
     "get_submission_by_pk": "/api/jobs/submission/{}",
+    "get_challenge_phase_by_pk": "/api/challenges/challenge/phase/{}",
     "delete_message_from_sqs_queue": "/api/jobs/queues/{}/",
     "update_submission": "/api/jobs/challenge/{}/update_submission/",
 }
@@ -139,3 +140,9 @@ def get_submission_by_pk(self, submission_pk):
         url = self.return_url_per_environment(url)
         response = self.make_request(url, "GET")
         return response
+
+    def get_challenge_phase_by_pk(self, phase_pk):
+        url = URLS.get("get_challenge_phase_by_pk").format(phase_pk)
+        url = self.return_url_per_environment(url)
+        response = self.make_request(url, "GET")
+        return response
diff --git a/remote_challenge_evaluation/evaluate.py b/remote_challenge_evaluation/evaluate.py
index b30bff1ae..297f46929 100644
--- a/remote_challenge_evaluation/evaluate.py
+++ b/remote_challenge_evaluation/evaluate.py
@@ -1,4 +1,3 @@
-import random
 
 
 def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **kwargs):
@@ -11,7 +10,11 @@ def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **
 
         `test_annotations_file`: Path to test_annotation_file on the server
             We recommend setting a default `test_annotation_file` or using `phase_codename`
-            to select the appropriate file.
+            to select the appropriate file. For example, you could load the test annotation
+            file for the current phase as:
+            ```
+            test_annotation_file = json.load(open(f"{phase_codename}_path", "r"))
+            ```
         `**kwargs`: keyword arguments that contains additional submission
         metadata that challenge hosts can use to send slack notification.
         You can access the submission metadata
@@ -39,43 +42,35 @@ def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **
             'submitted_at': u'2017-03-20T19:22:03.880652Z'
         }
     """
+
+    '''
+    # Load the test annotation file for the current phase
+    test_annotation_file = json.load(open(f"{phase_codename}_path", "r"))
+    '''
     output = {}
     if phase_codename == "dev":
         print("Evaluating for Dev Phase")
         output["result"] = [
             {
-                "train_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
-            }
+                "split": "train_split",
+                "show_to_participant": True,
+                "accuracies": {"Metric1": 90},
+            },
         ]
-        # To display the results in the result file
-        output["submission_result"] = output["result"][0]["train_split"]
         print("Completed evaluation for Dev Phase")
     elif phase_codename == "test":
         print("Evaluating for Test Phase")
         output["result"] = [
             {
-                "train_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
+                "split": "train_split",
+                "show_to_participant": True,
+                "accuracies": {"Metric1": 90},
             },
             {
-                "test_split": {
-                    "Metric1": random.randint(0, 99),
-                    "Metric2": random.randint(0, 99),
-                    "Metric3": random.randint(0, 99),
-                    "Total": random.randint(0, 99),
-                }
+                "split": "test_split",
+                "show_to_participant": False,
+                "accuracies": {"Metric1": 50, "Metric2": 40},
             },
         ]
-        # To display the results in the result file
-        output["submission_result"] = output["result"][0]
         print("Completed evaluation for Test Phase")
     return output
diff --git a/remote_challenge_evaluation/main.py b/remote_challenge_evaluation/main.py
index 3f3cde779..932ef8899 100644
--- a/remote_challenge_evaluation/main.py
+++ b/remote_challenge_evaluation/main.py
@@ -1,7 +1,9 @@
+import json
 import os
 import time
 
 import requests
+
 from eval_ai_interface import EvalAI_Interface
 from evaluate import evaluate
 
@@ -15,17 +17,18 @@
 
 
 def download(submission, save_dir):
-    response = requests.get(submission.input_file.url)
-    submission_file_path = os.path.join(save_dir, submission.input_file.name)
+    response = requests.get(submission["input_file"])
+    submission_file_path = os.path.join(
+        save_dir, submission["input_file"].split("/")[-1]
+    )
     with open(submission_file_path, "wb") as f:
         f.write(response.content)
     return submission_file_path
 
 
-def update_running(evalai, submission, job_name):
+def update_running(evalai, submission_pk):
     status_data = {
-        "submission": submission,
-        "job_name": job_name,
+        "submission": submission_pk,
         "submission_status": "RUNNING",
     }
     update_status = evalai.update_submission_status(status_data)
@@ -79,7 +82,7 @@ def update_finished(
             phase_pk = message_body.get("phase_pk")
             # Get submission details -- This will contain the input file URL
             submission = evalai.get_submission_by_pk(submission_pk)
-
+            challenge_phase = evalai.get_challenge_phase_by_pk(phase_pk)
             if (
                 submission.get("status") == "finished"
                 or submission.get("status") == "failed"
@@ -89,15 +92,17 @@
                 evalai.delete_message_from_sqs_queue(message_receipt_handle)
 
             else:
-                update_running(submission, job_name="")
+                if submission.get("status") == "submitted":
+                    update_running(evalai, submission_pk)
                 submission_file_path = download(submission, save_dir)
                 try:
                     results = evaluate(
-                        submission_file_path,
-                        submission.challenge_phase.codename
+                        submission_file_path, challenge_phase["codename"]
+                    )
+                    update_finished(
+                        evalai, phase_pk, submission_pk, json.dumps(results["result"])
                     )
-                    update_finished(phase_pk, submission_pk, results)
                 except Exception as e:
-                    update_failed(phase_pk, submission_pk, str(e))
+                    update_failed(evalai, phase_pk, submission_pk, str(e))
         # Poll challenge queue for new submissions
         time.sleep(60)
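
With the patch applied, `evaluate()` returns one dict per split carrying `split`, `show_to_participant`, and `accuracies`, and `main.py` reports `json.dumps(results["result"])` when a submission finishes. As a minimal local smoke test (not part of the patch; the script name and submission path below are placeholders), a host could check the new output shape before deploying the worker:

```
# smoke_test.py -- illustrative only; run from the remote_challenge_evaluation/ directory
import json

from evaluate import evaluate

# Placeholder path: the template's evaluate() never opens the submission file,
# so this file does not need to exist for the check to run.
results = evaluate("sample_submission.json", "test")

for entry in results["result"]:
    # Each entry should expose the split name, a visibility flag, and its metrics.
    assert {"split", "show_to_participant", "accuracies"} <= entry.keys()
    print(entry["split"], entry["accuracies"])

# main.py serializes the result list like this before marking the submission finished.
print(json.dumps(results["result"]))
```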