evaluate.py
from __future__ import print_function
# Use the Python 3 urllib location, falling back to urllib2 on Python 2
try:
    from urllib.request import urlopen
    from urllib.request import Request
except ImportError:
    from urllib2 import urlopen
    from urllib2 import Request
import json

import numpy as np


def evaluate(submission, test_labels, verbose=True):
    # Mark where each predicted index matches the ground-truth label
    hits = np.array(submission, dtype='int') == test_labels[:, np.newaxis]
    # Compute top-1 / top-5 accuracies and the rank-weighted mean score
    top1_accuracy = np.mean(hits[:, 0]) * 100
    top5_accuracy = np.mean(np.any(hits, axis=1)) * 100
    # A correct label at rank 1..5 is worth 100, 90, 80, 70 or 60 points; a miss scores 0
    score = np.mean(np.sum(np.arange(100, 50, -10)[np.newaxis] * hits, axis=1))
    if verbose:
        print('top 1 accuracy {:.2f}%'.format(top1_accuracy))
        print('top 5 accuracy {:.2f}%'.format(top5_accuracy))
        print('mean score: {:.2f}'.format(score))
    return top1_accuracy, top5_accuracy
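

# Worked toy example of the scoring above (illustration only, not part of the
# original file): with submission = [[3, 7, 1, 0, 2]] and
# test_labels = np.array([7]), the correct label is hit at rank 2, so top-1 is
# 0%, top-5 is 100%, and the mean score is 90.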


def submit(name, submission):
    # Submit your result to the leaderboard and print the server's reply
    jsonStr = json.dumps({'submitter': name, 'predictions': submission})
    data = jsonStr.encode('utf-8')
    req = Request('https://leaderboard.datahack.org.il/orcam/api',
                  headers={'Content-Type': 'application/json'},
                  data=data)
    resp = urlopen(req)
    print(json.load(resp))
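

# Minimal usage sketch (an assumption for illustration, not part of the original
# file): it fabricates random labels and 5 predicted class indices per test
# sample just to show the expected shapes. The submit() call is left commented
# out so the example does not hit the real leaderboard endpoint.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    num_samples, num_classes = 1000, 10
    test_labels = rng.randint(num_classes, size=num_samples)
    # Fake a submission: 5 distinct guesses per sample
    submission = [rng.permutation(num_classes)[:5].tolist()
                  for _ in range(num_samples)]
    evaluate(submission, test_labels)
    # submit('your name', submission)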