write_out.py
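"""Write out rendered few-shot example prompts for lm-eval tasks to text files.

For each requested task and document split, the script dumps the few-shot
context strings produced by ``task.fewshot_context`` into a file named after
the task under --output_base_path, with individual examples separated by
EXAMPLE_DIVIDER lines.

Example invocation (task name and output path are illustrative, not taken
from this file):

    python write_out.py --tasks hellaswag --sets val \
        --num_fewshot 5 --num_examples 3 --output_base_path ./prompt_dumps
"""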
import argparse
import os
import random

import numpy as np

from lm_eval import tasks
from lm_eval.tasks import TaskManager
from lm_eval.utils import eval_logger, join_iters

EXAMPLE_DIVIDER = "!!@@##@@!! -- Example {i}\n"

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_base_path", "--output_path", required=True)
    parser.add_argument("--tasks", default="all_tasks")
    parser.add_argument("--sets", type=str, default="val")  # example: val,test
    parser.add_argument("--num_fewshot", type=int, default=1)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_examples", type=int, default=1)
    parser.add_argument(
        "--include_path",
        type=str,
        default=None,
        help="Additional path to include for external task definitions.",
    )
    parser.add_argument(
        "--verbosity",
        type=str,
        default="INFO",
        help="Logging verbosity; errors are logged when tasks are not registered.",
    )
    return parser.parse_args()

def main():
    args = parse_args()
    np.random.seed(args.seed)

    if args.include_path is not None:
        eval_logger.info(f"Including path: {args.include_path}")
    task_manager = TaskManager(args.verbosity, include_path=args.include_path)

    # Resolve the requested task names into task objects.
    if args.tasks == "all_tasks":
        task_names = task_manager.all_tasks
    else:
        task_names = args.tasks.split(",")
    task_dict = tasks.get_task_dict(task_names, task_manager)

    os.makedirs(args.output_base_path, exist_ok=True)
    for task_name, task in task_dict.items():
        if isinstance(task, tuple):
            _, task = task
        rnd = random.Random()
        rnd.seed(args.seed)

        # Collect the requested document splits that this task actually provides.
        iters = []
        for set in args.sets.split(","):
            docs = None
            if set == "train" and task.has_training_docs():
                docs = task.training_docs()
            if set == "val" and task.has_validation_docs():
                docs = task.validation_docs()
            if set == "test" and task.has_test_docs():
                docs = task.test_docs()
            if docs is not None:
                iters.append(docs)
        if len(iters) == 0:
            raise ValueError(
                f"Passed --sets '{args.sets}' but this task has no splits which match. "
                "Please specify a different --sets value."
            )
        docs = join_iters(iters)

        # Write each example's few-shot context, preceded by a divider line.
        # num_examples <= 0 means dump every available document.
        with open(
            os.path.join(args.output_base_path, task_name), "w", encoding="utf8"
        ) as f:
            for i, doc in (
                zip(range(args.num_examples), docs)
                if args.num_examples > 0
                else enumerate(docs)
            ):
                f.write(EXAMPLE_DIVIDER.format(i=i))
                ctx = task.fewshot_context(
                    doc=doc,
                    num_fewshot=args.num_fewshot,
                )
                f.write(ctx + "\n")

if __name__ == "__main__":
    main()
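
A minimal sketch of how a dumped file could be read back, assuming this script has already written a file at the illustrative path below; the divider format is the one defined by EXAMPLE_DIVIDER above, and this helper is not part of write_out.py:

import re

# Matches divider lines such as "!!@@##@@!! -- Example 3".
DIVIDER_RE = re.compile(r"!!@@##@@!! -- Example \d+\n")

# Hypothetical path: one file per task is written under --output_base_path.
with open("./prompt_dumps/hellaswag", encoding="utf8") as f:
    blob = f.read()

# The leading split element is empty because each file starts with a divider.
examples = [chunk for chunk in DIVIDER_RE.split(blob) if chunk]
print(f"Recovered {len(examples)} few-shot contexts")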