-
Notifications
You must be signed in to change notification settings - Fork 18
/
Copy pathWikiNewsDiacritization.py
58 lines (51 loc) · 2 KB
/
WikiNewsDiacritization.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
from llmebench.datasets.dataset_base import DatasetBase
from llmebench.tasks import TaskType
class WikiNewsDiacritizationDataset(DatasetBase):
    """Arabic WikiNews diacritization dataset (Darwish et al., 2017).

    Each sample pairs an undiacritized Arabic sentence ("input") with its
    fully diacritized reference ("label"), read from tab-separated files.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @staticmethod
    def metadata():
        """Return dataset metadata: language, citation, source link,
        license, split filenames, and task type."""
        return {
            "language": "ar",
            "citation": """@inproceedings{darwish-etal-2017-arabic,
        title = "{A}rabic Diacritization: Stats, Rules, and Hacks",
        author = "Darwish, Kareem  and
          Mubarak, Hamdy  and
          Abdelali, Ahmed",
        booktitle = "Proceedings of the Third {A}rabic Natural Language Processing Workshop",
        month = apr,
        year = "2017",
        address = "Valencia, Spain",
        publisher = "Association for Computational Linguistics",
        url = "https://aclanthology.org/W17-1302",
        doi = "10.18653/v1/W17-1302",
        pages = "9--17",
    }""",
            "link": "https://github.com/kdarwish/Farasa/tree/master",
            "license": "Research Purpose Only",
            "splits": {
                "test": "WikiNewsTruth.txt",
                "train": "WikiNewsTruthDev.txt",
            },
            "task_type": TaskType.Other,
        }

    @staticmethod
    def get_data_sample():
        """Return a schematic sample showing the keys each loaded
        example carries."""
        return {
            "input": "Original sentence",
            "label": "Sentence with diacritized words",
        }

    def load_data(self, data_path, no_labels=False):
        """Load tab-separated (plain text, diacritized text) pairs.

        Args:
            data_path: Path to a split file; resolved via ``resolve_path``.
            no_labels: Unused; kept for interface compatibility with
                other ``DatasetBase`` subclasses.

        Returns:
            list[dict]: One dict per non-empty line with keys
            ``input``, ``label``, and ``line_number`` (the 0-based index
            of the line in the raw file).
        """
        data_path = self.resolve_path(data_path)

        data = []
        # Arabic text: force UTF-8 rather than relying on the platform's
        # locale default encoding.
        with open(data_path, "r", encoding="utf-8") as fp:
            for line_idx, line in enumerate(fp):
                # Skip blank lines (e.g. a trailing newline), which would
                # otherwise fail the 2-way tab split.
                if not line.strip():
                    continue
                text, diacritized_text = line.split("\t")
                data.append(
                    {
                        "input": text.strip(),
                        "label": diacritized_text.strip(),
                        "line_number": line_idx,
                    }
                )

        return data