# wiki_word2vec.py
import os
import re
import sys
import glob
import logging

import gensim
# BasicTokenizer (and the FullTokenizer mentioned in the TODO below) appear to
# come from BERT's tokenization.py, assumed to be a local module here.
from tokenization import BasicTokenizer

logging.basicConfig(
    format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

def sentence_tokenize(para):
    para = re.sub(r'([。!??])([^”’])', r'\1\n\2', para)  # single-character sentence terminators
    para = re.sub(r'(\.{6})([^”’])', r'\1\n\2', para)  # English ellipsis (......)
    para = re.sub(r'(…{2})([^”’])', r'\1\n\2', para)  # Chinese ellipsis (……)
    # If a terminator precedes a closing quote, the quote is the real end of
    # the sentence, so the split marker \n goes after the quote; note that the
    # rules above deliberately leave such quotes untouched.
    para = re.sub(r'([。!??][”’])([^,。!??])', r'\1\n\2', para)
    para = para.rstrip()  # drop any stray trailing \n at the end of the paragraph
    # Many rule sets also split on the semicolon (;), but it is ignored here,
    # as are dashes, English double quotes, etc.; adjust as needed.
    return para.split("\n")
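
# A quick illustration of the splitter above; the sample paragraph is a
# hypothetical example, and each sentence keeps its own terminator:
#
#   >>> sentence_tokenize('今天天气很好。我们去公园吧!')
#   ['今天天气很好。', '我们去公园吧!']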

class WikiSentenceGenerator:
    """Streams tokenized sentences from a WikiExtractor output directory."""

    def __init__(self, wiki_dir):
        self.wiki_dir = wiki_dir
        self.tokenizer = BasicTokenizer()

    def __iter__(self):
        # WikiExtractor writes files such as <wiki_dir>/AA/wiki_00 and wraps
        # each article in <doc ...> ... </doc> tags, which are skipped here.
        for text in glob.glob(os.path.join(self.wiki_dir, '*/wiki_*')):
            with open(text, 'r', encoding='utf8') as fin:
                for line in fin:
                    if line.startswith('<doc') or line.startswith('</doc'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    for sentence in sentence_tokenize(line):
                        # TODO: map low-frequency words to <unk>, and use FullTokenizer
                        yield self.tokenizer.tokenize(sentence)
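
# Minimal usage sketch (assumes zhwiki_extracted/ is a WikiExtractor output
# directory holding files like zhwiki_extracted/AA/wiki_00; the path is a
# hypothetical example):
#
#   sentences = WikiSentenceGenerator('zhwiki_extracted')
#   print(next(iter(sentences)))  # token list of the first sentence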

def main(argv):
    if len(argv) != 3:
        sys.exit('Usage: python wiki_word2vec.py <wiki_dir> <output_path>')
    wiki_dir = argv[1]
    output_path = argv[2]
    sentences = WikiSentenceGenerator(wiki_dir)
    # TODO: first build vocab
    model = gensim.models.Word2Vec(
        sentences,
        min_count=5,
        workers=8,
        epochs=10)  # this parameter was named `iter` before gensim 4.0
    model.wv.save_word2vec_format(output_path)


if __name__ == "__main__":
    main(sys.argv)
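
# The word2vec-format file written above can later be reloaded without the
# full model; a minimal sketch using gensim's KeyedVectors (the file name and
# query word are hypothetical examples):
#
#   from gensim.models import KeyedVectors
#   wv = KeyedVectors.load_word2vec_format('wiki_vectors.txt')
#   print(wv.most_similar('北京', topn=5))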