vocab_build.py
import nltk
import pickle
import argparse
from collections import Counter
import json
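
# Note: nltk.tokenize.word_tokenize depends on the NLTK 'punkt' tokenizer
# models; if they are not installed yet, run nltk.download('punkt') once.
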
class Vocabulary(object):
    """Simple vocabulary wrapper."""

    def __init__(self):
        # Two dictionaries give O(1) lookup in both directions:
        # word -> index and index -> word.
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 0

    def add_word(self, word):
        # Add the word and assign it the next free index, but only if it
        # has not been seen before.
        if word not in self.word2idx:
            self.word2idx[word] = self.idx
            self.idx2word[self.idx] = word
            self.idx += 1

    def __call__(self, word):
        # Map a word to its index; unknown words fall back to '<unk>'.
        if word not in self.word2idx:
            return self.word2idx['<unk>']
        return self.word2idx[word]

    def __len__(self):
        return len(self.word2idx)

    def print_vocab(self):
        print(self.idx2word)
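
# A minimal usage sketch of Vocabulary (illustrative only, not executed):
#   v = Vocabulary()
#   v.add_word('<unk>')   # gets index 0
#   v.add_word('dog')     # gets index 1
#   v('dog')    -> 1
#   v('zebra')  -> 0      # unseen words fall back to '<unk>'
#   len(v)      -> 2
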
def build_vocab(json_file, threshold):
    """Build a Vocabulary from a caption JSON file."""
    with open(json_file, "r") as f:
        data = json.load(f)
    counter = Counter()
    # The JSON has two top-level keys, 'images' and 'dataset'; 'dataset' is
    # just a string, so only the entries under 'images' are tokenized. (The
    # original code also looped over the top-level keys, which counted every
    # caption once per key and inflated all frequencies.)
    images = data['images']
    for j, image in enumerate(images):
        # Only the first caption of each image is counted.
        sentence = image['sentences'][0]['raw']
        tokens = nltk.tokenize.word_tokenize(sentence)
        counter.update(tokens)
        if (j + 1) % 1000 == 0:
            print("[{}/{}] Tokenized the captions.".format(j + 1, len(images)))
    # Discard words whose frequency is below 'threshold', then keep only
    # purely alphabetic tokens, lowercased. Counting happens before
    # lowercasing, so 'The' and 'the' are counted separately.
    words = [word for word, cnt in counter.items() if cnt >= threshold]
    words = [word.lower() for word in words if word.isalpha()]
    # Create a vocab wrapper and add the special tokens first, so that
    # '<pad>' gets index 0.
    vocab = Vocabulary()
    vocab.add_word('<pad>')
    vocab.add_word('<start>')
    vocab.add_word('<end>')
    vocab.add_word('<unk>')
    # Add the remaining words; add_word ignores duplicates, so words that
    # collide after lowercasing are only added once.
    for word in words:
        vocab.add_word(word)
    vocab.print_vocab()
    return vocab
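
# Loading the pickled vocabulary later (a sketch; 'vocab.pkl' is a
# hypothetical path). Unpickling a Vocabulary instance requires this
# class to be importable in the loading process:
#   import pickle
#   with open('vocab.pkl', 'rb') as f:
#       vocab = pickle.load(f)
#   ids = [vocab('<start>')] + [vocab(t) for t in tokens] + [vocab('<end>')]
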
def main(args):
    vocab = build_vocab(json_file=args.caption_path, threshold=args.threshold)
    vocab_path = args.vocab_path
    with open(vocab_path, 'wb') as f:
        pickle.dump(vocab, f)
    print("Total vocabulary size: {}".format(len(vocab)))
    print("Saved the vocabulary wrapper to '{}'".format(vocab_path))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--caption_path', type=str, required=True,
                        help='path to the train annotation JSON file')
    parser.add_argument('--vocab_path', type=str, required=True,
                        help='path for saving the vocabulary wrapper')
    parser.add_argument('--threshold', type=int, default=1,
                        help='minimum word count threshold')
    args = parser.parse_args()
    main(args)
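
# Example invocation (file names and threshold are illustrative):
#   python vocab_build.py --caption_path captions.json \
#       --vocab_path vocab.pkl --threshold 4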