args.embedding_path)
# Add OOV words if the token_embedding can impute them
token_set = set()
token_set.update(
    filter(lambda x: x in model,
           evaluation.get_tokens_in_evaluation_datasets(args)))
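
In words, the old path keeps only those evaluation tokens that the loaded embedding can actually represent. A minimal runnable sketch of that filtering, using a hypothetical embedding stub in place of model and a literal token list in place of evaluation.get_tokens_in_evaluation_datasets(args):

# Hypothetical stub: membership means the token has a vector.
class StubEmbedding:
    def __init__(self, vectors):
        self._vectors = vectors  # token -> vector

    def __contains__(self, token):
        return token in self._vectors

model = StubEmbedding({'king': [0.1], 'queen': [0.2]})
eval_tokens = ['king', 'queen', 'xyzzy']  # 'xyzzy' is OOV

token_set = set()
token_set.update(filter(lambda x: x in model, eval_tokens))
assert token_set == {'king', 'queen'}
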
After Change
# Pre-compute all words in vocabulary in case of analogy evaluation.
# token_to_idx maps token -> index, so invert it in index order.
idx_to_token = sorted(model.token_to_idx, key=model.token_to_idx.get)
if args.max_vocab_size:
    idx_to_token = idx_to_token[:args.max_vocab_size]
else:
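
For context, a self-contained sketch of the new pre-computation, under the assumption that token_to_idx maps each token to a contiguous index starting at 0 (the vocabulary below is hypothetical):

# Hypothetical vocabulary: token -> contiguous index.
token_to_idx = {'the': 0, 'king': 1, 'queen': 2, 'castle': 3}

# Invert the mapping in index order to recover idx_to_token.
idx_to_token = sorted(token_to_idx, key=token_to_idx.get)
assert idx_to_token == ['the', 'king', 'queen', 'castle']

# Optionally cap the vocabulary, mirroring args.max_vocab_size above.
max_vocab_size = 2
idx_to_token = idx_to_token[:max_vocab_size]
assert idx_to_token == ['the', 'king']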