# Pre-compute all words in vocabulary in case of analogy evaluation.
# token_to_idx maps token -> index, so indexing it with an integer
# position was a bug; recover the index-ordered token list by sorting
# the tokens by their assigned index instead.
idx_to_token = sorted(model.token_to_idx, key=model.token_to_idx.get)
if args.max_vocab_size:
    # Keep only the first max_vocab_size tokens (lowest indices).
    idx_to_token = idx_to_token[:args.max_vocab_size]
# --- After change ---
# Recover the index-ordered token list: _token_to_idx maps token -> index,
# so sorting tokens by their assigned index yields position i == token i.
idx_to_token = sorted(model._token_to_idx, key=model._token_to_idx.get)
if not args.analogy_datasets:
    # Prune tokens not used in evaluation datasets.
    # NOTE(review): pruning happens only when no analogy datasets are
    # requested — analogy evaluation presumably needs the full vocabulary.
    eval_tokens_ = set(
        evaluation.get_tokens_in_evaluation_datasets(args))
    idx_to_token = [t for t in idx_to_token if t in eval_tokens_]
if args.max_vocab_size:
    # Truncate to the first max_vocab_size tokens (lowest indices).
    idx_to_token = idx_to_token[:args.max_vocab_size]