Commit: f4275c0b80197e0f1bbd3a2a1a31cf07d85013b1
File: scripts/word_embeddings/evaluate_pretrained.py

Before Change


                allow_extend=True,
                unknown_autoextend=True)
        else:
            token_embedding_ = nlp.embedding.create(
                args_.embedding_name, source=args_.embedding_source)
        name = "-" + args_.embedding_name + "-" + args_.embedding_source
    else:
        token_embedding_ = load_embedding_from_path(args_)
        name = ""

    enforce_max_size(token_embedding_, args_.max_vocab_size)
    known_tokens = set(token_embedding_.idx_to_token)
    # Auto-extend token_embedding with unknown extra eval tokens
    if token_embedding_.unknown_lookup is not None:
        eval_tokens = evaluation.get_tokens_in_evaluation_datasets(args_)
        # pylint: disable=pointless-statement
        token_embedding_[[
            t for t in eval_tokens - known_tokens
            if t in token_embedding_.unknown_lookup
        ]]

        if args_.max_vocab_size is not None and len(
                token_embedding_.idx_to_token) > args_.max_vocab_size:
            logging.warning("Computing embeddings for OOV words that occur "
                            "in the evaluation dataset lead to having "
                            "more words than --max-vocab-size. "
                            "Have %s words (--max-vocab-size %s)",
                            len(token_embedding_.idx_to_token),
                            args_.max_vocab_size)

    similarity_results = evaluation.evaluate_similarity(
        args_, token_embedding_, ctx, logfile=os.path.join(
            args_.logdir, "similarity{}.tsv".format(name)))
    analogy_results = evaluation.evaluate_analogy(
        args_, token_embedding_, ctx, logfile=os.path.join(
            args_.logdir, "analogy{}.tsv".format(name)))

After Change


    enforce_max_size(token_embedding_, args_.analogy_max_vocab_size)
    known_tokens = set(token_embedding_.idx_to_token)

    if args_.similarity_datasets:
        with utils.print_time("find relevant tokens for similarity"):
            tokens = evaluation.get_similarity_task_tokens(args_)
        vocab = nlp.Vocab(nlp.data.count_tokens(tokens))
        with utils.print_time("set {} embeddings".format(len(tokens))):
            vocab.set_embedding(token_embedding_)
        evaluation.evaluate_similarity(
            args_, vocab.embedding, ctx, logfile=os.path.join(
                args_.logdir, "similarity{}.tsv".format(name)))
    if args_.analogy_datasets:
        with utils.print_time("extend open vocabulary with "
                              "OOV tokens for analogy"):
            tokens = evaluation.get_analogy_task_tokens(args_)
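After the change, the script no longer mutates the full TokenEmbedding for similarity tasks. Instead it builds a small Vocab over just the task tokens and attaches the pretrained vectors to it with set_embedding, so evaluation works on an embedding matrix bounded by the task vocabulary. A minimal sketch of that path; the GloVe source and the token list are illustrative placeholders, not from this commit:

    import gluonnlp as nlp

    # Sketch only: 'glove.6B.50d' and the token list are placeholders.
    token_embedding = nlp.embedding.create('glove', source='glove.6B.50d')

    tokens = ['king', 'queen', 'man', 'woman']  # stand-in for task tokens
    vocab = nlp.Vocab(nlp.data.count_tokens(tokens))
    vocab.set_embedding(token_embedding)

    # vocab.embedding now covers only the task tokens (plus Vocab's special
    # tokens), keeping memory use small and fixed during evaluation.
    print(vocab.embedding['king'].shape)  # e.g. (50,)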
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 5

Instances


Project Name: dmlc/gluon-nlp
Commit Name: f4275c0b80197e0f1bbd3a2a1a31cf07d85013b1
Time: 2019-01-09
Author: leonard@lausen.nl
File Name: scripts/word_embeddings/evaluate_pretrained.py
Class Name:
Method Name:


Project Name: NifTK/NiftyNet
Commit Name: 44a908b07a2c6a32dbd868e59147dd777714775b
Time: 2018-05-15
Author: d.shakir@ucl.ac.uk
File Name: niftynet/layer/loss_segmentation.py
Class Name: LossFunction
Method Name: __init__


Project Name: NifTK/NiftyNet
Commit Name: 44a908b07a2c6a32dbd868e59147dd777714775b
Time: 2018-05-15
Author: d.shakir@ucl.ac.uk
File Name: niftynet/layer/loss_regression.py
Class Name: LossFunction
Method Name: __init__