Project Name: minimaxir/textgenrnn
Commit: ea2b8969e20223b33a9bad6df9d5ee8065998855
File Name: textgenrnn/utils.py
Method Name: textgenrnn_generate

Before Change



    if single_text:
        if word_level:
            text = prefix.split() if prefix else [""]
        else:
            text = list(prefix) if prefix else [""]
        max_gen_length += maxlen
    else:
        if word_level:
            text = [meta_token] + prefix.split() if prefix else [meta_token]
        else:
            text = [meta_token] + list(prefix) if prefix else [meta_token]
    next_char = ""

    if not isinstance(temperature, list):
        temperature = [temperature]

    if model_input_count(model) > 1:
        model = Model(inputs=model.input[0], outputs=model.output[1])

    while next_char != meta_token and len(text) < max_gen_length:
        encoded_text = textgenrnn_encode_sequence(text[-maxlen:],
                                                  vocab, maxlen)
        next_temperature = temperature[(len(text) - 1) % len(temperature)]
        next_index = textgenrnn_sample(
            model.predict(encoded_text, batch_size=1)[0],
            next_temperature)
        next_char = indices_char[next_index]
        text += [next_char]

    collapse_char = " " if word_level else ""

    # if single text, ignore sequences generated w/ padding
    # if not single text, strip the <s> meta_tokens
    if single_text:
        text = text[maxlen:]
    else:
        text = text[1:-1]

    text_joined = collapse_char.join(text)
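
The while loop above cycles through the temperature list as tokens are generated, so a schedule such as [1.0, 0.5, 0.2] repeats every three tokens. Below is a minimal sketch of that scheduling, with a hypothetical sample_with_temperature standing in for textgenrnn_sample (whose internals may differ):

    import numpy as np

    def sample_with_temperature(preds, temperature):
        # Reweight the predicted distribution by temperature, then draw one index.
        preds = np.asarray(preds, dtype="float64")
        preds = np.exp(np.log(np.maximum(preds, 1e-12)) / temperature)
        preds /= preds.sum()
        return int(np.argmax(np.random.multinomial(1, preds)))

    preds = [0.1, 0.2, 0.3, 0.4]
    temperature = [1.0, 0.5, 0.2]  # lists cycle once per generated token
    for n_generated in range(6):
        t = temperature[n_generated % len(temperature)]
        idx = sample_with_temperature(preds, t)

Lower temperatures sharpen the distribution toward the most likely token; higher ones flatten it, which is why a cycling schedule alternates between safe and adventurous picks.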

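Both versions also carve a single-input generation model out of a multi-input training model via Model(inputs=model.input[0], outputs=model.output[1]). A minimal sketch of that slicing, assuming standalone Keras as textgenrnn used at the time; the shapes and layer graph here are illustrative assumptions, not textgenrnn's actual architecture:

    from keras.layers import Dense, Input, concatenate
    from keras.models import Model

    token_in = Input(shape=(40,), name="tokens")
    context_in = Input(shape=(5,), name="context")
    h = Dense(16, activation="relu")(token_in)
    # output[0] uses both inputs; output[1] depends on token_in alone
    joint_out = Dense(100, activation="softmax")(concatenate([h, context_in]))
    token_out = Dense(100, activation="softmax")(h)
    full = Model(inputs=[token_in, context_in], outputs=[joint_out, token_out])

    # Keep only the token path for generation, as in the code above:
    gen_model = Model(inputs=full.input[0], outputs=full.output[1])

This works because the second output is reachable from the first input alone, so the sliced model needs no context features at generation time.
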
After Change


    if word_level and prefix:
        punct = "!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\n\\t'‘’“”’–—"
        prefix = re.sub("([{}])".format(punct), r" \1 ", prefix)
        prefix_t = [x.lower() for x in prefix.split()]

    if not word_level and prefix:
        prefix_t = list(prefix)

    if single_text:
        text = prefix_t if prefix else [""]
        max_gen_length += maxlen
    else:
        text = [meta_token] + prefix_t if prefix else [meta_token]

    next_char = ""

    if not isinstance(temperature, list):
        temperature = [temperature]

    if model_input_count(model) > 1:
        model = Model(inputs=model.input[0], outputs=model.output[1])

    while next_char != meta_token and len(text) < max_gen_length:
        encoded_text = textgenrnn_encode_sequence(text[-maxlen:],
                                                  vocab, maxlen)
        next_temperature = temperature[(len(text) - 1) % len(temperature)]
        next_index = textgenrnn_sample(
            model.predict(encoded_text, batch_size=1)[0],
            next_temperature)
        next_char = indices_char[next_index]
        text += [next_char]

    collapse_char = " " if word_level else ""

    # if single text, ignore sequences generated w/ padding
    # if not single text, strip the <s> meta_tokens
    if single_text:
        text = text[maxlen:]
    else:
        text = text[1:-1]

    text_joined = collapse_char.join(text)
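
The new word-level branch spaces out punctuation before splitting, so each punctuation mark becomes its own lowercased token in the prefix. A small worked example of that tokenization, using the punctuation set from the code above (the sample sentence is invented):

    import re

    punct = "!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\n\\t'‘’“”’–—"
    prefix = "Hello, World! It's textgenrnn."
    prefix = re.sub("([{}])".format(punct), r" \1 ", prefix)
    prefix_t = [x.lower() for x in prefix.split()]
    # prefix_t == ['hello', ',', 'world', '!', 'it', "'", 's', 'textgenrnn', '.']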
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 2

Instances


Project Name: minimaxir/textgenrnn
Commit Name: ea2b8969e20223b33a9bad6df9d5ee8065998855
Time: 2018-08-05
Author: max@minimaxir.com
File Name: textgenrnn/utils.py
Class Name:
Method Name: textgenrnn_generate


Project Name: OpenNMT/OpenNMT-py
Commit Name: bb178764e1822ef0ad558d9991b905c35d882551
Time: 2017-11-02
Author: dengyuntian@gmail.com
File Name: translate.py
Class Name:
Method Name: main


Project Name: OpenNMT/OpenNMT-py
Commit Name: b40c5085bfd8f46a7bfca10b73f91b55a353c918
Time: 2019-01-29
Author: benzurdopeters@gmail.com
File Name: onmt/decoders/decoder.py
Class Name: InputFeedRNNDecoder
Method Name: _run_forward_pass