aeb516494daa207720b428072ab49eeea7bfff75,scripts/text_generation/sequence_sampling.py,,,#,33
Before Change
"We load a LSTM model that is pre-trained on "
"WikiText as our encoder.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--use-sampling", action="store_true",
help="Use sampling instead of beam search.")
group.add_argument("--use-beam-search", action="store_true",
help="Use beam search instead of random sampling.")
parser.add_argument("--lm_model", type=str, default="awd_lstm_lm_1150",
help="type of the pre-trained model to load, can be \"standard_lstm_lm_200\", "
After Change
# beam search sampler options
subparsers = parser.add_subparsers(help="Sequence generation methods.",
dest="command")
subparsers.required = True
beam_search_parser = subparsers.add_parser("beam-search", help="Use beam search for decoding.")
beam_search_parser.add_argument("--alpha", type=float, default=0.0,
help="Alpha in the length penalty term.")
beam_search_parser.add_argument("--k", type=int, default=5, help="K in the length penalty term.")
# random sampler options
random_sample_parser = subparsers.add_parser("random-sample",
help="Use random sampling for decoding.")
random_sample_parser.add_argument("--temperature", type=float, default=1.0,
help="Softmax temperature used in sampling.")
random_sample_parser.add_argument("--use-top-k", type=int, required=False,
help="Sample only from the top-k candidates.")
# shared options
for p in [beam_search_parser, random_sample_parser]:
p.add_argument("--gpu", type=int, default=0,
help="id of the gpu to use. Set it to empty means to use cpu.")
p.add_argument("--lm-model", type=str, default="awd_lstm_lm_1150",
help="type of the pre-trained model to load, can be \"standard_lstm_lm_200\", "
"\"standard_lstm_lm_650\", \"standard_lstm_lm_1500\", "
"\"awd_lstm_lm_1150\", etc.")
p.add_argument("--max-length", type=int, default=20, help="Maximum sentence length.")
p.add_argument("--print-num", type=int, default=3, help="Number of sentences to display.")
p.add_argument("--bos", type=str, default="I think this works")
p.add_argument("--beam-size", type=int, default=5,
help="Beam size in the beam search sampler.")
args = parser.parse_args()
print(args)
if args.gpu is not None and args.gpu < mx.context.num_gpus():
In pattern: SUPERPATTERN
Frequency: 3
Non-data size: 3
Instances Project Name: dmlc/gluon-nlp
Commit Name: aeb516494daa207720b428072ab49eeea7bfff75
Time: 2019-06-13
Author: xshiab@ust.hk
File Name: scripts/text_generation/sequence_sampling.py
Class Name:
Method Name:
Project Name: chainer/chainercv
Commit Name: 056ec6b546ba3f3120ff12398dec85ef69c4851a
Time: 2017-10-04
Author: yuyuniitani@gmail.com
File Name: examples/classification/train_imagenet_mn.py
Class Name:
Method Name: main
Project Name: NervanaSystems/nlp-architect
Commit Name: d50fdcc5aeb2d5ab80c3ac4ec26fef08358f598d
Time: 2018-07-15
Author: jonathan.mamou@intel.com
File Name: solutions/set_expansion/set_expand.py
Class Name:
Method Name: