            save_training_state(args.state_path)

    print("Stopping because the maximum number of epochs (%d) was reached."
          % args.max_epochs)
    validation_cost = scorer.negative_log_probability(validation_iter)
    trainer.append_validation_cost(validation_cost)
    if trainer.validations_since_min_cost() == 0:
        best_params = rnnlm.get_state()
    return best_params
After Change
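        # Evaluate on the validation set every validation_interval updates.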
        if (args.validation_interval >= 1) and \
           (trainer.total_updates % args.validation_interval == 0):
            validation_cost = -scorer.score_text(validation_iter)
            if numpy.isnan(validation_cost):
                print("Stopping because an invalid floating point operation "
                      "was performed while computing validation set cost. "
                      "(Gradients exploded or vanished?)")
                return best_params
            if numpy.isinf(validation_cost):
                print("Stopping because validation set cost exploded to "
                      "infinity.")
                return best_params
            trainer.append_validation_cost(validation_cost)
            trainer.print_cost_history()
            sys.stdout.flush()
            validations_since_best = trainer.validations_since_min_cost()
            if validations_since_best == 0:
                # The latest validation cost is the lowest seen so far.
                best_params = rnnlm.get_state()
            elif (args.wait_improvement >= 0) and \
                 (validations_since_best > args.wait_improvement):
                # Too many validations without improvement: restore the best
                # parameters and move to the next epoch. If the cost never
                # even dropped below its initial value, also halve the
                # learning rate.
                if validation_cost >= initial_cost:
                    args.learning_rate /= 2
                rnnlm.set_state(best_params)
                trainer.next_epoch()
                break
        if (args.save_interval >= 1) and \
           (trainer.total_updates % args.save_interval == 0):
            # Save the best parameters and the current training state.
            if best_params is not None:
                save_model(args.model_path, best_params)
            save_training_state(args.state_path)
print("Stopping because %d epochs was reached." % args.max_epochs)
validation_cost = -scorer.score_text(validation_iter)
trainer.append_validation_cost(validation_cost)
if trainer.validations_since_min_cost() == 0:
best_params = rnnlm.get_state()
return best_params
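The core of the change is a patience-style early-stopping loop: guard against NaN/Inf validation costs, keep the parameters from the best validation so far, and, after too many validations without improvement, halve the learning rate (when the cost has not dropped below its initial value) and rewind to the best parameters. The sketch below distills that pattern in isolation; the `model` object with `get_state()`/`set_state()` and the `validate` callable are hypothetical stand-ins, not the trainer API used above.

import math

def train_with_patience(model, validate, learning_rate, initial_cost,
                        max_validations=100, patience=3):
    """Minimal sketch of patience-based early stopping.

    Returns (best_params, learning_rate)."""
    best_params = None
    best_cost = math.inf
    since_best = 0  # validations since the lowest cost was seen
    for _ in range(max_validations):
        cost = validate(model)
        # Stop on numerical problems, mirroring the NaN/Inf guards above;
        # the best parameters seen so far are still usable.
        if math.isnan(cost) or math.isinf(cost):
            break
        if cost < best_cost:
            best_cost = cost
            best_params = model.get_state()
            since_best = 0
        else:
            since_best += 1
            if since_best > patience:
                # Too long without improvement: if training never got below
                # the initial cost, halve the learning rate, then restart
                # from the best parameters seen so far.
                if cost >= initial_cost:
                    learning_rate /= 2
                model.set_state(best_params)
                break
    return best_params, learning_rate

In the actual change the halved learning rate is written back to args.learning_rate, so it persists across epochs; the sketch returns it instead, since it has no shared args object.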