# Debug block ("before" version): decode target token ids to text, log the
# merged summary, and print each (source, target) pair with its decoded text.
# NOTE(review): assumes a batch size of 32 — the (step-1)*32+index arithmetic
# computes a global example index; confirm against the actual batch size.
targettxt = [" ".join([text_database.target_vocab._id_to_token_map_py[i] for i in line])
             for line in target.tolist()]
writer.add_summary(mgd, global_step=step)
for index, line in enumerate(zip(source, sourcetxt, target, targettxt)):
    # line = (source_ids, source_text, target_ids, target_text)
    print("{}:{} source:{} txt:{}".format((step - 1) * 32 + index, step, line[0], line[1]))
    print("{}:{} target:{} txt:{}".format((step - 1) * 32 + index, step, line[2], line[3]))
# Dump every trainable variable for inspection.
# NOTE(review): the loop body was missing/unindented in the original snippet;
# restored from the "after" version of this block — confirm intent.
for var in tf.trainable_variables():
    print("name:{}\tshape:{}\ttype:{}".format(var.name, var.shape, var.dtype))
# --- After change ---
# Debug block ("after" version): per-example prints are disabled; instead dump
# the trainable-variable inventory and stop the process.
# The original used C-style `//` comments (a Python syntax error) and crammed
# three disabled prints onto one line — split into proper `#` comments.
# print("{}:{} source:{} txt:{}".format((step-1)*32+index, step, line[0], line[1]))
# print("{}:{} target:{} txt:{}".format((step-1)*32+index, step, line[2], line[3]))
# print("{}:{} predict:{} txt:{}".format((step-1)*32+index, step, line[4], line[5]))
print("var cnt:{}".format(len(tf.trainable_variables())))
for var in tf.trainable_variables():
    # name/shape/dtype of each variable in the current graph.
    print("name:{}\tshape:{}\ttype:{}".format(var.name, var.shape, var.dtype))
# NOTE(review): exit() terminates the whole run here — looks like a temporary
# debugging stop; remove once variable inspection is done.
exit()