# Old API (TensorFlow < 1.0): per-timestep cross-entropy loss over the
# flattened batch. sequence_loss_by_example takes three parallel lists:
# logits, targets, and per-position weights.
# Fixes vs. the original snippet: `loss =` was split onto its own line
# (a syntax error without a continuation), and the logits/targets
# argument lines were duplicated, which would have passed four
# positional arguments instead of three.
loss = tf.nn.seq2seq.sequence_loss_by_example(
    [logits],                           # output, [batch_size*num_steps, vocab_size]
    [tf.reshape(self._targets, [-1])],  # targets [batch_size, num_steps] flattened to 1-D
    [tf.ones([batch_size * num_steps], dtype=data_type())])  # uniform weights
Change to (TensorFlow >= 1.0 API):
# New API (TensorFlow >= 1.0): identical computation, relocated to
# tf.contrib.legacy_seq2seq. Intermediates named for readability.
flat_targets = tf.reshape(self._targets, [-1])  # [batch_size, num_steps] flattened to 1-D
uniform_weights = tf.ones([batch_size * num_steps], dtype=data_type())
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
    [logits],         # output, [batch_size*num_steps, vocab_size]
    [flat_targets],   # targets
    [uniform_weights])  # per-position weights