import tensorflow as tf
# Define the target transcriptions (text encoded as integer ids) as a dense tensor of
# shape [batch_size, max_label_seq_length]; shorter sentences are padded with 0 at the end
label = tf.Variable([[1917, 3468, 1024, 2744, 4092, 2613, 112, 922, 4785, 1675],
                     [119, 16, 202, 2352, 2945, 3468, 2744, 112, 0, 0]])
print(label)
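# tf.nn.ctc_loss also accepts the labels as a tf.SparseTensor. A minimal sketch (an
# addition to the original snippet), assuming the zeros above are pure padding:
# tf.sparse.from_dense treats zeros as implicit entries, so the padding disappears.
# (With the default blank_index of 0, 0 should never be a real label id anyway.)
sparse_label = tf.sparse.from_dense(label)
print(sparse_label.values.shape)  # (18,) = 10 + 8 valid ids
# With sparse labels, label_length is passed as None, i.e. later:
# tf.nn.ctc_loss(sparse_label, logits, label_length=None, logit_length=logits_length)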
# Simulate the logits an LSTM model would output; the shape should be
# [frames, batch_size, num_labels]: here frames/timesteps = 155, batch_size = 2,
# and there are 5530 label classes in total (this count includes the blank)
logits = tf.random.normal((155, 2, 5530))
print(len(logits))  # 155, the size of the leading (frames) dimension
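# The layout above is time-major, matching tf.nn.ctc_loss's default
# logits_time_major=True. A sketch, assuming a model that instead emits batch-major
# logits of shape [batch_size, frames, num_labels] (the usual Keras RNN output):
# keep them as-is and pass logits_time_major=False, no manual transpose needed.
batch_major_logits = tf.transpose(logits, perm=[1, 0, 2])
print(batch_major_logits.shape)  # (2, 155, 5530)
# later: tf.nn.ctc_loss(label, batch_major_logits, ..., logits_time_major=False)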
# Number of valid ids (excluding the 0 padding) in each label row; shape should be (batch_size,)
label_length = tf.Variable([10, 8])
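# If 0 only ever appears as padding, label_length can also be derived from the
# padded tensor instead of being hard-coded. A small sketch:
computed_label_length = tf.reduce_sum(tf.cast(tf.not_equal(label, 0), tf.int32), axis=1)
print(computed_label_length)  # tf.Tensor([10  8], shape=(2,), dtype=int32)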
# Number of valid frames per example in logits; the documented shape is (batch_size,).
logits_length = tf.Variable([155, 155])
# The entries here are identical because every example spans the full 155 frames, but
# the docs still call for one entry per batch element, so prefer the form above over
# a single-element tensor such as tf.Variable([155]).
loss = tf.nn.ctc_loss(label, logits, label_length=label_length, logit_length=logits_length)
print(loss)
# Returns one CTC loss value per utterance, shape (batch_size,); the exact numbers
# vary from run to run because the logits are random, e.g.:
# tf.Tensor([1329.7363 1323.1757], shape=(2,), dtype=float32)
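# For inference, the companion decoders collapse repeated ids and strip blanks. A
# hedged sketch using tf.nn.ctc_greedy_decoder: note that ctc_loss above used the
# default blank_index=0, while the decoders default to blank = num_classes - 1, so
# recent TF versions let you pass blank_index=0 explicitly to stay consistent.
decoded, neg_sum_logits = tf.nn.ctc_greedy_decoder(
    logits, sequence_length=logits_length, blank_index=0)
print(tf.sparse.to_dense(decoded[0]))  # best-path label ids per utterance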
# References:
# https://www.cnblogs.com/judes/p/12627177.html
# https://blog.csdn.net/weixin_37721058/article/details/99702801