# Tutorial note: no theory and no from-scratch implementation here --
# everything works purely by calling TensorFlow APIs.
# Examples only, no detailed explanation.
# Code reference: TensorFlow-GitHub
# NOTE(review): this targets the TF 1.x API (tf.contrib, tf.Session);
# in TF 2.x, tf.contrib.crf moved to tensorflow_addons.text.
import numpy as np
import tensorflow as tf

# Toy input: batch=3, max_words=6, embedding=4
x = np.random.randn(3, 6, 4)
x[1, 4:] = 0          # zero-pad the tail of sequence 1 (its real length is 4)
x_length = [6, 4, 6]  # true (unpadded) length of each sequence
# tag_size = 3: random gold tag ids in [0, 3) for every token position
y = np.random.randint(3, size=[3, 6]).astype(np.int32)
embed_size = 4
tag_size = 3

# Input tensors.
# Shape legend: b = batch, n = max_word, d = embed_size, t = tag_size
# [b, n, d]
inputs = tf.Variable(x, dtype=tf.float32)
# [b]
inputs_len = tf.Variable(x_length, dtype=tf.int32)
# [b, n]
targets = tf.Variable(y, dtype=tf.int32)
batch_size = tf.shape(inputs)[0]
max_word = tf.shape(inputs)[1]

# Linear projection from embeddings to per-tag emission (unary) scores.
# [d, t]
w = tf.Variable(tf.random_normal([embed_size, tag_size]))
# [t]
b = tf.Variable(tf.random_normal([tag_size]))
# Flatten to 2-D so a single matmul scores every token: [b*n, d]
inputs = tf.reshape(inputs, [-1, embed_size])
# [b*n, t]  (fixed: the original comment wrongly said [b*n, d])
unary_scores = tf.matmul(inputs, w) + b
# [b, n, t]
unary_scores = tf.reshape(unary_scores, [batch_size, max_word, tag_size])

# CRF log-likelihood of the gold tag sequences.
# log_likelihood: [b]
# transition_params: [t, t]
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
    unary_scores, targets, inputs_len)
# Training objective: mean negative log-likelihood over the batch.
loss = tf.reduce_mean(-log_likelihood)

# Viterbi decoding with the learned transition matrix.
# viterbi_sequence: [b, n]
# viterbi_score: [b]
viterbi_sequence, viterbi_score = tf.contrib.crf.crf_decode(
    unary_scores, transition_params, inputs_len)
pred = viterbi_sequence

# Fixed: `with sess = tf.Session()` is a syntax error -- a `with` statement
# binds via `as`, and the session body must be indented inside it.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(viterbi_sequence)