词性标注实战
1 基本原理
2 维特比算法(Viterbi)
3 代码实现
import numpy as np
word2id, tag2id = {}, {}  # forward maps: word -> integer id, tag -> integer id
id2word, id2tag = {}, {}  # inverse maps: id -> word, id -> tag (filled by preprocess)
# 数据预处理
def preprocess():
    """Build word/tag <-> id mappings from the annotated corpus.

    Each line of data/postag.txt is expected to look like "word/TAG"
    (first "/" separates word from tag). Populates the module-level
    dicts word2id/id2word and tag2id/id2tag as a side effect.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original iterated an open() call and never closed it).
    with open("data/postag.txt") as f:
        for line in f:
            items = line.split("/")
            word, tag = items[0], items[1].rstrip()
            if word not in word2id:
                word2id[word] = len(word2id)
                id2word[len(id2word)] = word
            if tag not in tag2id:
                tag2id[tag] = len(tag2id)
                id2tag[len(id2tag)] = tag
preprocess()  # build the vocab/tag mappings before allocating model parameters
M = len(word2id)  # vocabulary size (18978 for this corpus)
N = len(tag2id)  # tag-set size (54 for this corpus)
# HMM model parameters (to be estimated from tag/word counts in the corpus):
pi = np.zeros(N)  # pi[i]: probability that a sentence starts with tag i
A = np.zeros((N, M))  # A[i][j]: emission probability of word j given tag i
# BUG FIX: the transition matrix is tag -> tag, so it must be N x N
# (it was allocated as (N, M), word-sized on the second axis).
B = np.zeros((N, N))  # B[i][j]: transition probability from tag i to tag j
# 注意: 此处缺少从语料统计并归一化 pi/A/B 的训练代码; 参数估计完成后, 再寻找最优的词性序列
def log(v):
    """Smoothed natural log: returns log(1e-4) for v == 0 instead of -inf."""
    smoothed = v if v != 0 else v + 0.0001  # additive smoothing only at zero
    return np.log(smoothed)
# Viterbi algorithm
def viterbi(x, pi, A, B):
    """Print the most likely POS-tag sequence for a sentence.

    x:  input sentence, words separated by single spaces; every word must
        already appear in word2id (unknown words raise KeyError).
    pi: (N,) start-tag probabilities.
    A:  (N, M) emission probabilities, A[tag][word].
    B:  (N, N) transition probabilities, B[prev_tag][next_tag].

    Side effect: prints one tag per word. Returns nothing.
    """
    x = [word2id[word] for word in x.split(" ")]
    T = len(x)
    # dp[t][j]: best log-score of any tag path for words 0..t ending in tag j
    dp = np.zeros((T, N))
    # ptr[t][j]: best predecessor tag of tag j at position t (for backtracing)
    ptr = np.zeros((T, N), dtype=int)
    for j in range(N):  # base case of the DP
        dp[0][j] = log(pi[j]) + log(A[j][x[0]])
    # BUG FIX: start at 1, not 0 — the original loop overwrote the base case
    # and read dp[i-1] as dp[-1] (the still-zero last row) at i == 0.
    for i in range(1, T):  # each word position
        for j in range(N):  # each candidate tag for word i
            dp[i][j] = -9999
            emit = log(A[j][x[i]])  # loop-invariant over k: hoisted
            for k in range(N):  # each predecessor tag
                score = dp[i - 1][k] + log(B[k][j]) + emit
                if score > dp[i][j]:
                    dp[i][j] = score
                    ptr[i][j] = k
    # decoding: recover the best tag sequence by backtracing through ptr
    best_seq = [0] * T
    # step 1: the best tag of the last word maximizes the final DP row
    best_seq[T - 1] = np.argmax(dp[T - 1])
    # step 2: walk backwards, following each position's stored predecessor
    for i in range(T - 2, -1, -1):
        best_seq[i] = ptr[i + 1][best_seq[i + 1]]
    # best_seq now holds the tag id for every word of x
    for i in range(len(best_seq)):
        print(id2tag[best_seq[i]])
# Demo: decode the tag sequence of a sample sentence.
# NOTE(review): assumes every word below is present in word2id — TODO confirm
# the corpus covers them, otherwise viterbi raises KeyError.
x = "Social Security number , passport number and details about the services provided for the payment"
viterbi(x , pi, A, B)
运行结果(每行为输入句子中对应单词的词性):
NNP
NNP
NN
,
DT
NN
CC
NNS
IN
DT
NNS
VBN
IN
DT
NN