import re
import numpy as np

# Build the vocabulary: one word per line in vocab.txt
with open('vocab.txt') as f:
    word_dic = set(line.rstrip() for line in f)
# Generate candidate corrections
def generate_edit_one(word):
    """
    Return all strings within edit distance 1 of word
    (not yet filtered against the vocabulary).
    """
    # Three single-character edits: 1. insert  2. delete  3. replace
    # appl -> replace: bppl, cppl, aapl, abpl...
    #         insert:  bappl, cappl, abppl, acppl...
    #         delete:  ppl, apl, app
    letters = "abcdefghijklmnopqrstuvwxyz"
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    # insert
    inserts = [L + c + R for L, R in splits for c in letters]
    # delete
    deletes = [L + R[1:] for L, R in splits if R]
    # replace
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    return set(inserts + deletes + replaces)

def generate_candidates(word):
    """
    word: the given (misspelled) input
    Returns all valid candidates: edit-distance-1 strings present in the vocabulary.
    """
    # Filter out anything that does not appear in the vocabulary
    return [w for w in generate_edit_one(word) if w in word_dic]
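# Quick check (illustrative only - the actual result depends on what is in
# vocab.txt; 'apply' and 'apple' are plausible hits for this input):
print(generate_candidates('appl'))   # e.g. ['apply', 'apple']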
def generate_edit_two(word):
    """
    Given a string, return vocabulary words within edit distance 2.
    The intermediate strings come from the unfiltered edit-1 set; if they
    were vocabulary-filtered first, this function would return nothing
    whenever generate_candidates(word) is empty - exactly the case it is
    needed for.
    """
    return [e2 for e1 in generate_edit_one(word) for e2 in generate_candidates(e1)]
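# Why the unfiltered intermediates matter (illustrative): 'exmaple' is two
# single-character edits away from 'example' (a transposition costs two basic
# edits here), so it may have no edit-1 candidate at all while still being
# recoverable at edit distance 2. The output depends on vocab.txt.
print(generate_edit_two('exmaple')[:10])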
# # Download the nltk Reuters corpus (only needed once)
# import nltk
# nltk.download('reuters')
# nltk.download('punkt')
# Read the corpus (a collection of sentences) to build the language model
from nltk.corpus import reuters
categories = reuters.categories()
corpus = reuters.sents(categories=categories)
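# Sanity check (optional): reuters.sents() yields tokenized sentences, so
# each element of corpus is already a list of tokens.
print(len(corpus), corpus[0][:5])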
# Build the language model: bigram, from raw counts
term_count = {}
bigram_count = {}
for doc in corpus:
    doc = ['<s>'] + doc  # prepend a sentence-start token
    for i in range(0, len(doc) - 1):
        # bigram: doc[i], doc[i+1]
        term = doc[i]
        bigram = ' '.join(doc[i:i + 2])
        term_count[term] = term_count.get(term, 0) + 1
        bigram_count[bigram] = bigram_count.get(bigram, 0) + 1
# How likely a user mistypes a given correct word - channel probability
channel_prob = {}
with open("spell-errors.txt", 'r', encoding='utf8') as f:
    for line in f:
        # each line looks like "correct: mistake1, mistake2, ..."
        temp = line.split(":")
        correct = temp[0].strip()
        mistakes = [sub_mis.strip() for sub_mis in temp[1].strip().split(",")]
        channel_prob[correct] = {}
        for mis in mistakes:
            # assume the listed misspellings of a word are equally likely
            channel_prob[correct][mis] = 1.0 / len(mistakes)
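# Shape check (illustrative; the actual entries depend on spell-errors.txt).
# A line such as "raining: rainning, raning" would produce:
#   channel_prob['raining'] == {'rainning': 0.5, 'raning': 0.5}
print(channel_prob.get('raining'))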
V = len(term_count)  # vocabulary size of the LM, used for add-one smoothing
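# Helper (my own packaging, not part of the original script) that restates the
# add-one smoothed bigram probability used in the scoring loop below:
#   P(w2 | w1) = (count(w1 w2) + 1) / (count(w1) + V)
# falling back to 1/V when the bigram or the history word is unseen.
def bigram_log_prob(w1, w2):
    key = w1 + ' ' + w2
    if key in bigram_count and w1 in term_count:
        return np.log((bigram_count[key] + 1.0) / (term_count[w1] + V))
    return np.log(1.0 / V)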
with open("testdata.txt", 'r', encoding='utf8') as f:
for line in f:
items = line.rstrip().split('\t')
line = re.sub('\.', '', items[2])
line= re.sub(',', '', line).split()
# line = ["I", "like", "playing"]
# print(line)
for word in line:
word=word.strip('.')
word=word.strip(',')
if word not in word_dic:
# print("word:",word)
# 需要替换word成正确的单词
# Step1: 生成所有的(valid)候选集合
candidates_one = generate_candidates(word)
candidates= [word for word in candidates_one if word in word_dic]
# 一种方式: if candidate = [], 多生成几个candidates, 比如生成编辑距离不大于2的
# TODO : 根据条件生成更多的候选集合
if len(candidates) < 1:
candidates_two = generate_edit_two(word)
candidates = [word for word in candidates_two if word in word_dic]
if len(candidates)<1:
continue
probs = []
# print("candidates:", candidates)
# 对于每一个candidate, 计算它的score
# score = p(correct)*p(mistake|correct)
# = log p(correct) + log p(mistake|correct)
# 返回score最大的candidate
for candi in candidates:
prob = 0
# a. 计算channel probability
if candi in channel_prob and word in channel_prob[candi]:
prob += np.log(channel_prob[candi][word])
else:
prob += np.log(0.00001)
# b. 计算语言模型的概率
sentence= re.sub('\.', '', items[2])
idx = re.sub(',', '', sentence).split().index(word)
bigram_1 = ' '.join([items[2].split()[idx-1],candi])
if bigram_1 in bigram_count and items[2].split()[idx-1] in term_count:
prob += np.log((bigram_count[bigram_1] + 1.0) / (
term_count[items[2].split()[idx-1]] + V))
else:
prob += np.log(1.0 / V)
# TODO: 也要考虑当前 [word, post_word]
# prob += np.log(bigram概率)
if idx + 1 < len(items[2].split()):
bigram_2 = ' '.join([candi,items[2].split()[idx + 1]])
if bigram_2 in bigram_count and candi in term_count:
prob += np.log((bigram_count[bigram_2] + 1.0) / (
term_count[candi] + V))
else:
prob += np.log(1.0 / V)
probs.append(prob)
max_idx = probs.index(max(probs))
print(word, candidates[max_idx])
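# A compact restatement of the decision rule above (a sketch under the same
# assumptions; the function name and bigram_log_prob are mine, not from the
# original script). It returns the argmax over candidates of
#   log p(mistake|candidate) + log p(candidate|prev) + log p(next|candidate).
def choose_candidate(word, prev_word=None, next_word=None):
    candidates = generate_candidates(word) or generate_edit_two(word)
    if not candidates:
        return word  # nothing better found; keep the original token
    def score(candi):
        s = np.log(channel_prob.get(candi, {}).get(word, 0.00001))
        if prev_word is not None:
            s += bigram_log_prob(prev_word, candi)
        if next_word is not None:
            s += bigram_log_prob(candi, next_word)
        return s
    return max(candidates, key=score)

# Example call (illustrative): choose_candidate('protectionst', 'of', 'policies')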