# 中文分词 类似 我/爱/北京/天安门 这样断句
# ------Start------
def load_dict(filename):
    """Load a word dictionary: one word per line, UTF-8 encoded.

    Args:
        filename: path to the dictionary file.

    Returns:
        (max_len, word_dict): the length of the longest word (at least 1)
        and a set of all non-empty words in the file.
    """
    word_dict = set()
    max_len = 1
    # with-statement guarantees the file handle is closed even if
    # iteration raises (the original leaked it on error).
    with open(filename, 'r', encoding='UTF-8') as file:
        for line in file:
            word = line.strip()
            if not word:
                # Skip blank lines so '' never enters the dictionary.
                continue
            word_dict.add(word)
            max_len = max(max_len, len(word))
    return max_len, word_dict
# Forward maximum-matching word segmentation
def cut_sentence_by_word(filename, sentence):
    """Segment *sentence* by forward maximum matching against a dictionary.

    At each position, the longest dictionary word starting there is taken;
    the cursor then resumes right after it.

    Args:
        filename: path to the dictionary file (loaded via load_dict).
        sentence: the text to segment; may be empty.

    Returns:
        List of tokens covering the whole sentence. A character at which
        no dictionary word matches is emitted as a single-character token
        (the original code silently dropped such characters — bug fix).
    """
    max_len, word_dict = load_dict(filename)
    begin = 0
    words = []
    while begin < len(sentence):
        # Try the longest candidate first, shrinking the right boundary;
        # clamp so the slice never reaches past the end of the sentence.
        for end in range(min(begin + max_len, len(sentence)), begin, -1):
            if sentence[begin:end] in word_dict:
                words.append(sentence[begin:end])
                break
        else:
            # No dictionary word starts here: keep the single character
            # instead of losing it, and advance past it.
            end = begin + 1
            words.append(sentence[begin])
        # Resume matching right after the token just produced.
        begin = end
    return words
print(cut_sentence_by_word("/dictionary.dic", "不到长城非好汉"))
# ------End------
效果如图所示: