The task: take a large number of sentences (one per line, not a single paragraph), segment each into words, count how often every word appears, and finally write a txt file listing each word together with its number of occurrences.
The code below uses the jieba library:
import jieba

def seg_word(line, temp, counts):
    # Cut one sentence into words and update the running frequency dict
    seg = jieba.cut(line.strip())
    for word in seg:
        if word != '':
            temp += word
            temp += '\n'
            counts[word] = counts.get(word, 0) + 1
    return counts

def output(inputfilename, outputfilename):
    inputfile = open(inputfilename, encoding='UTF-8', mode='r')
    outputfile = open(outputfilename, encoding='UTF-8', mode='w')
    temp = ""
    counts = {}
    for line in inputfile.readlines():
        counts = seg_word(line, temp, counts)
    # After all lines are counted, sort by frequency (descending)
    # and write one "word count" pair per line
    line_seg = sorted(counts.items(), key=lambda x: x[1], reverse=True)
    for word, count in line_seg:
        outputfile.write(word + ' ' + str(count) + '\n')
    inputfile.close()
    outputfile.close()
    return outputfilename

if __name__ == '__main__':
    inputfilename = '1.txt'
    outputfilename = '2.txt'
    output(inputfilename, outputfilename)
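
For reference, a more compact alternative (just a sketch, not the original code) could drop the unused temp string and let collections.Counter and with blocks handle the bookkeeping; it produces the same "word count" output:

import jieba
from collections import Counter

def count_words(inputfilename, outputfilename):
    counts = Counter()
    with open(inputfilename, encoding='UTF-8') as inputfile:
        for line in inputfile:
            # Count every non-empty token produced by jieba
            counts.update(w for w in jieba.cut(line.strip()) if w.strip())
    with open(outputfilename, encoding='UTF-8', mode='w') as outputfile:
        # most_common() already returns (word, count) pairs sorted by frequency
        for word, count in counts.most_common():
            outputfile.write(word + ' ' + str(count) + '\n')

if __name__ == '__main__':
    count_words('1.txt', '2.txt')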