def seg_words(contents):
    """Segment each text with jieba and join the tokens with single spaces.

    Parameters
    ----------
    contents : iterable of str
        Raw text strings (e.g. Chinese sentences) to segment.

    Returns
    -------
    list of str
        One space-joined token string per input text; empty input yields [].
    """
    # Comprehension replaces the manual append loop (idiomatic; ruff PERF401).
    return [" ".join(jieba.lcut(content)) for content in contents]
Read the data and append the sentence start/end tokens:
# Read the data and append SENTENCE_START and SENTENCE_END tokens.
# NOTE(review): Python 2 syntax throughout (print statement, reader.next(),
# opening the csv in 'rb' mode) — this will not run under Python 3 as-is.
# NOTE(review): indentation of the `with`-block body appears to have been
# lost in this copy; the lines below should be indented under `with`.
print "Reading CSV file..."
with open('data/reddit-comments-2015-08.csv', 'rb') as f:
reader = csv.reader(f, skipinitialspace=True)  # read the CSV; 'rb' mode is the Python 2 csv convention
reader.next()  # skip the first row — presumably the CSV header; verify against the data file
# Split full comments into sentences.
# chain(*[...]) flattens the per-comment sentence lists into one iterator;
# x[0] is the first column of each row — presumably the comment body text.
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode('utf-8').lower()) for x in reader])
# Wrap every sentence with the start/end marker tokens.
# NOTE(review): the next line appears truncated in this copy — it should
# presumably end with `for x in sentences]`.
sentences = ["%s %s %s" % (sentence_start_token, x, sentence_end_token) for x in senten