input:
import jieba

# Demo stopword table; in practice it could be loaded from a file, e.g.:
# stopwords = {}.fromkeys([line.rstrip() for line in open('stopword.txt')])
stopwords = {}.fromkeys(['的', '附近'])

# Segment the sentence in precise (non-full) mode, then drop stopwords.
segs = jieba.cut('北京附近的租房', cut_all=False)
final = [token for token in segs if token not in stopwords]
final
out:
['北京', '租房']
----------------------------------------------- 分割线 -------------------------------------------------
input:
# Segment the sentence again and print each token on its own line.
segs = jieba.cut('北京附近的租房', cut_all=False)
for token in segs:
    print(token)

# NOTE: jieba.cut returns a one-shot generator; the loop above has already
# consumed it, so displaying `segs` here only shows the generator object,
# not the tokens.
segs
out:
北京
附近
的
租房
<generator object Tokenizer.cut at 0x0000017EDFE641C8>