# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import nltk.tokenize as tk
# Text to be tokenized
doc = "Are you ok? \
I'm fine, and you? \
I'm ok."
# Split the text into sentences
# (sent_tokenize needs the punkt data: nltk.download('punkt'))
tokens = tk.sent_tokenize(doc)
print(tokens)
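# For comparison, a minimal sketch of word-level tokenization with the
# same nltk.tokenize module, reusing doc from above (assumption: the
# punkt data is installed, as noted in the comment above).
words = tk.word_tokenize(doc)
print(words)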