注意:
安装 jieba 推荐使用命令:pip install jieba;如遇安装问题,也可尝试 easy_install jieba(注:easy_install 已被废弃,如今 pip 通常能正确安装)。
中文情况下需要带上 analyzer='word' 参数;传入 vocabulary=cv.vocabulary_ 这样的参数,是为了使测试集与训练集的特征维数对齐。
代码:
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
import jieba
import jieba.posseg as pseg
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Toy corpus: four short Chinese sentences used as category examples.
# NOTE(review): these strings are raw, unsegmented Chinese text — the
# original comment claimed they were space-separated after word
# segmentation, but jieba tokenization is only applied further below.
corpus = [
    "我爱河南。",
    "你恨河南。",
    "他总是爱河南。",
    "我有时候恨河南。",
]
tokenized_corpus = []
for text in corpu