from numpy import *
"""
func: 加载数据
param:
None
return:
postingList: 进行词条切分后的文档集合
classVec: 类别标签的集合
"""
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
    return postingList, classVec
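# A quick usage sketch (interpreter session shown as comments so the listing
# stays runnable):
# >>> listOPosts, listClasses = loadDataSet()
# >>> len(listOPosts), listClasses
# (6, [0, 1, 0, 1, 0, 1])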
"""
func: 创建一个包含在所有文档中出现的不重复词的列表
param:
None
return:
list(vocaSet): 文档中所有不重复词的列表
"""
def createVocabList(dataSet):
    vocabSet = set([])  # create an empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)
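# Continuing the session above: the six toy posts contain 32 unique tokens.
# The order of the returned list varies from run to run, because Python sets
# are unordered.
# >>> myVocabList = createVocabList(listOPosts)
# >>> len(myVocabList)
# 32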
"""
func: 词集模型;
将词汇表转为文档向量,每一个元素为1或0,分别表示词汇表中的单词在输入文档中是否出现。
param:
vocabList: 词汇表
inputSet:某个文档
return:
returnVec:文档向量, 第i个元素表示词汇表中第i个单词是否出现
"""
def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
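# Example: the first toy post has 7 distinct words, all of them in the
# vocabulary, so its set-of-words vector contains exactly 7 ones.
# >>> vec = setOfWords2Vec(myVocabList, listOPosts[0])
# >>> sum(vec)
# 7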
"""
func: 朴素贝叶斯词袋模型
param:
vocabList: 词汇表
inputSet:某个文档
return:
returnVec:文档向量, 第i个元素表示词汇表中第i个单词出现的次数
"""
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
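# Unlike the set model, the bag model counts repeats. A small illustration with
# a made-up input document:
# >>> bag = bagOfWords2VecMN(myVocabList, ['dog', 'dog', 'stupid'])
# >>> sorted(bag, reverse=True)[:3]
# [2, 1, 0]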
"""
func: 朴素贝叶斯分类器训练函数
param:
trainMatrix: 文档矩阵,每个元素为0或1,表示是否出现在该文档上
trainCategory: 由每篇文档类别标签所构成的向量
return:
p0Vect: 向量,p(Wi|C0),即文档是非侮辱性的条件下,单词Wi出现的概率的对数
p1Vect: 向量, p(Wi|C1),即文档是侮辱性的条件下,单词Wi出现的概率的对数
pAbusive: 文档属于侮辱性文档的概率
"""
def trainNB0(trainMatrix, trainCategory):
    # number of training documents
    numTrainDocs = len(trainMatrix)
    # size of the vocabulary
    numWords = len(trainMatrix[0])
    # fraction of training documents that are abusive (class=1), i.e. p(c1)
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    # initialize the counts: the i-th element of p1Num is the number of occurrences
    # of the i-th vocabulary word in abusive (class=1) documents; p0Num is the same
    # for non-abusive (class=0) documents
    p0Num = ones(numWords); p1Num = ones(numWords)  # initialized with ones() so no word gets probability 0, which would force p(c|w) to 0
    # p0Denom is the total word count in non-abusive (class=0) documents;
    # p1Denom is the total word count in abusive (class=1) documents
    p0Denom = 2.0; p1Denom = 2.0  # initialized to 2.0 for the same reason (smoothing)
    # for each training document
    for i in range(numTrainDocs):
        # for each class:
        if trainCategory[i] == 1:
            # if a word appears in the document, increment that word's count
            p1Num += trainMatrix[i]
            # and increment the total word count
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # for each word, divide its count by the total word count to get the conditional
    # probability p(w|c), where c is the class label and w is the word
    p1Vect = log(p1Num / p1Denom)  # take log() to avoid underflow from multiplying many small probabilities
    p0Vect = log(p0Num / p0Denom)  # take log()
    return p0Vect, p1Vect, pAbusive
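# Rough numbers for the toy data (the exact vector layout depends on the
# unordered vocabulary): 3 of the 6 posts are abusive, so pAb is 0.5; 'stupid'
# occurs 3 times among the 19 words of the abusive posts, so with the smoothing
# above its entry in p1V is log((3+1)/(19+2)) = log(4/21) ≈ -1.66.
# >>> trainMat = [setOfWords2Vec(myVocabList, post) for post in listOPosts]
# >>> p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
# >>> pAb
# 0.5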
"""
func: 朴素贝叶斯分类函数
param:
vec2Classify: 要分类的向量
p0Vec: 向量,p(Wi|C0),即文档是非侮辱性的条件下,单词Wi出现的概率的对数
p1Vec: 向量, p(Wi|C1),即文档是侮辱性的条件下,单词Wi出现的概率的对数
pClass1: 文档属于侮辱性文档的概率
return:
分类的类别:1或0
"""
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # log p(w|c1) + log p(c1); since we only compare p(c|w) across the two classes,
    # the shared denominator p(w) can be dropped and only p(w|c)p(c) is needed
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)  # element-wise multiplication
    # log p(w|c0) + log p(c0)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
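# Worked example on the toy data: for the document ['stupid', 'garbage'],
# p1 = log(4/21) + log(2/21) + log(0.5) ≈ -4.70, while
# p0 = log(1/26) + log(1/26) + log(0.5) ≈ -7.21
# (neither word appears in any non-abusive post), so classifyNB returns 1.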
"""
func: 对给定的词向量,判断并打印出其类别
param:
None
return:
None
"""
def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    # train once; p0V, p1V, pAb do not need to be recomputed for each test entry
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
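# Expected output (the vocabulary order varies between runs, but the predicted
# labels should not):
# >>> testingNB()
# ['love', 'my', 'dalmation'] classified as:  0
# ['stupid', 'garbage'] classified as:  1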
"""
func: 切分文本
param:
bigString: 长字符串
return:
满足长度大于2的单词的列表,且将大写改为小写
"""
def textParse(bigString):  # input is a big string, output is a word list
    import re
    # split on any run of non-word characters; r'\W*' (as in the book) allows an
    # empty pattern match, which is an error in Python 3.5+
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
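# Example, using the sentence from the book; note that short tokens such as
# 'is', 'on', 'M', 'L' and 'I' are dropped:
# >>> textParse('This book is the best book on Python or M.L. I have ever laid eyes upon.')
# ['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']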
"""
func: 完整的垃圾邮件测试函数,打印出错误率
param:
None
return:
None
"""
def spamTest():
    docList = []; classList = []; fullText = []
    # load and parse the text files
    for i in range(1, 26):
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        # note the difference between append and extend:
        # a = [1,2,3]; b = [4,5,6]
        # a.append(b) -> [1,2,3,[4,5,6]]
        # a.extend(b) -> [1,2,3,4,5,6]
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    # create the vocabulary
    vocabList = createVocabList(docList)
    # randomly build the training set; trainingSet/testSet hold the indices of
    # the training/test documents
    trainingSet = list(range(50)); testSet = []  # create the test set
    for i in range(10):
        # random.uniform: draw a sample from a uniform distribution over the given range
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:  # train the classifier (get probabilities) with trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    # classify the test set
    for docIndex in testSet:  # classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error", docList[docIndex])
    print('the error rate is: ', float(errorCount) / len(testSet))
    #return vocabList,fullText
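# The reported error rate depends on which 10 messages land in the random
# hold-out set, so a single run is noisy. A minimal sketch of averaging over
# several splits, assuming spamTest is changed to end with
# `return float(errorCount)/len(testSet)` instead of only printing it:
def averageSpamError(numTrials=10):
    # hypothetical helper: requires the return statement described above
    rates = [spamTest() for _ in range(numTrials)]
    return sum(rates) / float(numTrials)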
"""
func: 计算单词频数
param:
vocaList: 词汇表
fullTest: 所有文档中的所有单词列表,包含重复单词
return:
频数最好的30个单词
"""
def calcMostFreq(vocabList, fullText):
    import operator
    freqDict = {}
    # count how often each vocabulary word occurs
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]
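# A tiny sketch of the behavior:
# >>> calcMostFreq(['the', 'dog', 'cat'], ['the', 'the', 'dog'])
# [('the', 2), ('dog', 1), ('cat', 0)]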
"""
func: RSS源分类器及高频词去除函数
param:
feed1:RSS源
feed0:RSS源
return:
vocabList: 词汇表
p0V:向量,p(Wi|C0),即文档是第一个地区的条件下,单词Wi出现的概率的对数
p1V:向量, p(Wi|C1),即文档是第二个地区的条件下,单词Wi出现的概率的对数
"""
def localWords(feed1, feed0):
    import feedparser
    docList = []; classList = []; fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        # take one entry from each feed at a time
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)  # NY is class 1
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    # remove the words with the highest occurrence counts
    vocabList = createVocabList(docList)  # create the vocabulary
    top30Words = calcMostFreq(vocabList, fullText)  # remove the top 30 words
    for pairW in top30Words:
        if pairW[0] in vocabList: vocabList.remove(pairW[0])
    trainingSet = list(range(2 * minLen)); testSet = []  # create the test set; range() must be wrapped in list() in Python 3, since del does not work on a range object
    for i in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:  # train the classifier (get probabilities) with trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  # classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is: ', float(errorCount) / len(testSet))
    return vocabList, p0V, p1V
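# Usage sketch. The book pulled two craigslist RSS feeds; those URLs are no
# longer live, so treat them as placeholders for any two feeds whose entries
# carry a 'summary' field:
# >>> import feedparser
# >>> ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
# >>> sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
# >>> vocabList, pSF, pNY = localWords(ny, sf)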
"""
func: 最具表征性的词汇显示函数,打印出分类错误率,及大于某个阈值的所有单词
param:
ny: RSS源
sy:RSS源
return:
None
"""
def getTopWords(ny, sf):
    vocabList, p0V, p1V = localWords(ny, sf)
    topNY = []; topSF = []
    for i in range(len(p0V)):
        if p0V[i] > -6.0: topSF.append((vocabList[i], p0V[i]))
        if p1V[i] > -6.0: topNY.append((vocabList[i], p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**")
    for item in sortedSF:
        print(item[0])
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**")
    for item in sortedNY:
        print(item[0])