Machine Learning: Implementing the Naive Bayes Algorithm in Python

# -*- coding: utf-8 -*-

from numpy import *

# Filtering abusive posts on a message board

# Create a sample data set for the experiment

def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # labels: 1 = abusive, 0 = not abusive
    return postingList, classVec

# Build a list of the unique words that appear across all documents
def createVocabList(dataSet):
    vocabSet = set([])  # create an empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union with this document's words
    return list(vocabSet)
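One caveat worth noting (not in the original post): list(vocabSet) inherits the unordered iteration of Python sets, so the position of each word in the vocabulary, and therefore the meaning of each slot in the vectors built below, can change between interpreter runs. Sorting the result makes the vectors reproducible, a minimal sketch:

vocabList = sorted(createVocabList([['my', 'dog'], ['my', 'cat']]))
print(vocabList)  # -> ['cat', 'dog', 'my'], stable across runs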

# Convert a document's tokens into a word vector
def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)  # a vector of zeros, one slot per vocabulary word
    for word in inputSet:
        if word in vocabList:
            # returnVec[vocabList.index(word)] = 1  # set-of-words model: index() finds the word's first position; marks presence only
            returnVec[vocabList.index(word)] += 1  # bag-of-words model: each word can be counted multiple times
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
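A quick sanity check (a hypothetical session; it assumes the three functions above are already defined): under the bag-of-words line a repeated word accumulates a count, whereas the commented-out set-of-words line would only mark it present.

posts, labels = loadDataSet()
vocab = createVocabList(posts)
vec = setOfWords2Vec(vocab, ['my', 'dog', 'ate', 'my', 'steak'])
print(vec[vocab.index('my')])  # -> 2 with the bag-of-words model; the set-of-words variant would give 1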

# Naive Bayes training function: estimate the probabilities from the word vectors
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)  # number of documents
    numWords = len(trainMatrix[0])   # length of each word vector, i.e. the size of the vocabulary
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # the class prior P(c=1)
    # p0Num = zeros(numWords); p1Num = zeros(numWords)
    # p0Denom = 0.0; p1Denom = 0.0
    p0Num = ones(numWords); p1Num = ones(numWords)  # initialize to ones so that no single zero probability wipes out the whole product
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]         # elementwise add: running count for each word in class 1
            p1Denom += sum(trainMatrix[i])  # total number of word occurrences in class 1
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # p1Vect = p1Num / p1Denom
    # p0Vect = p0Num / p0Denom  # these would give p(wi|c1) and p(wi|c0) directly
    p1Vect = log(p1Num / p1Denom)
    p0Vect = log(p0Num / p0Denom)  # take logs to avoid underflow and floating-point rounding errors; underflow comes from multiplying many small numbers
    return p0Vect, p1Vect, pAbusive
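Two details above deserve a concrete look (a standalone demonstration, not from the original post). Initializing the counts with ones() and the denominators with 2.0 is the book's variant of add-one smoothing (strict Laplace smoothing would add the vocabulary size to the denominator), and working in log space keeps long products of small probabilities from underflowing to zero:

import numpy as np

counts = np.array([3, 0, 1])                 # raw word counts for one class
print(counts / counts.sum())                 # -> [0.75 0.   0.25]; the zero kills any product it enters
print((counts + 1) / (counts.sum() + 2.0))   # smoothed: every entry is now > 0

tiny = np.full(1000, 1e-10)                  # 1000 small per-word probabilities
print(np.prod(tiny))                         # -> 0.0, the product underflows
print(np.sum(np.log(tiny)))                  # -> about -23026, still usable for comparisons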

# The Naive Bayes classifier
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):  # vec2Classify is the word vector to classify
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
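Why comparing p1 and p0 is enough (a short derivation, not spelled out in the original): Bayes' rule gives p(c|w) = p(w|c)p(c) / p(w), and the evidence p(w) is the same for both classes, so only the numerators need comparing. Under the naive conditional-independence assumption, each class score in log space becomes

    log p(c) + sum_i n_i * log p(w_i | c)

where n_i is the count of word i in the document, which is exactly the sum(vec2Classify * p1Vec) + log(pClass1) computed above.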

def test():
    listPosts, listClasses = loadDataSet()
    myVocablist = createVocabList(listPosts)
    set1 = setOfWords2Vec(myVocablist, listPosts[0])
    # print(set1)
    set2 = setOfWords2Vec(myVocablist, listPosts[1])
    # print(set2)
    trainMat = []
    for postinDoc in listPosts:
        trainMat.append(setOfWords2Vec(myVocablist, postinDoc))
    # print(trainMat)
    # print(listClasses)
    p0V, p1V, pAb = trainNB0(trainMat, listClasses)
    print(pAb)
    print(p1V)
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocablist, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocablist, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
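If the functions above are pasted into one module, a run should look like the following (pAb is 0.5 because three of the six sample posts are labeled abusive; the two classifications match the book's example):

if __name__ == '__main__':
    test()
    # pAb -> 0.5
    # ['love', 'my', 'dalmation'] classified as: 0
    # ['stupid', 'garbage'] classified as: 1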

# A spam e-mail filtering example:

# Split raw text into tokens with a regular expression
def textParse(bigString):
    import re
    listOfTokens = re.split(r'\W+', bigString)  # split on runs of non-word characters; the original r'\W*' also matches the empty string and splits incorrectly
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
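A quick check of the tokenizer (hypothetical input sentence): everything is lower-cased and tokens shorter than three characters are dropped, which removes noise like 'I' or 'M.L.' but also loses short words such as 'is' and 'on'.

print(textParse('This book is the BEST book on Python or M.L. I have ever laid eyes upon.'))
# -> ['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']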

def spamTest():
    docList = []; classList = []; fullText = []
    for i in range(1, 26):  # load and parse the text files
        wordList = textParse(open('E:/python project/Bayes/email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('E:/python project/Bayes/email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    trainingSet = list(range(50)); testSet = []  # list() so that entries can be deleted below
    for i in range(10):  # randomly hold out 10 documents as the test set
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  # classify the held-out documents
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is: ', float(errorCount) / len(testSet))
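Because the 10 held-out documents are chosen at random, a single run gives a noisy error estimate. A small wrapper can average several random train/test splits; this is a hypothetical sketch that assumes spamTest() is changed to return float(errorCount) / len(testSet) instead of printing it:

def spamTestAvg(numTrials=10):
    rates = [spamTest() for _ in range(numTrials)]  # each call re-draws the random hold-out split
    print('average error rate over %d trials: %f' % (numTrials, sum(rates) / numTrials))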
