Debugging Bayesian Classification Python Code (Bayes Code in Python)

# Quick check of the classifier on the toy post data set.
# loadDataSet, createVocabList, setOfWords2Vec, trainNB0 and classifyNB are the
# functions defined earlier in bayes.py; array comes from NumPy.
from numpy import *

listOPosts, listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
trainMat = []
for postinDoc in listOPosts:
    trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
testEntry = ['love', 'my', 'dalmation']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb)
testEntry = ['stupid', 'garbage']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb)
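The demo above calls setOfWords2Vec and classifyNB, which are defined earlier in the chapter's bayes.py and not repeated in this excerpt. For reference, a minimal sketch of these two helpers, assuming the usual set-of-words vector and log-posterior comparison (not copied from this post), looks like this:

from numpy import log

def setOfWords2Vec(vocabList, inputSet):
    # set-of-words model: 1 if the token appears in the document, else 0
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
    return returnVec

def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # assumes p0Vec and p1Vec already hold log conditional probabilities,
    # as trainNB0 returns them, and that vec2Classify is a NumPy array
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    return 0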

# File parsing and full spam-email test functions

def textParse(bigString):
    import re
    # split on runs of non-word characters; \W+ avoids the stray empty strings
    # that \W* can produce, and anything still empty is dropped below
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 0]
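A quick check of textParse on a made-up sentence (the input string is only an illustration):

print textParse('Hello, World!! This is a quick test...')
# prints ['hello', 'world', 'this', 'is', 'a', 'quick', 'test']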

def spamTest():
    docList = []; classList = []; fullText = []
    # load 25 spam and 25 ham emails; label spam as 1 and ham as 0
    for i in range(1, 26):
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    # randomly hold out 10 of the 50 documents as the test set
    trainingSet = range(50); testSet = []   # range returns a list in Python 2
    for i in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    print testSet
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    # classify the held-out documents and count the mistakes
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is: ', float(errorCount)/len(testSet)
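Because the 10 test documents are drawn at random, the printed error rate varies from run to run. Repeating the test and eyeballing the average gives a more stable picture; a small sketch, assuming the email/spam and email/ham folders with 25 files each are in place:

# repeat the random hold-out test; each call prints its own test set and error rate
for _ in range(10):
    spamTest()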

# RSS feed classifier and frequent-word removal functions

def calcMostFreq(vocabList, fullText):
    import operator
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)  # number of times token occurs in fullText
    # sort the (token, count) pairs by count, descending, and keep the top 30
    sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]
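A tiny, made-up example of what calcMostFreq returns (the tokens are purely illustrative):

vocab = ['the', 'dog', 'cat']
text = ['the', 'dog', 'the', 'the', 'cat', 'dog']
print calcMostFreq(vocab, text)   # prints [('the', 3), ('dog', 2), ('cat', 1)]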

def localWords(feed1, feed0):
    import feedparser
    docList = []; classList = []; fullText = []
    # walk the two RSS feeds in parallel; class 1 for feed1, class 0 for feed0
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    top30Words = calcMostFreq(vocabList, fullText)  # 30 most frequent words to remove
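    # From this point localWords proceeds the same way spamTest does: strip the
    # 30 most frequent words from the vocabulary, hold out random entries as a
    # test set, train on the rest, and print the error rate. The lines below are
    # a sketch assuming that same pattern, not code taken verbatim from this post.
    for pairW in top30Words:
        if pairW[0] in vocabList:
            vocabList.remove(pairW[0])
    trainingSet = range(2 * minLen); testSet = []
    for i in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is: ', float(errorCount)/len(testSet)
    return vocabList, p0V, p1V

# Example call (the feed URLs are placeholders, not from this post):
# import feedparser
# feedA = feedparser.parse('http://example.com/a.rss')
# feedB = feedparser.parse('http://example.com/b.rss')
# vocabList, p0V, p1V = localWords(feedA, feedB)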
