from numpy import *
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 = abusive, 0 = not abusive
    return postingList, classVec
def createVocabList(dataSet):
    vocabSet = set()
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union with this document's words
    return list(vocabSet)
def setOfWords2Vec(vocabList, inputSet):
    """Convert a document into a 0/1 presence vector over vocabList."""
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
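# A quick sanity check of the pipeline so far. This demo function is my
# addition, not part of the original listing: it builds the vocabulary from
# the toy postings and converts one posting into a presence vector. Note that
# createVocabList goes through a set, so the word order (and hence the vector
# layout) differs between runs.
def _demoSetOfWords():
    posts, labels = loadDataSet()
    vocab = createVocabList(posts)
    vec = setOfWords2Vec(vocab, posts[0])
    print(len(vocab), 'vocabulary words;', sum(vec), 'appear in posting 0')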
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # prior p(class = 1)
    # Initializing counts to zeros(numWords) with denominators of 0.0 would
    # let a single unseen word zero out the whole probability product, so we
    # use Laplace smoothing instead: counts start at 1, denominators at 2.
    p0Num = ones(numWords)
    p1Num = ones(numWords)
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # Store log probabilities so later products become sums and cannot
    # underflow to zero.
    p1Vect = log(p1Num / p1Denom)
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
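# Why the log in trainNB0? A minimal sketch (my addition, assuming ordinary
# 64-bit floats): the product of a few hundred small word probabilities
# underflows to exactly 0.0, while the sum of their logs stays finite.
def _demoUnderflow():
    p, logp = 1.0, 0.0
    for _ in range(500):
        p *= 1e-3           # raw product: underflows to 0.0
        logp += log(1e-3)   # log-domain sum: about -3453.9
    print(p, logp)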
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # after taking logs, the multiplications become additions
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1 - pClass1)
    return 1 if p1 > p0 else 0
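# What classifyNB computes (comment only, my paraphrase of the code above):
# it picks the class with the larger log posterior,
#     argmax_c  sum_i vec2Classify[i] * log p(word_i | c)  +  log p(c),
# dropping the evidence term p(w), which is the same for both classes.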
def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0v, p1v, pab = trainNB0(trainMat, listClasses)
    testEntity = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntity))
    print(testEntity, 'classified as:', classifyNB(thisDoc, p0v, p1v, pab))
    testEntity = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntity))
    print(testEntity, 'classified as:', classifyNB(thisDoc, p0v, p1v, pab))
def bagOfWord2VecMN(vocabList, inputSet):
    """Bag-of-words variant: count occurrences instead of 0/1 presence."""
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
def textParse(bigString):
    import re
    # \W+ rather than \W*: splitting on a pattern that can match the empty
    # string is an error in Python 3 and produced spurious empty tokens.
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
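# A small illustration of textParse (my addition): the \W+ split breaks on
# any non-word character, and the len > 2 filter drops short tokens like
# 'Hi', 'is', and 'of' before lowercasing.
def _demoTextParse():
    print(textParse('Hi, this is a QUICK test of textParse!'))
    # -> ['this', 'quick', 'test', 'textparse']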
def spamTest():
    docList = []; classList = []; fullText = []
    for i in range(1, 26):  # 25 spam and 25 ham emails
        # latin-1 sidesteps a stray non-UTF-8 byte in the book's sample emails
        wordList = textParse(open('email/spam/%d.txt' % i, encoding='latin-1').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i, encoding='latin-1').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    trainingSet = list(range(50)); testSet = []  # list() so del works in Python 3
    for i in range(10):  # hold out 10 random documents for testing
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0v, p1v, pSpam = trainNB0(trainMat, trainClasses)
    errorCount = 0
    for docIndex in testSet:
        wordVector = array(setOfWords2Vec(vocabList, docList[docIndex]))
        if classifyNB(wordVector, p0v, p1v, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is: ', float(errorCount) / len(testSet))
    return float(errorCount) / len(testSet)  # return the rate so callers can average runs
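# The 10-document hold-out set is drawn at random, so a single run of
# spamTest() is noisy. A sketch of a steadier estimate (hypothetical helper,
# my addition; assumes the email/spam and email/ham files from the book's
# repo are present):
def _demoAverageError(runs=10):
    rates = [spamTest() for _ in range(runs)]
    print('mean error over %d runs: %.3f' % (runs, sum(rates) / runs))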
def calcMostFreq(vocabList, fullText):
    # The original post left this as a stub; this fills it in with the book's
    # standard helper: return the 30 most frequent tokens, which the RSS
    # example later strips out as crude stop words.
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.items(), key=lambda kv: kv[1], reverse=True)
    return sortedFreq[:30]
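# How calcMostFreq is meant to be used (comment-only sketch, my addition):
# the book's localWords() RSS example, not reproduced in this post, removes
# the 30 most frequent tokens before training, roughly:
#     top30 = calcMostFreq(vocabList, fullText)
#     for word, _ in top30:
#         if word in vocabList:
#             vocabList.remove(word)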
if __name__ == '__main__':
    testingNB()
    spamTest()
    # RSS example from the book; the feed URL may no longer be live.
    import feedparser
    ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
    print(len(ny['entries']))