Machine Learning in Action Notes: Naive Bayes

bayes.py

from numpy import *

def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
    return postingList, classVec

# Build the vocabulary: the union of all documents' words, deduplicated
def createVocabList(dataSet):
    vocabSet = set([])  # create empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)

def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec

def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
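
# A minimal usage sketch (added, not part of the original listing) showing the
# difference between the two vectorizers: setOfWords2Vec records only presence
# (set-of-words, 0/1), while bagOfWords2VecMN counts occurrences (bag-of-words).
#
#   >>> posts, classes = loadDataSet()
#   >>> vocab = createVocabList(posts)
#   >>> setOfWords2Vec(vocab, ['my', 'dog', 'dog'])[vocab.index('dog')]
#   1
#   >>> bagOfWords2VecMN(vocab, ['my', 'dog', 'dog'])[vocab.index('dog')]
#   2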

# Original (unsmoothed) version kept for comparison; the inline comments mark
# the fixes applied in the active trainNB0 below.
# def trainNB0(trainMatrix,trainCategory):
#     numTrainDocs = len(trainMatrix)
#     numWords = len(trainMatrix[0])
#     pAbusive = sum(trainCategory)/float(numTrainDocs)
#     p0Num = zeros(numWords); p1Num = zeros(numWords)      #change to ones()
#     p0Denom = 0.0; p1Denom = 0.0                          #change to 2.0
#     for i in range(numTrainDocs):
#         if trainCategory[i] == 1:
#             p1Num += trainMatrix[i]
#             p1Denom += sum(trainMatrix[i])
#         else:
#             p0Num += trainMatrix[i]
#             p0Denom += sum(trainMatrix[i])
#     p1Vect = p1Num/p1Denom          #change to log()
#     p0Vect = p0Num/p0Denom          #change to log()
#     return p0Vect,p1Vect,pAbusive
def trainNB0(trainMatrix,trainCategory):
    numTrainDocs = len(trainMatrix)  # number of training documents (6 in the toy data set)
    numWords = len(trainMatrix[0])   # vocabulary size (32 in the toy data set)
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    p0Num = ones(numWords); p1Num = ones(numWords)      #change to ones()
    p0Denom = 2.0; p1Denom = 2.0                        #change to 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = log(p1Num/p1Denom)          #change to log()
    p0Vect = log(p0Num/p0Denom)          #change to log()
    return p0Vect,p1Vect,pAbusive
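
# Why the "change to ones()/2.0/log()" notes above matter (added explanation,
# not part of the original listing): with zero-initialized counts, a word that
# never occurs in one class gets p(w_i|c) = 0, and the product over all words
# collapses to zero. Starting the counts at 1 and the denominators at 2 is
# Laplace smoothing, i.e. p(w_i|c) = (count(w_i, c) + 1) / (count(c) + 2).
# Taking log() turns the product of many small probabilities into a sum,
#     log( p(c) * prod_i p(w_i|c) ) = log p(c) + sum_i log p(w_i|c),
# which avoids floating-point underflow; classifyNB below works with these
# log-probabilities directly.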


def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1>p0:
        return 1
    else:
        return 0
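
# Note (added, not part of the original listing): classifyNB compares the two
# log-numerators of Bayes' rule, log p(w|c) + log p(c); the shared denominator
# p(w) is identical for both classes and can be dropped. vec2Classify * p1Vec
# zeroes out the log-probabilities of words absent from the document (and, for
# bag-of-words vectors, weights each word by its count).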

def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    #print(myVocabList)
    # print(setOfWords2Vec(myVocabList, listOPosts[0]))
    # print(setOfWords2Vec(myVocabList, listOPosts[3]))
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))

    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    print(p0V)
    print(p1V)
    print(pAb)
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))

    testEntry=['stupid','garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))

def textParse(bigString):
    import re
    listOfTokens = re.split(r'\W+', bigString)  # \W+ (not \W*) so empty strings are not produced
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
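
# Example (added for illustration): textParse splits on non-word characters,
# lower-cases the tokens, and drops anything of length <= 2, so
#   textParse('Hello, World! Python3 is great')
# returns ['hello', 'world', 'python3', 'great'] ('is' is dropped).
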
def spamTest():
    docList=[];    classList=[];    fullText=[]
    for i in range(1,26):
        # some of the sample emails are not valid UTF-8, so read them with a lenient encoding
        wordList = textParse(open('email/spam/%d.txt' % i, encoding='latin-1').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)

        wordList = textParse(open('email/ham/%d.txt' % i, encoding='latin-1').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)

    vocabList = createVocabList(docList)
    trainingSet = list(range(50)); testSet = []
    for i in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])  # randomly pick 10 docs for the test set; the rest stay in the training set
        del(trainingSet[randIndex])

    trainMat=[]; trainClasses=[]
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V,p1V,pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print('the error doc is: ', docList[docIndex])
    print('the error rate is: ', float(errorCount)/len(testSet))
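
# Note (added): because the 10 test documents are chosen at random, the error
# rate printed by spamTest() varies from run to run; repeating the random
# hold-out split several times and averaging gives a steadier estimate.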

#testingNB()
#spamTest()

def calcMostFreq(vocabList,fullText):
    import operator
    freqDict = {}
    for token in vocabList:
        freqDict[token]=fullText.count(token)
    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]
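
# Note (added): calcMostFreq supports the commented-out alternative in
# localWords() that removes the 30 most frequent words, which in natural text
# are mostly uninformative "stop"-like words; the active code removes an
# explicit stop-word list from 'stopword.txt' instead.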

def stopWords():
    import re
    wordList = open('stopword.txt').read()  # see http://www.ranks.nl/stopwords
    listOfTokens = re.split(r'\W+', wordList)
    listOfTokens = [tok.lower() for tok in listOfTokens if tok]
    print ('read stop words from \'stopword.txt\':', listOfTokens)
    return listOfTokens

def localWords(feed1,feed0):
    import feedparser
    docList=[]; classList = []; fullText =[]
    print ('feed1 entries length: ', len(feed1['entries']), '\nfeed0 entries length: ', len(feed0['entries']))
    minLen = min(len(feed1['entries']),len(feed0['entries']))
    print ('\nmin Length: ', minLen)
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])
        print ('\nfeed1\'s entries[',i,']\'s summary - ','parse text:\n',wordList)
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1) #NY is class 1

        wordList = textParse(feed0['entries'][i]['summary'])
        print ('\nfeed0\'s entries[',i,']\'s summary - ','parse text:\n',wordList)
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)#create vocabulary
    print ('\nVocabList is ',vocabList)
    print ('\nRemove Stop Word:')
    stopWordList = stopWords()
    for stopWord in stopWordList:
        if stopWord in vocabList:
            vocabList.remove(stopWord)
            print ('Removed: ',stopWord)
    # top30Words = calcMostFreq(vocabList,fullText)   #remove top 30 words
    # print ('\nTop 30 words: ', top30Words)
    # for pairW in top30Words:
    #    if pairW[0] in vocabList:
    #        vocabList.remove(pairW[0])
    #        print ('\nRemoved: ',pairW[0])
    trainingSet = list(range(2*minLen)); testSet=[]           #create test set
    print ('\n\nBegin to create a test set: \ntrainingSet:',trainingSet,'\ntestSet',testSet)
    for i in range(5):
        randIndex = int(random.uniform(0,len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    print ('random select 5 sets as the testSet:\ntrainingSet:',trainingSet,'\ntestSet',testSet)
    trainMat=[]; trainClasses = []
    for docIndex in trainingSet:#train the classifier (get probs) trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    print ('\ntrainMat length:',len(trainMat))
    print ('\ntrainClasses',trainClasses)
    print ('\n\ntrainNB0:')
    p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
    #print '\np0V:',p0V,'\np1V',p1V,'\npSpam',pSpam
    errorCount = 0
    for docIndex in testSet:        #classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        classifiedClass = classifyNB(array(wordVector),p0V,p1V,pSpam)
        originalClass = classList[docIndex]
        result =  classifiedClass != originalClass
        if result:
            errorCount += 1
        print ('\n',docList[docIndex],'\nis classified as: ',classifiedClass,', while the original class is: ',originalClass,'. --',not result)
    print ('\nthe error rate is: ',float(errorCount)/len(testSet))
    return vocabList,p0V,p1V

def testRSS():
    import feedparser
    ny=feedparser.parse('http://www.nasa.gov/rss/dyn/image_of_the_day.rss')
    sf=feedparser.parse('http://sports.yahoo.com/nba/teams/hou/rss.xml')
    vocabList,pSF,pNY = localWords(ny,sf)

def testTopWords():
    import feedparser
    ny=feedparser.parse('http://www.nasa.gov/rss/dyn/image_of_the_day.rss')
    sf=feedparser.parse('http://sports.yahoo.com/nba/teams/hou/rss.xml')
    getTopWords(ny,sf)

def getTopWords(ny,sf):
    import operator
    vocabList,p0V,p1V=localWords(ny,sf)
    topNY=[]; topSF=[]
    for i in range(len(p0V)):
        if p0V[i] > -6.0 : topSF.append((vocabList[i],p0V[i]))
        if p1V[i] > -6.0 : topNY.append((vocabList[i],p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print ("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**")
    for item in sortedSF:
        print (item[0])
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print ("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**")
    for item in sortedNY:
        print (item[0])

def test42():
    print ('\n*** Load DataSet ***')
    listOPosts,listClasses = loadDataSet()
    print ('List of posts:\n', listOPosts)
    print ('List of Classes:\n', listClasses)

    print ('\n*** Create Vocab List ***')
    myVocabList = createVocabList(listOPosts)
    print ('Vocab List from posts:\n', myVocabList)

    print ('\n*** Vocab show in post Vector Matrix ***')
    trainMat=[]
    for postinDoc in listOPosts:
        trainMat.append(bagOfWords2VecMN(myVocabList,postinDoc))
    print ('Train Matrix:\n', trainMat)

    print ('\n*** Train ***')
    p0V,p1V,pAb = trainNB0(trainMat,listClasses)
    print ('p0V:\n',p0V)
    print ('p1V:\n',p1V)
    print ('pAb:\n',pAb)

#testRSS()
testTopWords()