Machine Learning in Action - Chapter 4 - Naive Bayes

This chapter mainly introduces the ideas behind the naive Bayes algorithm and shows how to build a simple naive Bayes classifier to filter out spam email.

First, naive Bayes rests on two very important assumptions (a small sketch of what they imply follows this list):

  1. All features are assumed to be independent of one another.
  2. All features are assumed to be equally important.
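
A minimal sketch of what these two assumptions buy us (the numbers are made up for illustration): because the features are treated as independent, the probability of a document given a class factorizes into a product over its individual words, and working in log space turns that product into a sum of per-word log-probabilities plus the log prior, which is exactly what classifyNB() below computes.

import numpy as np
log_p_w_c1 = np.log([0.10, 0.05, 0.20])   # hypothetical P(word_i | class 1)
log_p_w_c0 = np.log([0.08, 0.15, 0.02])   # hypothetical P(word_i | class 0)
p_c1 = 0.5                                # hypothetical class prior
doc = np.array([1, 0, 1])                 # which vocabulary words appear in the document
score1 = np.sum(doc * log_p_w_c1) + np.log(p_c1)
score0 = np.sum(doc * log_p_w_c0) + np.log(1 - p_c1)
print(1 if score1 > score0 else 0)        # predicted class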

The details can all be found in the book, so here I only list what needs to change in this chapter (errors caused by the Python version; I ran everything under Python 3.6):

  • In email/ham/23.txt, delete the question mark after the first word on the second line, otherwise reading the file fails with: UnicodeDecodeError: 'gbk' codec can't decode byte 0xae in position 199: illegal multibyte sequence
  • In spamTest(), change trainingSet = range(50) to trainingSet = list(range(50)), otherwise you get: 'range' object doesn't support item deletion. In Python 3.x, range() returns a range object rather than a list.
  • For the final "getting locale-specific words from personal ads" example, the RSS URL given in the book can no longer be reached, so I used two other RSS feeds instead (a short loading sketch follows this list):
  • http://www.nasa.gov/rss/dyn/image_of_the_day.rss
  • http://sports.yahoo.com/nba/teams/hou/rss.xml
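
Since the book's feed is gone, here is roughly how the two replacement feeds get loaded and handed to the functions defined further down (localWords() and gettopwords()). RSS content changes over time, so the number of entries, and therefore the results, will differ between runs.

import feedparser
# 'ny' and 'sf' simply keep the book's variable names; here they point at the NASA and NBA feeds
ny = feedparser.parse('http://www.nasa.gov/rss/dyn/image_of_the_day.rss')
sf = feedparser.parse('http://sports.yahoo.com/nba/teams/hou/rss.xml')
print(len(ny['entries']), len(sf['entries']))   # how many posts each feed returned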

Below is the full code with comments (feel free to leave a comment if anything is unclear!):

# -*- coding: utf-8 -*-

from numpy import *

def loadDataSet():
    postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                 ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                 ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                 ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                 ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                 ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    # 1 marks a post as abusive
    classVec = [0,1,0,1,0,1] 
    return postingList, classVec

# build a list of all unique words that appear across the documents
def createVocabList(dataSet):
    vocabSet = set([])
    for document in dataSet:
        vocabSet = vocabSet | set(document)
    return list(vocabSet)

# set-of-words model: turn an input token list into a 0/1 vector marking whether each vocabulary word is present
def setOfWord2Vec(vocabList,inputSet):
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word %s is not in my vocalbulary!" % word)
    return returnVec

# bag-of-words model: same idea, but count how many times each word occurs
def bagOfWord2Vec(vocabList,inputSet):
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
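
# A quick illustrative comparison of the two encodings (not from the book):
#   vocab = ['dog', 'my', 'stupid']
#   setOfWord2Vec(vocab, ['my', 'dog', 'my'])  ->  [1, 1, 0]   (presence only)
#   bagOfWord2Vec(vocab, ['my', 'dog', 'my'])  ->  [1, 2, 0]   (occurrence counts)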

# training is really just counting words to estimate the conditional probabilities
def trainNB(trainMatrix,trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    # initialise every word count to 1 and both denominators to 2 (Laplace smoothing),
    # so that a word that never appears in a class cannot zero out the whole product
    p0Num = ones(numWords)
    p1Num = ones(numWords)
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        # for an abusive document, add its word vector to the abusive word counts and its word total to the abusive denominator
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = log(p1Num/p1Denom)
    p0Vect = log(p0Num/p0Denom)
    return p0Vect,p1Vect,pAbusive
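
# A worked example of the smoothing above, with made-up numbers: if 'stupid' occurs
# 3 times across the abusive documents, which contain 14 words in total, the unsmoothed
# estimate would be 3/14; with the ones()/2.0 initialisation it becomes (3+1)/(14+2),
# and a word that never occurs still gets 1/16 instead of 0, so the product of per-word
# probabilities can never collapse to zero.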

# not in the book; just a helper so I don't have to retype this loop every time
def test(myvocablist,listoposts):
    train=[]
    for postinDoc in listoposts:
        train.append(setOfWord2Vec(myvocablist,postinDoc))
    return train

def classifyNB(vec2classify,p0Vec,p1Vec,pClass1):
    # the probabilities were stored as logs to avoid underflow,
    # so p(w|c)*p(c) with p(w|c) = prod_i p(wi|c) becomes a sum of logs plus the log prior
    p1 = sum(vec2classify * p1Vec) + log(pClass1)
    p0 = sum(vec2classify * p0Vec) + log(1-pClass1)
    if p1>p0:
        return 1
    else:
        return 0
def testingNB():
    # build the toy data set
    listOPosts,listClasses = loadDataSet()
    # build the vocabulary (the deduplicated word list)
    myVocabList = createVocabList(listOPosts)
    # turn every post into a word vector
    trainMat=test(myVocabList,listOPosts)
    # estimate the per-word conditional probabilities and the class prior
    pov,p1v,pab=trainNB(trainMat,listClasses)
    # first test post
    testEntry = ['love','my','dalmation']
    # convert it into a word vector
    thisDoc = array(setOfWord2Vec(myVocabList,testEntry))
    # print the predicted class
    print(testEntry,"classified as: ",classifyNB(thisDoc,pov,p1v,pab))
    # second test post
    testEntry = ['stupid','garbage']
    # convert it into a word vector
    thisDoc = array(setOfWord2Vec(myVocabList,testEntry))
    # print the predicted class
    print(testEntry,"classified as: ",classifyNB(thisDoc,pov,p1v,pab))

# split a long string into a list of lowercase tokens
def textParse(bigstring):
    import re
    # split on runs of non-word characters; \W+ (instead of the book's \W*) avoids
    # empty matches, which newer Python versions warn about or treat differently
    regex = re.compile(r'\W+')
    listoftokens = regex.split(bigstring)
    return [tok.lower() for tok in listoftokens if len(tok) > 2]
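
# A quick sanity check on textParse with an example sentence:
#   textParse('This book is the best book on Python or M.L. I have ever laid eyes upon.')
# returns only the lowercase tokens longer than two characters:
#   ['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']
# short tokens such as 'is', 'on', 'or' and the initials 'M', 'L' are dropped.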

def spamTest():
    # doclist will hold one token list per email
    doclist = []
    classlist = []
    fulltext = []
    
    # read 25 spam emails (class 1) and 25 ham emails (class 0)
    for i in range(1,26):
        wordList = textParse(open('./email/spam/%d.txt' % i).read())
        doclist.append(wordList)
        fulltext.extend(wordList)
        classlist.append(1)
        wordList = textParse(open('./email/ham/%d.txt' % i).read())
        doclist.append(wordList)
        fulltext.extend(wordList)
        classlist.append(0)
    vocabulist = createVocabList(doclist)
    trainingset = list(range(50))
    testset = []
    
    # randomly pick 10 documents as the test set and train on the remaining 40
    # this way of building the train/test split is called hold-out cross-validation
    for i in range(10):
        randIndex = int(random.uniform(0,len(trainingset)))
        testset.append(trainingset[randIndex])
        del(trainingset[randIndex])
    
    # 'training' here is not quite training in the deep-learning sense:
    # it simply counts how often each word appears per class and uses those frequencies as probability estimates
    trainmat=[];trainclass=[]
    for docindex in trainingset:
        trainmat.append(setOfWord2Vec(vocabulist,doclist[docindex]))
        trainclass.append(classlist[docindex])
    p0v,p1v,pab = trainNB(array(trainmat),array(trainclass))
    
    errorcount = 0
    for docindex in testset:
        wordvector = setOfWord2Vec(vocabulist,doclist[docindex])
        if classifyNB(array(wordvector),p0v,p1v,pab) != classlist[docindex]:
            errorcount += 1
    print("the error rate is: ",float(errorcount)/len(testset))

def clacMostFreq(vocabList,fullText):
    import operator
    freDict = {}
    for token in vocabList:
        freDict[token] = fullText.count(token)
    # sort the (word, count) pairs by count; reverse=True gives descending order
    sortedFreq = sorted(freDict.items(),key = operator.itemgetter(1),reverse=True)
    return sortedFreq[:30]

def localWords(feed1,feed0):
    import feedparser
    # doclist holds one token list per feed entry
    doclist=[];classlist=[];fulltext=[]
    minlen = min(len(feed1['entries']),len(feed0['entries']))
    for i in range(minlen):
        wordlist = textParse(feed1['entries'][i]['summary'])
        doclist.append(wordlist)
        fulltext.extend(wordlist)
        classlist.append(1)
        
        wordlist = textParse(feed0['entries'][i]['summary'])
        doclist.append(wordlist)
        fulltext.extend(wordlist)
        classlist.append(0)
    vocablist = createVocabList(doclist)
    # remove the 30 most frequent words: such high-frequency words carry little meaning but would otherwise dominate the classification
    top30words = clacMostFreq(vocablist,fulltext)

    for pair in top30words:
        if pair[0] in vocablist:
            vocablist.remove(pair[0])
            
    # doclist holds 2*minlen documents in total (minlen from each feed)
    trainingset = list(range(2*minlen))
    testset=[]
    
    # hold-out split, same as in spamTest()
    for i in range(10):
        # draw a random real number in [0, len(trainingset)) and truncate it to an index
        randindex = int(random.uniform(0,len(trainingset)))
        testset.append(trainingset[randindex])
        del(trainingset[randindex])
    
    trainmat=[];trainclasses=[]
    for docindex in trainingset:
        trainmat.append(bagOfWord2Vec(vocablist,doclist[docindex]))
        trainclasses.append(classlist[docindex])
    p0v,p1v,pab = trainNB(array(trainmat),array(trainclasses))
    errorcount = 0
    for docindex in testset:
        wordvector = bagOfWord2Vec(vocablist,doclist[docindex])
        if classifyNB(array(wordvector),p0v,p1v,pab) != classlist[docindex]:
            errorcount += 1
    print("the error rate is: ",float(errorcount)/len(testset))
    return vocablist,p0v,p1v

def gettopwords(ny,sf):
    import operator
    vocablist,p0v,p1v=localWords(ny,sf)
    topNY=[];topSF=[]
    for i in range(len(p0v)):
        # keep every word whose log-probability is above the -6.0 threshold
        if p0v[i]>-6.0:
            topSF.append((vocablist[i],p0v[i]))
        if p1v[i]>-6.0:
            topNY.append((vocablist[i],p1v[i]))
    sortedsf = sorted(topSF,key=lambda pair: pair[1],reverse=True)
    print("---------------SF-------------")
    for item in sortedsf[:30]:
        print(item[0])
    sortedny = sorted(topNY,key=lambda pair: pair[1],reverse=True)
    print("---------------NY-------------")
    for item in sortedny[:30]:
        print(item[0])
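
# A hypothetical driver (not in the book) to exercise everything above. It assumes the
# email/spam and email/ham folders sit next to this script and that the two RSS feeds
# are still reachable; feed contents change over time, so the localWords() and
# gettopwords() output will vary between runs.
if __name__ == '__main__':
    import feedparser
    testingNB()                      # classify the two toy test posts
    spamTest()                       # hold-out test on the 50 email files
    ny = feedparser.parse('http://www.nasa.gov/rss/dyn/image_of_the_day.rss')
    sf = feedparser.parse('http://sports.yahoo.com/nba/teams/hou/rss.xml')
    gettopwords(ny, sf)              # print the most characteristic words of each feed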
 

 
