Python Spam Email Classification with Naive Bayes
# Formula
Bayes' theorem is a theorem about the conditional (and marginal) probabilities of random events A and B. Taking mail filtering as an example:

Pr(S|W) = Pr(W|S) · Pr(S) / Pr(W)

where:
Pr(S|W) is the probability that a message is spam (S), given that it contains the word W;
Pr(S) is the prior probability that any message is spam;
Pr(W|S) is the probability that the word W appears in spam messages;
Pr(W) is the overall probability that the word W appears in any message.
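A quick numeric check of the formula. All numbers here are made up for illustration; Pr(W|H), the word's frequency in legitimate (ham) mail, is introduced only to compute Pr(W) by total probability:

```python
# Made-up illustration: half of all mail is spam, the word W appears
# in 20% of spam and in 2% of legitimate (ham) mail.
p_s = 0.5            # Pr(S)
p_w_given_s = 0.20   # Pr(W|S)
p_w_given_h = 0.02   # Pr(W|H), assumed for this example only
p_w = p_w_given_s * p_s + p_w_given_h * (1 - p_s)  # Pr(W) by total probability
print(p_w_given_s * p_s / p_w)  # Pr(S|W) ≈ 0.909: W is strong spam evidence
```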
# Pros and cons of naive Bayes
Pros: still effective when training data is scarce, and handles multi-class problems.
Cons: sensitive to how the input data is prepared; the "naive" independence assumption costs some accuracy.
We use the Ling-spam email dataset and write a spam filter that:
uses Laplace smoothing to solve the zero-probability problem;
takes the natural log of the product of probabilities to avoid numerical underflow. Because the log function is monotonically increasing, nothing is lost: comparing log-probabilities yields the same decision as comparing the probabilities themselves.
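A minimal sketch of why the log trick is needed:

```python
import numpy as np

# Multiplying many small per-word probabilities underflows to 0.0,
# while the sum of their logs stays in a representable range.
probs = np.full(1000, 1e-4)    # 1000 word probabilities of 0.0001 each
print(np.prod(probs))          # 0.0 -- the product underflows
print(np.sum(np.log(probs)))   # about -9210.34, no information lost
```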
The code is as follows:
```python
import numpy as np
import re
import random
"""
函数说明:将切分的实验样本词条整理成不重复的词条列表,也就是词汇表
Pa
"""def createVocabList(dataSet):
vocabSet = set([]) for document in dataSet:
vocabSet = vocabSet | set(document) #
return list(vocabSet)
"""
函数说明:根据vocabList词汇表,将inputSet向量化,向量的每个元素为1或0
"""def setOfWords2Vec(vocabList, inputSet):
returnVec = [0] * len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] = 1
else:
print("the word: %s is not in my Vocabulary!" % word)
return returnVec
"""
函数说明:根据vocabList词汇表,构建词袋模型
"""def bagOfWords2VecMN(vocabList, inputSet):
returnVec = [0] * len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] += 1
return returnVec
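
# Illustrative difference between the two vector models (made-up tokens):
#   vocabList = ['free', 'win', 'hello']
#   setOfWords2Vec(vocabList, ['free', 'free', 'win'])   -> [1, 1, 0]
#   bagOfWords2VecMN(vocabList, ['free', 'free', 'win']) -> [2, 1, 0]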
"""
函数说明:朴素贝叶斯分类器训练函数
"""def trainNB0(trainMatrix, trainCategory):
numTrainDocs = len(trainMatrix)
numWords = len(trainMatrix[0])
pAbusive = sum(trainCategory) / float(numTrainDocs)
p0Num = np.ones(numWords)
p1Num = np.ones(numWords) p0Denom = 2.0
p1Denom = 2.0
for i in range(numTrainDocs):
if trainCategory[i] == 1:
p1Num += trainMatrix[i]
p1Denom += sum(trainMatrix[i])
else:
p0Num += trainMatrix[i]
p0Denom += sum(trainMatrix[i])
p1Vect = np.log(p1Num / p1Denom)
p0Vect = np.log(p0Num / p0Denom)
return p0Vect, p1Vect, pAbusive
"""
函数说明:朴素贝叶斯分类器分类函数
"""def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
p1=sum(vec2Classify*p1Vec)+np.log(pClass1)
p0=sum(vec2Classify*p0Vec)+np.log(1.0-pClass1)
if p1 > p0:
return 1
else:
return 0
"""
函数说明:接收一个大字符串并将其解析为字符串列表
"""def textParse(bigString):
listOfTokens = re.split(r'\W*', bigString)
return [tok.lower() for tok in listOfTokens if len(tok) > 2]
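
# For example (illustrative input): textParse("Hi!! This is a TEST...")
# splits on non-word characters, drops tokens shorter than 3 characters,
# and lowercases the rest, returning ['this', 'test'].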
"""
函数说明:测试朴素贝叶斯分类器,使用朴素贝叶斯进行交叉验证
"""def spamTest():
docList = []
classList = []
fullText = []
for i in range(1, 26):
wordList = textParse(open('D:/贝叶斯算法实战数据集/Naive_Bayes-master/email/spam/%d.txt' % i, 'r').read())
docList.append(wordList)
fullText.append(wordList)
classList.append(1)
wordList = textParse(open('D:/贝叶斯算法实战数据集/Naive_Bayes-master/email/ham/%d.txt' % i, 'r').read())
docList.append(wordList)
fullText.append(wordList)
classList.append(0)
vocabList = createVocabList(docList)
trainingSet = list(range(50))
testSet = []
for i in range(10):
randIndex = int(random.uniform(0, len(trainingSet)))
testSet.append(trainingSet[randIndex])
del (trainingSet[randIndex])
trainMat = []
trainClasses = []
for docIndex in trainingSet:
trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses)) #
errorCount = 0
for docIndex in testSet:
wordVector = setOfWords2Vec(vocabList, docList[docIndex])
if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
errorCount += 1
print("分类错误的测试集:", docList[docIndex])
print('错误率:%.2f%%' % (float(errorCount) / len(testSet) * 100))
if __name__ == '__main__':
    spamTest()
```
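For reference, here is one way to classify a single new message with the trained model. This is only a sketch: it assumes spamTest() is modified so that its last line becomes `return vocabList, p0V, p1V, pSpam` instead of only printing the error rate, and the message text is made up:

```python
# Sketch only: assumes spamTest() has been modified to return its trained model,
# i.e. its last line becomes: return vocabList, p0V, p1V, pSpam
vocabList, p0V, p1V, pSpam = spamTest()
tokens = textParse("win a free offer now")               # made-up message text
wordVec = np.array(setOfWords2Vec(vocabList, tokens))
print('spam' if classifyNB(wordVec, p0V, p1V, pSpam) == 1 else 'ham')
```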
Author: JerryLoveCoding