Machine Learning in Action code (Python 3.6): Naive Bayes

from numpy import *
import feedparser

def load_data_set():
    posting_list = [['my', 'dog', 'has', 'flea', 'problem', 'help', 'please'],
                    ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                    ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                    ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                    ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                    ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    class_vec = [0, 1, 0, 1, 0, 1]    # 1 = abusive, 0 = not abusive
    return posting_list, class_vec

def create_vocab_list(data_set):
    vocab_set = set([])
    for document in data_set:
        vocab_set = vocab_set | set(document)
    return list(vocab_set)

def set_of_words_to_vec(vocab_list, input_set):
    return_vec = [0] * len(vocab_list)
    for word in input_set:
        if word in vocab_list:
            return_vec[vocab_list.index(word)] = 1
        else:
            print('the word %s is not in my vocabulary!' % word)
    return return_vec
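
# Usage sketch (my addition, not part of the original listing): build the vocabulary
# from the toy posts and turn the first post into its set-of-words (0/1) vector.
def _demo_word_vectors():
    posts, labels = load_data_set()
    vocab = create_vocab_list(posts)
    print(vocab)
    print(set_of_words_to_vec(vocab, posts[0]))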


def trainNB0(train_matrix, train_category):
    num_train_docs = len(train_matrix)
    num_words = len(train_matrix[0])
    p_abusive =  sum(train_category)/float(num_train_docs)

    # Initialize counts to 1 and denominators to 2 (Laplace smoothing) so that a word
    # unseen in one class cannot zero out the whole product of probabilities.
    # The unsmoothed version would be:
    #   p0_num = zeros(num_words); p1_num = zeros(num_words)
    #   p0_denom = 0.0;            p1_denom = 0.0
    p0_num = ones(num_words)
    p1_num = ones(num_words)
    p0_denom = 2.0
    p1_denom = 2.0

    for i in range(num_train_docs):
        if train_category[i] == 1:
            p1_num += train_matrix[i]
            p1_denom += sum(train_matrix[i])
        else:
            p0_num += train_matrix[i]
            p0_denom += sum(train_matrix[i])
    # Take logs to avoid floating-point underflow when many small probabilities are
    # multiplied later; the plain version would be p1_num/p1_denom and p0_num/p0_denom.
    p1_vect = log(p1_num/p1_denom)
    p0_vect = log(p0_num/p0_denom)

    return  p0_vect, p1_vect, p_abusive
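
# Quick check of trainNB0 on the toy data (a sketch I added, not in the original post):
# p_ab comes out to 0.5 (3 of the 6 posts are abusive), and the two returned vectors
# hold log P(word | class) for every word in the vocabulary.
def _demo_train():
    posts, labels = load_data_set()
    vocab = create_vocab_list(posts)
    train_mat = [set_of_words_to_vec(vocab, post) for post in posts]
    p0_v, p1_v, p_ab = trainNB0(array(train_mat), array(labels))
    print('P(abusive) =', p_ab)
    print('log P("stupid" | abusive) =', p1_v[vocab.index('stupid')])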

def classifyNB(vec_to_classify, p0_vec, p1_vec, p_class1):
    # In log space the product P(c) * prod_i P(w_i | c) becomes a sum:
    # log P(c) + sum_i vec[i] * log P(w_i | c); the class with the larger sum wins.
    p1 = sum(vec_to_classify*p1_vec) + log(p_class1)
    p0 = sum(vec_to_classify*p0_vec) + log(1.0-p_class1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    list_of_posts, list_classes = load_data_set()
    my_vocab_list = create_vocab_list(list_of_posts)
    train_mat = []
    for posting_doc in list_of_posts:
        train_mat.append(set_of_words_to_vec(my_vocab_list, posting_doc))
    p0_v, p1_v, p_ab = trainNB0(array(train_mat), array(list_classes))
    test_entry = ['love', 'my', 'dalmation']
    this_doc = array(set_of_words_to_vec(my_vocab_list, test_entry))
    print(test_entry, 'classified as: ', classifyNB(this_doc, p0_v, p1_v, p_ab))
    test_entry = ['stupid', 'garbage']
    this_doc = array(set_of_words_to_vec(my_vocab_list, test_entry))
    print(test_entry, 'classified as: ', classifyNB(this_doc, p0_v, p1_v, p_ab))

def bag_of_words_to_vec_MN(vocab_list, input_set):
    return_vec = [0] * len(vocab_list)
    for word in input_set:
        if word in vocab_list:
            return_vec[vocab_list.index(word)] += 1
    return return_vec
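
# Contrast between the two representations (my example, not from the original post):
# the set-of-words vector only records presence, while the bag-of-words vector counts
# repetitions, which is what the multinomial model below expects.
def _demo_bag_vs_set():
    vocab = ['dog', 'stupid', 'garbage']
    doc = ['stupid', 'dog', 'stupid']
    print(set_of_words_to_vec(vocab, doc))     # [1, 1, 0]
    print(bag_of_words_to_vec_MN(vocab, doc))  # [1, 2, 0]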

def text_parse(big_string):
    import re
    # split on runs of non-word characters; '\W*' can match the empty string and is
    # rejected/broken as a split pattern in Python 3
    list_of_tokens = re.split(r'\W+', big_string)
    return [tok.lower() for tok in list_of_tokens if len(tok) > 2 ]
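
# Tokenization sketch (my addition): text_parse splits on runs of non-word characters,
# lower-cases everything, and drops tokens of two characters or fewer.
def _demo_text_parse():
    sample = 'This book is the BEST book on Python or M.L. I have ever laid eyes upon.'
    print(text_parse(sample))
    # -> ['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']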

def spam_test():
    doc_list = []
    class_list = []
    full_text = []
    for i in range(1, 26):
        word_list = text_parse(open('email/spam/%d.txt'%i).read())
        doc_list.append(word_list)
        full_text.extend(word_list)
        class_list.append(1)
        word_list = text_parse(open('email/ham/%d.txt'%i).read())
        doc_list.append(word_list)
        full_text.extend(word_list)
        class_list.append(0)
    vocab_list = create_vocab_list(doc_list)
    training_set = list(range(50))
    test_set = []
    for i in range(10):
        rand_index = int(random.uniform(0, len(training_set)))
        test_set.append(training_set[rand_index])
        del(training_set[rand_index])
    train_mat = []
    train_classes = []
    for doc_index in training_set:
        train_mat.append(set_of_words_to_vec(vocab_list, doc_list[doc_index]))
        train_classes.append(class_list[doc_index])
    p0_v, p1_v, p_spam = trainNB0(array(train_mat), array(train_classes))
    error_count = 0
    for doc_index in test_set:
        word_vector = set_of_words_to_vec(vocab_list, doc_list[doc_index])
        if classifyNB(array(word_vector), p0_v, p1_v, p_spam) != class_list[doc_index]:
            error_count += 1

    print('the error rate is : ', float(error_count)/len(test_set))
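
# spam_test() expects the book's email corpus on disk: email/spam/1.txt .. 25.txt and
# email/ham/1.txt .. 25.txt (the paths used above). Since the ten held-out documents are
# chosen at random, each run can print a different error rate, so repeating the test and
# averaging gives a steadier estimate. If any of the text files are not valid UTF-8,
# opening them with errors='ignore' is a common workaround. A minimal sketch:
def _run_spam_test(n_runs=10):
    for _ in range(n_runs):
        spam_test()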


def calc_most_freq(vocab_list, full_text):
    import operator
    freq_dict = {}
    for token in vocab_list:
        freq_dict[token] = full_text.count(token)
    sorted_freq = sorted(freq_dict.items(), key=operator.itemgetter(1), reverse=True)  # items(), not iteritems(), in Python 3
    return sorted_freq[:30]

def local_words(feed1, feed0):

    doc_list = []
    class_list = []
    full_text = []
    min_len = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(min_len):
        word_list = text_parse(feed1['entries'][i]['summary'])   # index with i, not a fixed 1
        doc_list.append(word_list)
        full_text.extend(word_list)
        class_list.append(1)
        word_list = text_parse(feed0['entries'][i]['summary'])
        doc_list.append(word_list)
        full_text.extend(word_list)
        class_list.append(0)
    vocab_list = create_vocab_list(doc_list)
    top_30_words = calc_most_freq(vocab_list, full_text)
    for pair_w in top_30_words:
        if pair_w[0] in vocab_list:
            vocab_list.remove(pair_w[0])
    training_set = list(range(2*min_len))   # range must be converted to a list, otherwise the del below raises an error
    test_set = []
    for i in range(20):
        rand_index = int(random.uniform(0, len(training_set)))
        test_set.append(training_set[rand_index])
        del(training_set[rand_index])
    train_mat = []
    train_classes = []
    for doc_index in training_set:
        train_mat.append(bag_of_words_to_vec_MN(vocab_list, doc_list[doc_index]))
        train_classes.append(class_list[doc_index])
    p0_v, p1_v, p_spam = trainNB0(array(train_mat), array(train_classes))
    error_count = 0
    for doc_index in test_set:
        word_vector = bag_of_words_to_vec_MN(vocab_list, doc_list[doc_index])
        if classifyNB(array(word_vector), p0_v, p1_v, p_spam) != class_list[doc_index]:
            error_count += 1
    print('the error rate is : ', float(error_count)/len(test_set))
    return vocab_list, p0_v, p1_v
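
# local_words() takes two feeds already parsed by feedparser. The URLs below are only
# placeholders from the book era and may no longer serve RSS; any two feeds whose
# entries carry a 'summary' field will work.
def _demo_local_words():
    ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')  # placeholder URL
    sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')    # placeholder URL
    return local_words(ny, sf)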


def get_top_words(ny, sf):
    import operator
    vocab_list, p0_v, p1_v = local_words(ny, sf)
    top_ny = []
    top_sf = []
    for i in range(len(p0_v)):
        if p0_v[i] > -6.0:
            top_sf.append((vocab_list[i], p0_v[i]))
        if p1_v[i] > -6.0:
            top_ny.append((vocab_list[i], p1_v[i]))
    # Sorting and printing belong outside the loop, otherwise they run once per word.
    sorted_sf = sorted(top_sf, key=lambda pair: pair[1], reverse=True)
    sorted_ny = sorted(top_ny, key=lambda pair: pair[1], reverse=True)
    print('SF'+'**SF'*10)
    for item in sorted_sf:
        print(item[0], end='\t')
    print()
    print('NY'+'**NY'*10)
    for item in sorted_ny:
        print(item[0], end='\t')
    print()
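
# Usage sketch (my addition): get_top_words() re-trains via local_words() and prints the
# most indicative words for each feed. Same caveat as above about the placeholder URLs.
def _demo_get_top_words():
    ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')  # placeholder URL
    sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')    # placeholder URL
    get_top_words(ny, sf)

# Entry point (added for convenience, not in the original post): only the toy-data demo
# runs by default, because spam_test() and the RSS demos need external data.
if __name__ == '__main__':
    testingNB()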