Spam detection with neural networks — both LSTM and CNN (1-D convolution) work well [the code has problems, skip]...

 

from sklearn.feature_extraction.text import CountVectorizer
import os
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm
from sklearn.feature_extraction.text import TfidfTransformer
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_1d, global_max_pool
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression
from tflearn.data_utils import to_categorical, pad_sequences
from sklearn.neural_network import MLPClassifier
from tflearn.layers.normalization import local_response_normalization
from tensorflow.contrib import learn


max_features=500
max_document_length=1024



def load_one_file(filename):
    x=""
    with open(filename) as f:
        for line in f:
            line=line.strip('\n')
            line = line.strip('\r')
            x+=line
    return x

def load_files_from_dir(rootdir):
    x=[]
    for filename in os.listdir(rootdir):
        path = os.path.join(rootdir, filename)
        if os.path.isfile(path):
            v=load_one_file(path)
            x.append(v)
    return x

def load_all_files():
    ham=[]
    spam=[]
    for i in range(1,5):
        path="../data/mail/enron%d/ham/" % i
        print "Load %s" % path
        ham+=load_files_from_dir(path)
        path="../data/mail/enron%d/spam/" % i
        print "Load %s" % path
        spam+=load_files_from_dir(path)
    return ham,spam

def get_features_by_wordbag():
    ham, spam=load_all_files()
    x=ham+spam
    y=[0]*len(ham)+[1]*len(spam)
    vectorizer = CountVectorizer(
                                 decode_error='ignore',
                                 strip_accents='ascii',
                                 max_features=max_features,
                                 stop_words='english',
                                 max_df=1.0,
                                 min_df=1 )
    print vectorizer
    x=vectorizer.fit_transform(x)
    x=x.toarray()
    return x,y

def show_diffrent_max_features():
    global max_features
    a=[]
    b=[]
    for i in range(1000,20000,2000):
        max_features=i
        print "max_features=%d" % i
        x, y = get_features_by_wordbag()
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=0)
        gnb = GaussianNB()
        gnb.fit(x_train, y_train)
        y_pred = gnb.predict(x_test)
        score=metrics.accuracy_score(y_test, y_pred)
        a.append(max_features)
        b.append(score)
    # plot accuracy against max_features once all points are collected
    plt.plot(a, b, 'r')
    plt.xlabel("max_features")
    plt.ylabel("metrics.accuracy_score")
    plt.title("metrics.accuracy_score VS max_features")
    plt.show()

def do_nb_wordbag(x_train, x_test, y_train, y_test):
    print "NB and wordbag"
    gnb = GaussianNB()
    gnb.fit(x_train,y_train)
    y_pred=gnb.predict(x_test)
    print metrics.accuracy_score(y_test, y_pred)
    print metrics.confusion_matrix(y_test, y_pred)

def do_svm_wordbag(x_train, x_test, y_train, y_test):
    print "SVM and wordbag"
    clf = svm.SVC()
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    print metrics.accuracy_score(y_test, y_pred)
    print metrics.confusion_matrix(y_test, y_pred)

def get_features_by_wordbag_tfidf():
    ham, spam=load_all_files()
    x=ham+spam
    y=[0]*len(ham)+[1]*len(spam)
    vectorizer = CountVectorizer(binary=True,
                                 decode_error='ignore',
                                 strip_accents='ascii',
                                 max_features=max_features,
                                 stop_words='english',
                                 max_df=1.0,
                                 min_df=1 )
    print vectorizer
    x=vectorizer.fit_transform(x)
    x=x.toarray()
    transformer = TfidfTransformer(smooth_idf=False)
    print transformer
    tfidf = transformer.fit_transform(x)
    x = tfidf.toarray()
    return  x,y


def do_cnn_wordbag(trainX, testX, trainY, testY):
    global max_document_length
    print "CNN and tf"

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_document_length], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100,run_id="spam")

def do_rnn_wordbag(trainX, testX, trainY, testY):
    global max_document_length
    print "RNN and wordbag"

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10,run_id="spm-run",n_epoch=5)


def do_dnn_wordbag(x_train, x_test, y_train, y_test):
    print "DNN and wordbag"

    # Building deep neural network
    clf = MLPClassifier(solver='lbfgs',
                        alpha=1e-5,
                        hidden_layer_sizes = (5, 2),
                        random_state = 1)
    print clf
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    print metrics.accuracy_score(y_test, y_pred)
    print metrics.confusion_matrix(y_test, y_pred)



def get_features_by_tf():
    global max_document_length
    ham, spam=load_all_files()
    x=ham+spam
    y=[0]*len(ham)+[1]*len(spam)
    vp=tflearn.data_utils.VocabularyProcessor(max_document_length=max_document_length,
                                              min_frequency=0,
                                              vocabulary=None,
                                              tokenizer_fn=None)
    x=vp.fit_transform(x, unused_y=None)
    x=np.array(list(x))
    return x,y



if __name__ == "__main__":
    print "Hello spam-mail"
    #print "get_features_by_wordbag"
    #x,y=get_features_by_wordbag()
    #x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.4, random_state = 0)

    #print "get_features_by_wordbag_tfidf"
    #x,y=get_features_by_wordbag_tfidf()
    #x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.4, random_state = 0)
    #NB
    #do_nb_wordbag(x_train, x_test, y_train, y_test)
    #show_diffrent_max_features()

    #SVM
    #do_svm_wordbag(x_train, x_test, y_train, y_test)

    #DNN
    #do_dnn_wordbag(x_train, x_test, y_train, y_test)

    print "get_features_by_tf"
    x,y=get_features_by_tf()
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.4, random_state = 0)
    #CNN
    do_cnn_wordbag(x_train, x_test, y_train, y_test)


    #RNN
    #do_rnn_wordbag(x_train, x_test, y_train, y_test)

When you write your own detection algorithm, remember to compare several algorithms on the same data as well.
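As a rough illustration of that comparison, here is a minimal sketch that cross-validates a few scikit-learn classifiers on the same bag-of-words features. The helper name compare_classifiers, the classifier list, and the fold count are illustrative assumptions, not part of the original code.

# Minimal comparison sketch (assumption: x, y come from get_features_by_wordbag()
# or another feature function above; classifier choices are illustrative only).
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier

def compare_classifiers(x, y):
    candidates = {
        "GaussianNB": GaussianNB(),
        "SVC": SVC(),
        "MLP": MLPClassifier(solver='lbfgs', alpha=1e-5,
                             hidden_layer_sizes=(5, 2), random_state=1),
    }
    for name, clf in candidates.items():
        # 3-fold cross-validated accuracy on the same feature matrix
        scores = cross_val_score(clf, x, y, cv=3, scoring='accuracy')
        print "%s mean accuracy: %f" % (name, scores.mean())

# Usage (same feature pipeline as the NB/SVM/DNN functions above):
# x, y = get_features_by_wordbag()
# compare_classifiers(x, y)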

Reposted from: https://www.cnblogs.com/bonelee/p/7908573.html
