A First Attempt at Text Data

CNN

import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils, plot_model
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.preprocessing import LabelEncoder
from keras.layers import Dense, Dropout, Flatten, Conv1D, MaxPooling1D, LSTM
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils import to_categorical

# Load the data: 10 float feature columns plus a class label in column 10
df = pd.read_csv(r"D:\1\1.2\text_data\data\data.1.csv", header=None)
X = np.expand_dims(df.values[:, 0:10].astype(float), axis=2)  # shape (samples, 10, 1)
Y = df.values[:, 10]
Y_onehot = to_categorical(Y)  # assumes the labels are already integer-coded

# Split into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_onehot, test_size=0.25, random_state=0, stratify=Y_onehot)


def evaluate_model(trainX, trainy, testX, testy):
    # Train a 1-D CNN on the training split and return accuracy on the test split
    verbose, epochs, batch_size = 0, 10, 32
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]
    model = Sequential()
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)

    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)

    return accuracy

def run_experiment(trainX, trainy, testX, testy, repeats=10):
    scores = list()
    for r in range(repeats):
        score = evaluate_model(trainX, trainy, testX, testy)
        score = score * 100.0
        print('>#%d: %.3f' % (r + 1, score))
        scores.append(score)

    m, s = np.mean(scores), np.std(scores)
    print('Accuracy: %.3f%% (+/-%.3f)' % (m, s))



run_experiment(X_train, Y_train, X_test, Y_test, repeats=10)

>#1: 86.584
>#2: 86.445
>#3: 94.467
>#4: 95.712
>#5: 95.851
>#6: 89.350
>#7: 95.712
>#8: 87.275
>#9: 81.051
>#10: 96.680
Accuracy: 90.913% (+/-5.177)
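
The CSV path above points to a local file. To run these snippets without it, here is a minimal sketch that writes a compatible dummy file (entirely synthetic values; the post does not state the number of classes, so 3 is assumed here for illustration):

import numpy as np
import pandas as pd

# Synthetic stand-in for data.1.csv: 10 float feature columns plus an
# integer class label in column 10 (3 classes assumed, purely illustrative)
np.random.seed(0)
n_samples, n_classes = 2000, 3
features = np.random.randn(n_samples, 10)
labels = np.random.randint(0, n_classes, size=(n_samples, 1))
dummy = np.hstack([features, labels])
pd.DataFrame(dummy).to_csv("data.1.csv", header=False, index=False)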

LSTM

import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils, plot_model
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.preprocessing import LabelEncoder
from keras.layers import Dense, Dropout, Flatten, Conv1D, MaxPooling1D,LSTM
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools

# Load the data
df = pd.read_csv(r"D:\1\1.2\text_data\data\data.1.csv",header=None)
X = np.expand_dims(df.values[:, 0:10].astype(float),axis=2)
Y = df.values[:, 10]

# Encode the humidity class labels as integers
encoder = LabelEncoder()
Y_encoded = encoder.fit_transform(Y)
Y_onehot = np_utils.to_categorical(Y_encoded)

# Split into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_onehot, test_size=0.25, random_state=0, stratify=Y_onehot)

def evaluate_model(trainX, trainy, testX, testy):
    # Train an LSTM classifier and return accuracy on the test split
    verbose, epochs, batch_size = 0, 15, 16
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]

    model = Sequential()
    model.add(LSTM(100, input_shape=(n_timesteps, n_features)))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)

    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return accuracy


def run_experiment(trainX, trainy, testX, testy, repeats=10):
    scores = list()
    for r in range(repeats):
        score = evaluate_model(trainX, trainy, testX, testy)
        score = score * 100.0
        print('>#%d: %.3f' % (r + 1, score))
        scores.append(score)

    m, s = np.mean(scores), np.std(scores)
    print('Accuracy: %.3f%% (+/-%.3f)' % (m, s))

run_experiment(X_train, Y_train, X_test, Y_test, repeats=10)

>#1: 91.010
>#2: 91.286
>#3: 91.148
>#4: 93.638
>#5: 91.563
>#6: 93.914
>#7: 85.201
>#8: 91.978
>#9: 91.148
>#10: 91.148
Accuracy: 91.203% (+/-2.238)

CNN-LSTM

 

import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils, plot_model
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.preprocessing import LabelEncoder
from keras.layers import Dense, Dropout, Flatten, Conv1D, MaxPooling1D, LSTM
from keras.layers import TimeDistributed
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils import to_categorical

# Load the data
df = pd.read_csv(r"D:\1\1.2\text_data\data\data.1.csv",header=None)
X = np.expand_dims(df.values[:, 0:10].astype(float),axis=2)
Y = df.values[:, 10]
Y_onehot = to_categorical(Y)

# Split into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_onehot, test_size=0.25, random_state=0, stratify=Y_onehot)

def evaluate_model(trainX, trainy, testX, testy):
    # Train a stacked Conv1D + LSTM classifier and return accuracy on the test split
    verbose, epochs, batch_size = 0, 25, 16
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]

    model = Sequential()
    # The Conv1D feature sequence feeds the LSTM directly (no TimeDistributed wrapper)
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(LSTM(100))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)

    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return accuracy


def run_experiment(trainX, trainy, testX, testy, repeats=10):
    scores = list()
    for r in range(repeats):
        score = evaluate_model(trainX, trainy, testX, testy)
        score = score * 100.0
        print('>#%d: %.3f' % (r + 1, score))
        scores.append(score)

    m, s = np.mean(scores), np.std(scores)
    print('Accuracy: %.3f%% (+/-%.3f)' % (m, s))

run_experiment(X_train, Y_train, X_test, Y_test, repeats=10)

>#1: 65.560
>#2: 61.411
>#3: 59.474
>#4: 60.028
>#5: 54.219
>#6: 62.379
>#7: 56.570
>#8: 67.358
>#9: 63.762
>#10: 56.708
Accuracy: 60.747% (+/-3.970)
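
TimeDistributed is imported above but not used. The subsequence-based CNN-LSTM layout instead wraps the convolutional layers in TimeDistributed and feeds per-subsequence features to the LSTM. Below is a minimal sketch of that variant, reusing the n_steps=2, n_length=5 split from the ConvLSTM example that follows; it would replace the model definition inside evaluate_model and is not the code that produced the numbers above:

    # Reshape (samples, 10, 1) into (samples, n_steps, n_length, n_features)
    n_steps, n_length = 2, 5
    trainX_sub = trainX.reshape((trainX.shape[0], n_steps, n_length, n_features))
    testX_sub = testX.reshape((testX.shape[0], n_steps, n_length, n_features))

    model = Sequential()
    model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu'),
                              input_shape=(n_steps, n_length, n_features)))
    model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(100))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit/evaluate would then use trainX_sub / testX_sub instead of trainX / testX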

ConvLSTM

import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils, plot_model
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.preprocessing import LabelEncoder
from keras.layers import Dense, Dropout, Flatten, Conv1D, MaxPooling1D, LSTM, TimeDistributed, ConvLSTM2D
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils import to_categorical

# Load the data
df = pd.read_csv(r"D:\1\1.2\text_data\data\data.1.csv",header=None)
X = np.expand_dims(df.values[:, 0:10].astype(float),axis=2)
Y = df.values[:, 10]
Y_onehot = to_categorical(Y)


# Split into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_onehot, test_size=0.25, random_state=0, stratify=Y_onehot)

def evaluate_model(trainX, trainy, testX, testy):
    # Train a ConvLSTM2D classifier and return accuracy on the test split
    verbose, epochs, batch_size = 0, 15, 16
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]

    # Split the 10 timesteps into 2 subsequences of 5 steps each;
    # ConvLSTM2D expects per-sample input of shape (time, rows, cols, channels)
    n_steps, n_length = 2, 5

    trainX = trainX.reshape((trainX.shape[0], n_steps, 1, n_length, n_features))
    testX = testX.reshape((testX.shape[0], n_steps, 1, n_length, n_features))

    model = Sequential()
    model.add(
        ConvLSTM2D(filters=64, kernel_size=(1, 3), activation='relu', input_shape=(n_steps, 1, n_length, n_features)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)

    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return accuracy


def run_experiment(trainX, trainy, testX, testy, repeats=10):
    scores = list()
    for r in range(repeats):
        score = evaluate_model(trainX, trainy, testX, testy)
        score = score * 100.0
        print('>#%d: %.3f' % (r + 1, score))
        scores.append(score)

    m, s = np.mean(scores), np.std(scores)
    print('Accuracy: %.3f%% (+/-%.3f)' % (m, s))

run_experiment(X_train, Y_train, X_test, Y_test, repeats=10)

>#1: 97.787
>#2: 93.499
>#3: 95.574
>#4: 96.680
>#5: 96.266
>#6: 97.372
>#7: 93.776
>#8: 96.819
>#9: 97.649
>#10: 95.436
Accuracy: 96.086% (+/-1.436)

MLP

import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils, plot_model
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.preprocessing import LabelEncoder
from keras.layers import Dense, Dropout, Flatten, Conv1D, MaxPooling1D, LSTM
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils import to_categorical

# Load the data
df = pd.read_csv(r"D:\1\1.2\text_data\data\data.1.csv",header=None)
X = np.expand_dims(df.values[:, 0:10].astype(float),axis=2)
Y = df.values[:, 10]
Y_onehot = to_categorical(Y)

# Split into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_onehot, test_size=0.25, random_state=0, stratify=Y_onehot)

def evaluate_model(trainX, trainy, testX, testy):
    # Train a multilayer perceptron and return accuracy on the test split
    verbose, epochs, batch_size = 0, 25, 16
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]

    model = Sequential()
    # Flatten the (timesteps, features) input so the dense layers receive 2-D data
    model.add(Flatten(input_shape=(n_timesteps, n_features)))
    model.add(Dense(200, activation='relu'))
    model.add(Dense(100))
    model.add(Dense(50))
    model.add(Dense(n_outputs, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)

    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return accuracy


def run_experiment(trainX, trainy, testX, testy, repeats=10):
    scores = list()
    for r in range(repeats):
        score = evaluate_model(trainX, trainy, testX, testy)
        score = score * 100.0
        print('>#%d: %.3f' % (r + 1, score))
        scores.append(score)

    m, s = np.mean(scores), np.std(scores)
    print('Accuracy: %.3f%% (+/-%.3f)' % (m, s))

run_experiment(X_train, Y_train, X_test, Y_test, repeats=10)

>#1: 96.266
>#2: 93.638
>#3: 95.297
>#4: 93.914
>#5: 88.382
>#6: 96.819
>#7: 77.732
>#8: 96.680
>#9: 94.744
>#10: 96.404
Accuracy: 92.988% (+/-5.607)
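
Mean accuracy over the 10 repeats, taken from the outputs above: CNN 90.913%, LSTM 91.203%, CNN-LSTM 60.747%, ConvLSTM 96.086%, MLP 92.988%.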

 

 
