据了解，ResNet 在图像分类大赛上表现优异，那么将 ResNet 降维（改为一维卷积）用于一维数据分类会有怎样的表现？
先观察一下 ResNet 的结构：由于单张截图篇幅有限，所以使用多张图片展示 😂
（在这里插入图片描述）
第一个
第二个
第三个
第四个
第五个
第六个
第七个
第八个
第九个
第十个
结构就如上所示:
代码如下:
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 26 21:16:03 2020
@author: 1
"""
import numpy as np
import pandas as pd
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils,plot_model
from sklearn.model_selection import cross_val_score,train_test_split,KFold
from sklearn.preprocessing import LabelEncoder
from keras.layers import Input,add,Dense,AveragePooling1D, Activation, Flatten, Convolution1D, Dropout,MaxPooling1D,BatchNormalization,GlobalAveragePooling1D,ZeroPadding1D
from keras.models import load_model
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from keras import layers
from keras.optimizers import SGD
# Load the dataset: each row is 1024 sample points followed by a class label.
df = pd.read_csv(r'C:/Users/1/Desktop/实验数据/凯斯西储.csv')
# Reshape features to (samples, 1024, 1) so Conv1D layers can consume them.
X = np.expand_dims(df.values[:, 0:1024].astype(float), axis=2)
Y = df.values[:, 1024]
# One-hot encode the string/int class labels.
encoder = LabelEncoder()
Y_encoded = encoder.fit_transform(Y)
Y_onehot = np_utils.to_categorical(Y_encoded)
# 70/30 train/test split, reproducible via random_state.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_onehot, test_size=0.3, random_state=0)
# NOTE(review): this seed is set AFTER the split, so it does not affect
# train_test_split above (which uses random_state=0) — only later NumPy randomness.
seed = 7
np.random.seed(seed)
def Conv2d_BN(x, nb_filter, kernel_size, strides=1, padding='same', name=None):
    """Conv1D (ReLU) + BatchNormalization building block.

    Args:
        x: input tensor, channels-last, shape (batch, steps, channels).
        nb_filter: number of convolution filters.
        kernel_size: length of the 1D convolution window.
        strides: convolution stride.
        padding: 'same' or 'valid'.
        name: optional prefix; produces '<name>_conv' and '<name>_bn' layers.

    Returns:
        Output tensor of the Conv1D + BatchNorm stack.
    """
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    x = Convolution1D(nb_filter, kernel_size, padding=padding, strides=strides,
                      activation='relu', name=conv_name)(x)
    # Fix: normalize over the channel (feature) axis. The original axis=1
    # normalized over the temporal axis, which is wrong for channels-last
    # Conv1D data and ties the layer to a fixed sequence length.
    x = BatchNormalization(axis=-1, name=bn_name)(x)
    return x
def Conv_Block(inpt, nb_filter, kernel_size, strides=1, with_conv_shortcut=False):
    """Residual block: a 1-3-1 stack of Conv-BN layers plus a shortcut.

    When ``with_conv_shortcut`` is True the skip path is projected through an
    extra Conv-BN (used where stride or channel count changes); otherwise the
    input is added back directly as an identity shortcut.
    """
    out = Conv2d_BN(inpt, nb_filter=nb_filter, kernel_size=1, strides=strides, padding='same')
    out = Conv2d_BN(out, nb_filter=nb_filter, kernel_size=3, padding='same')
    out = Conv2d_BN(out, nb_filter=nb_filter, kernel_size=1, padding='same')
    if with_conv_shortcut:
        projected = Conv2d_BN(inpt, nb_filter=nb_filter, strides=strides, kernel_size=kernel_size)
        return add([out, projected])
    return add([out, inpt])
# ---- Assemble a ResNet-50-style network on 1D input ----
from keras.models import Model

# Derive the number of output classes from the one-hot labels instead of
# hard-coding 6, so the head stays correct if the dataset changes.
num_classes = Y_onehot.shape[1]

inpt = Input(shape=(1024, 1))
x = ZeroPadding1D(3)(inpt)
# Stem: wide strided conv + max-pool, as in the original ResNet.
x = Conv2d_BN(x, nb_filter=64, kernel_size=7, strides=2, padding='valid')
x = MaxPooling1D(pool_size=3, strides=2, padding='same')(x)
# Stage 1: 3 blocks, 64 filters.
x = Conv_Block(x, nb_filter=64, kernel_size=3, strides=1, with_conv_shortcut=True)
x = Conv_Block(x, nb_filter=64, kernel_size=3)
x = Conv_Block(x, nb_filter=64, kernel_size=3)
# Stage 2: 4 blocks, 128 filters (first block downsamples).
x = Conv_Block(x, nb_filter=128, kernel_size=3, strides=2, with_conv_shortcut=True)
x = Conv_Block(x, nb_filter=128, kernel_size=3)
x = Conv_Block(x, nb_filter=128, kernel_size=3)
x = Conv_Block(x, nb_filter=128, kernel_size=3)
# Stage 3: 6 blocks, 256 filters (first block downsamples).
x = Conv_Block(x, nb_filter=256, kernel_size=3, strides=2, with_conv_shortcut=True)
x = Conv_Block(x, nb_filter=256, kernel_size=3)
x = Conv_Block(x, nb_filter=256, kernel_size=3)
x = Conv_Block(x, nb_filter=256, kernel_size=3)
x = Conv_Block(x, nb_filter=256, kernel_size=3)
x = Conv_Block(x, nb_filter=256, kernel_size=3)
# Stage 4: 3 blocks, 512 filters (first block downsamples).
x = Conv_Block(x, nb_filter=512, kernel_size=3, strides=2, with_conv_shortcut=True)
x = Conv_Block(x, nb_filter=512, kernel_size=3)
x = Conv_Block(x, nb_filter=512, kernel_size=3)
# Head: average-pool, flatten, hidden dense layer, softmax classifier.
x = AveragePooling1D(pool_size=7)(x)
x = Flatten()(x)
x = Dense(100, activation='relu')(x)
x = Dense(num_classes, activation='softmax')(x)

model = Model(inputs=inpt, outputs=x)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train: batch size 64, 100 epochs, evaluating on the test split each epoch.
history = model.fit(X_train, Y_train, epochs=100, validation_data=(X_test, Y_test), batch_size=64)
model.save('C:/Users/1/Desktop/凯斯西储.h5')

# Plot training curves (loss + accuracy for both train and validation sets).
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # enable Chinese glyphs in figures
# Fix: '_' is the hline *marker* (it draws no connecting line); '-' is the
# solid line style presumably intended for the training loss — TODO confirm.
plt.plot(history.history['loss'], '-', label='train_loss')
plt.plot(history.history['val_loss'], ':', label='val_loss')
plt.plot(history.history['accuracy'], '.', label='train_acc')
plt.plot(history.history['val_accuracy'], 'r', label='val_acc')
# Fix: without legend(), the label= arguments above are never displayed.
plt.legend()
plt.show()
数据集的形式和之前发的那几篇帖子一样，感兴趣的话请自行下载。不过类别数为 6：本人从凯斯西储（CWRU）官网获取数据并自行制作，偷个懒只做了 6 类 😁
训练结果如下:
正确率
损失函数。可以看出，训练批次太少肯定是不行的，表现没有 1DCNN 那样优秀；但训练达到 100 个 epoch 时曲线已经比较平稳。后续还可以通过对噪声信号进行分类，比较它与 1DCNN 哪个更好；也可以将 AdaBN 理论应用进去以提高其性能（参考知网某文献，由于找不到原文，此处不列出名字）。