# 1D-CNN fault diagnosis (1DCNN故障诊断)

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input, LSTM, Conv2D, Flatten, Reshape,Convolution1D,MaxPooling1D
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

from tensorflow.keras import backend as K
# from keras.layers import Activation
# from keras.utils.generic_utils import get_custom_objects
from tensorflow import keras
# from tensorflow_core.python.keras.utils import np_utils


#
# k=26
# dataset = pd.read_csv('train-data16.csv')
# X = np.array(dataset.values[:, :-1])  #导入数据,维度为1024
# pca = PCA(n_components=k )  #降到k维
# pca.fit(X)                  #训练
# newX=pca.fit_transform(X)   #降维后的数据
# PCA(copy=True, n_components=2, whiten=False)
# print(pca.explained_variance_ratio_)  #输出贡献率
# print(newX)

# labels1 = dataset.values[:, -1]
# # images = dataset.values[:, 2:1026]
# labels2 = pd.get_dummies(labels1)    # 转化one-hot格式
# labels = labels2.values       # DataFrame转化为array
# X = np.expand_dims(newX, axis=2)#增加一维轴
# X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.2)     # 划分训练集和测试集

# --- Newly added: data loading and preprocessing ---

dataset = pd.read_csv('ENB_nor.csv',header=None)
X = np.array(dataset.values[2:, :-1])  # load features: skip first two rows, last column is the label
X= (X-X.min(axis=0)) / (X.max(axis=0)-X.min(axis=0))  # per-column min-max normalization to [0, 1]
# Add a trailing channel axis: (samples, features, 1), as required by Conv1D.
# NOTE(review): create_model() below hard-codes input_shape=(52, 1), which
# assumes the CSV yields 52 feature columns — confirm against the data file.
X = X.reshape(X.shape[0], -1, 1)
print(X.shape)

labels1 = dataset.values[2:, -1]
labels2 = pd.get_dummies(labels1)    # one-hot encode the class labels
labels = labels2.values       # DataFrame -> numpy array

# 80/20 random split into training and test sets.
X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.2)     # split train/test sets

# Cast all arrays to Keras' default float dtype (floatx).
X_train = K.cast_to_floatx(X_train)
y_train = K.cast_to_floatx(y_train)
X_test= K.cast_to_floatx(X_test)
y_test = K.cast_to_floatx(y_test)




def create_model(learning_rate, num_dense_layers, num_dense_nodes,
                 input_shape=(52, 1), num_classes=6):
    """Build and compile a 1D-CNN classifier for fault diagnosis.

    Parameters
    ----------
    learning_rate : float
        Learning rate for the Adam optimizer.
    num_dense_layers : int
        Currently UNUSED: the dense-stack loop it controlled is disabled.
        Kept so existing callers keep working.
    num_dense_nodes : int
        Currently UNUSED (see num_dense_layers).
    input_shape : tuple of int, optional
        Shape of a single sample, (timesteps, channels). Defaults to
        (52, 1); must match the feature count of the training data.
    num_classes : int, optional
        Number of softmax output units. Defaults to 6.

    Returns
    -------
    keras.Sequential
        The compiled model. ``model.summary()`` is printed as a side effect.
    """
    model = Sequential()

    # Five stacked Conv1D feature extractors. 'same' padding preserves the
    # temporal length, so only the final pooling layer halves it.
    model.add(Convolution1D(filters=64, kernel_size=3, padding='same',
                            activation='relu', input_shape=input_shape))
    # L2 weight penalty on the second conv layer only (as in the original).
    model.add(Convolution1D(filters=64, kernel_size=3, padding='same',
                            activation='relu',
                            kernel_regularizer=keras.regularizers.l2(0.001)))
    model.add(Convolution1D(filters=64, kernel_size=3, padding='same', activation='relu'))
    model.add(Convolution1D(filters=64, kernel_size=3, padding='same', activation='relu'))
    model.add(Convolution1D(filters=64, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))

    model.add(Flatten())
    model.add(Dense(num_classes, activation='softmax', name='output'))

    # Adam + categorical cross-entropy suits the one-hot multi-class labels.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()  # print the architecture
    return model

def fitness(learning_rate, num_dense_layers, num_dense_nodes):
    """Build and train the network, then report its final test accuracy.

    Trains on the module-level ``X_train``/``y_train`` split and validates
    on ``X_test``/``y_test``.

    Parameters
    ----------
    learning_rate : float
        Learning-rate for the optimizer.
    num_dense_layers : int
        Number of dense layers (forwarded to ``create_model``).
    num_dense_nodes : int
        Number of nodes in each dense layer (forwarded to ``create_model``).

    Returns
    -------
    keras.callbacks.History
        The training history produced by ``model.fit``.
    """
    # Echo the hyper-parameters being evaluated.
    print('learning rate: {0:.1e}'.format(learning_rate))
    print('num_dense_layers:', num_dense_layers)
    print('num_dense_nodes:', num_dense_nodes)

    # Fresh network for this hyper-parameter combination.
    net = create_model(learning_rate=learning_rate,
                       num_dense_layers=num_dense_layers,
                       num_dense_nodes=num_dense_nodes)

    # 200 epochs of mini-batch (64) training, scoring the held-out split
    # after every epoch.
    history = net.fit(x=X_train, y=y_train,
                      batch_size=64, epochs=200,
                      validation_data=(X_test, y_test),
                      verbose=2, callbacks=None)

    final_val_acc = history.history['val_accuracy'][-1]
    print("Accuracy: {0:.2%}".format(final_val_acc))
    return history
# Train with learning rate 5e-4; the dense-layer hyper-parameters are
# currently unused by create_model, hence (0, 0).
history = fitness(0.0005, 0, 0)  # set learning rate, hidden-layer count and node count
# model.save('rnn_weight.h5')
# plt.plot(epoches,accurcy)
# plt.show()
accuracy = history.history['val_accuracy']  # per-epoch test accuracy
accura = history.history['accuracy']  # per-epoch training accuracy

# Plot train vs. test accuracy over the training epochs.
# plt.plot(history.history['loss'], label='train')
plt.plot(accuracy, label='test')
plt.plot(accura, label='train')

# plt.plot(history.history('loss'),label='train')
# plt.plot(history.history('val_loss'),label='test')
plt.title('1DCNN', fontsize='12')
plt.ylabel('acc', fontsize='10')
plt.xlabel('epoch', fontsize='10')
plt.legend()
plt.show()

# (Removed web-scrape artifacts: CSDN page boilerplate — like/favorite/reward
# widgets and payment prompts that were never part of the source code.)