2021-03-22 TensorFlow Fault Diagnosis Code: Converting a 1D Signal to a 2D Grayscale Image

Fault diagnosis code: converting a 1D signal to a 2D grayscale image

Case Western Reserve University (CWRU) bearing data splitting and grayscale image conversion

https://mp.weixin.qq.com/s?__biz=MzkxMzE5Mzk0Mw==&mid=2247483664&idx=1&sn=bd3f32dea556196cfc79e3f01cb2a66d&chksm=c1002fa8f677a6bea82caf5a28ee493416685d119901c3bc508e8713bdc28f162cffe779f18b#rd
The grayscale image conversion follows this paper: A New Convolutional Neural Network-Based Data-Driven Fault Diagnosis Method, IEEE TRANSACTIONS ON INDUSTRIAL ELECTRONICS, by Long Wen of Huazhong University of Science and Technology.
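For reference, here is a minimal sketch of that conversion as I understand it from the paper: take M×M consecutive samples of the raw vibration signal, min-max rescale them to 0-255, and reshape the segment into an M×M grayscale image (M = 64 here to match the network input below). The function names signal_to_grayscale and save_segments, the segment length, and the JPEG naming are my own assumptions, not code from the original post.

import os
import numpy as np
from PIL import Image

def signal_to_grayscale(segment, img_size=64):
    # Rescale one segment of length img_size**2 to [0, 255] and reshape it
    # into an img_size x img_size 8-bit grayscale image.
    seg = np.asarray(segment[:img_size * img_size], dtype=np.float64)
    seg = (seg - seg.min()) / (seg.max() - seg.min() + 1e-12)
    img = np.round(seg * 255).astype(np.uint8).reshape(img_size, img_size)
    return Image.fromarray(img, mode='L')

def save_segments(signal, out_dir, img_size=64):
    # Hypothetical helper: cut a long 1-D signal into non-overlapping segments
    # and save each one as a JPEG so the training script below can read it.
    os.makedirs(out_dir, exist_ok=True)
    seg_len = img_size * img_size
    for i in range(len(signal) // seg_len):
        segment = signal[i * seg_len:(i + 1) * seg_len]
        signal_to_grayscale(segment, img_size).save(os.path.join(out_dir, f'{i}.jpg'))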

Fault diagnosis code

This is a fairly simple convolutional neural network, running on TensorFlow 2.0.
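Note on inputs: the script assumes the grayscale images have already been generated and saved under a root folder with one sub-folder per fault class (the sub-folder name becomes the class label), at most 500 JPEG images per class, each 64×64 pixels. The class folder names in this illustrative layout are placeholders:

E:\CWRU\灰度图\
    ball_fault\0.jpg, 1.jpg, ...
    inner_race_fault\0.jpg, 1.jpg, ...
    ...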

import os, glob
import random, csv
from matplotlib import pyplot as plt
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers,optimizers, losses
from PIL import Image
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from sklearn import preprocessing  # for one-hot encoding
from sklearn.metrics import confusion_matrix
import seaborn as sns

def divide(root, mode='train'):  # split the image list into train/val/test (60/20/20)
    images, labels = load_csv(root, '3.12.csv', ratio)
    if mode == 'train':
        images = images[:int(0.6 * len(images))]
        labels = labels[:int(0.6 * len(labels))]
    elif mode == 'val':
        images = images[int(0.6 * len(images)):int(0.8 * len(images))]
        labels = labels[int(0.6 * len(labels)):int(0.8 *len(labels))]
    else:
        images = images[int(0.8 * len(images)):int(len(images))]
        labels = labels[int(0.8 * len(labels)):int(len(labels))]
    return images, labels

def load_csv(root, filename,  ratio):

    name2label = {}  # label-encoding dict, e.g. "sq...": 0
    file = os.listdir(root)  # each class has its own sub-folder; os.path.isdir() below filters out non-directory entries
    for name in file:
        if not os.path.isdir(os.path.join(root, name)):
            continue
        name2label[name] = len(name2label.keys())
    images = []
    for name in name2label.keys():
         images += glob.glob(os.path.join(root, name, '*.jpg'))[0:int(500*ratio)]
    random.shuffle(images)
    with open(os.path.join(root, filename), mode='w', newline='') as f:
        writer = csv.writer(f)
        for img in images:
            name = img.split(os.sep)[-2]  # the second-to-last path component is the parent folder name, i.e. the class label
            label = name2label[name]
            writer.writerow([img, label])
        print('written into csv file:', filename)

    images, labels = [], []
    with open(os.path.join(root, filename)) as f:
        reader = csv.reader(f)
        for row in reader:
            img, label = row
            label = int(label)
            images.append(img)
            labels.append(label)

    return images, labels


def read_image(img_name):
    im = Image.open(img_name).convert('L')  # load as 8-bit grayscale
    data = np.array(im)
    data = data / 255  # scale pixel values to [0, 1]
    return data

def one_hot(Y_samples):
    Y_samples = np.array(Y_samples).reshape([-1, 1])
    Encoder = preprocessing.OneHotEncoder()
    Encoder.fit(Y_samples)
    Y_samples = Encoder.transform(Y_samples).toarray()
    Y_samples = np.asarray(Y_samples, dtype=np.int32)
    return Y_samples

def reshuffle(X, Y):
    state = np.random.get_state()  # reuse the same RNG state so X and Y stay aligned after shuffling
    np.random.shuffle(X)
    np.random.set_state(state)
    np.random.shuffle(Y)
    return X, Y

model = keras.Sequential([
    layers.Conv2D(filters=32, kernel_size=5, strides=2, activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=2, strides=2, padding='same'),

    layers.Conv2D(filters=64, kernel_size=3, strides=2, activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=2, strides=2, padding='same'),

    layers.Conv2D(filters=128, kernel_size=3, strides=2, activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=2, strides=2, padding='same'),

    layers.Conv2D(filters=256, kernel_size=3, strides=2, activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=2, strides=2, padding='same'),

    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')  # 10 fault classes
])

root = r'E:\CWRU\灰度图'  # folder with one sub-folder of grayscale images per fault class
ratio = 0.4               # fraction of the (at most 500) images per class that are used
ratio_test = 0.2          # defined but not used below
trainx, trainy = divide(root, mode='train')
train_x = []
for i in trainx:
    train_x.append(read_image(i))
testx, testy = divide(root, mode='test')
test_x = []
for i in testx:
    test_x.append(read_image(i))

validx, validy = divide(root, mode='val')
valid_x = []
for i in validx:
    valid_x.append(read_image(i))
train_y = one_hot(trainy)
test_y = one_hot(testy)
valid_y = one_hot(validy)
train_x,train_y = reshuffle(train_x,train_y)
test_x,test_y = reshuffle(test_x,test_y)
valid_x,valid_y = reshuffle(valid_x,valid_y)
train_x = np.asarray(train_x)
test_x = np.asarray(test_x)
valid_x = np.asarray(valid_x)
train_x, valid_x, test_x = train_x[:,:,:,np.newaxis], valid_x[:,:,:,np.newaxis], test_x[:,:,:,np.newaxis]

batch_size = 32
epochs = 100

model.build(input_shape=(None, 64, 64, 1))
model.summary()


model.compile(optimizer=optimizers.Adam(learning_rate=3e-5),
              loss=losses.CategoricalCrossentropy(from_logits=False),  # the last layer already applies softmax
              metrics=['accuracy'])
history  = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs,
          verbose=1, validation_data=(valid_x, valid_y), shuffle=True
          )

score = model.evaluate(x=test_x, y=test_y, verbose=0)
print('test loss and accuracy:', score)
acc = history.history['val_accuracy']
loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)

y_pred = np.argmax(model.predict(test_x), axis=-1)  # Sequential.predict_classes was removed in newer TF versions
truelabel = test_y.argmax(-1)
con_mat = confusion_matrix(truelabel, y_pred)
con_mat_norm = con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis]  # normalize each row to per-class rates
con_mat_norm = np.around(con_mat_norm, decimals=2)
plt.plot(epochs,acc,label='val acc')
plt.title('val accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, label='val loss')
plt.title('val loss')
plt.legend()
plt.figure(figsize=(8, 8))
sns.heatmap(con_mat_norm, annot=True, cmap='Blues')
plt.ylim(0, 10)
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.show()