Simple autoencoder-based ECG anomaly detection (TensorFlow, Keras)

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
# Load the ECG dataset: each row holds 140 samples of one heartbeat plus a label in the last column
dataframe = pd.read_csv('data/ecg.csv', header=None)
dataframe.head()

data = dataframe.values
data.shape
(4998, 141)
# The last column contains the labels
labels = data[:, -1]


features = data[:, 0:-1]


xtrain, xtest, ytrain, ytest = train_test_split(
    features, labels, test_size=0.4, random_state=55
)
xval,xtest,yval,ytest=train_test_split(
    xtest, ytest, test_size=0.5, random_state=56)
ytest.shape

(1000,)
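As a quick sanity check (not part of the original post), one can count how many normal (label 1.0) and anomalous (label 0.0) beats landed in each split:

# Sanity check (sketch): class counts per split; in this dataset label 1.0 marks a normal beat
for name, y in [("train", ytrain), ("val", yval), ("test", ytest)]:
    values, counts = np.unique(y, return_counts=True)
    print(name, dict(zip(values, counts)))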

Normalizing the data with min-max scaling, using the minimum and maximum of the training set:

min_vals = tf.reduce_min(xtrain)
max_vals = tf.reduce_max(xtrain)


xtrain = (xtrain - min_vals) / (max_vals - min_vals)
xval = (xval - min_vals) / (max_vals - min_vals)
xtest = (xtest - min_vals) / (max_vals - min_vals)


xtrain = tf.cast(xtrain, tf.float32)
xtest = tf.cast(xtest, tf.float32)
xval=tf.cast(xval, tf.float32)
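For reference, scikit-learn's MinMaxScaler is a common alternative; note that it scales each of the 140 columns independently, whereas the code above uses a single global min/max over the whole training array. A sketch only, where xtrain_raw, xval_raw and xtest_raw are hypothetical names for the splits before the scaling above:

# Alternative sketch: per-column min-max scaling with scikit-learn,
# fitted on the raw training split only (xtrain_raw etc. are hypothetical, pre-scaling splits)
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
xtrain_mm = scaler.fit_transform(xtrain_raw)
xval_mm = scaler.transform(xval_raw)
xtest_mm = scaler.transform(xtest_raw)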
ytrain.shape

(2998,)

ytrain = ytrain.astype(bool)
ytest = ytest.astype(bool)
yval = yval.astype(bool)


# Label 1 (True) marks a normal beat; label 0 (False) an anomalous one
normal_xtrain = xtrain[ytrain]
normal_xval = xval[yval]


anomalous_xtrain = xtrain[~ytrain]
anomalous_xval = xval[~yval]
plt.grid()
plt.plot(np.arange(140), normal_xtrain[0])
plt.title("Normal ECG")
plt.show()

plt.grid()
plt.plot(np.arange(140), anomalous_xtrain[0])
plt.title("Anomalous ECG")
plt.show()

class AnomalyDetector(Model):
    def __init__(self):
        super(AnomalyDetector, self).__init__()
        # Encoder: compress each 140-sample beat into an 8-dimensional code
        self.encoder = tf.keras.Sequential([
          layers.Dense(32, activation="relu"),
          layers.Dense(8, activation="relu")])


        # Decoder: reconstruct the 140-sample beat from the code
        self.decoder = tf.keras.Sequential([
          layers.Dense(32, activation="relu"),
          layers.Dense(140, activation="sigmoid")])


    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded


# Train the autoencoder to reconstruct normal beats only (input and target are the same)
autoencoder = AnomalyDetector()
autoencoder.compile(optimizer='nadam', loss='mae')
history = autoencoder.fit(normal_xtrain, normal_xtrain, 
          epochs=100, 
          batch_size=512,
          validation_data=(xval, xval),
          shuffle=True)
plt.plot(history.history["loss"], label="Training Loss")
plt.plot(history.history["val_loss"], label="Validation Loss")
plt.legend()
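If the validation loss curve flattens or starts rising well before 100 epochs, an EarlyStopping callback is a common addition; a sketch, not used in the original run:

# Optional (sketch): stop training when validation loss stops improving
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=10, restore_best_weights=True)

history = autoencoder.fit(normal_xtrain, normal_xtrain,
          epochs=100,
          batch_size=512,
          validation_data=(xval, xval),
          shuffle=True,
          callbacks=[early_stop])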

encoded_imgs = autoencoder.encoder(normal_xval).numpy()
decoded_imgs = autoencoder.decoder(encoded_imgs).numpy()


plt.plot(normal_xval[0],'b')
plt.plot(decoded_imgs[0],'r')
plt.fill_between(np.arange(140), decoded_imgs[0], normal_xval[0], color='yellow' )
plt.legend(labels=["Input", "Reconstruction", "Error"])
plt.show()

encoded_imgs = autoencoder.encoder(anomalous_xval).numpy()
decoded_imgs = autoencoder.decoder(encoded_imgs).numpy()


plt.plot(anomalous_xval[0],'b')
plt.plot(decoded_imgs[0],'r')
plt.fill_between(np.arange(140), decoded_imgs[0], anomalous_xval[0], color='yellow' )
plt.legend(labels=["Input", "Reconstruction", "Error"])
plt.show()

reconstructions = autoencoder.predict(normal_xtrain)
train_loss = tf.keras.losses.mae(reconstructions, normal_xtrain)


plt.hist(train_loss, bins=50)
plt.xlabel("Train loss")
plt.ylabel("No of examples")
plt.show()

reconstructions = autoencoder.predict(anomalous_xtrain)
test_loss = tf.keras.losses.mae(reconstructions, anomalous_xtrain)


plt.hist(test_loss, bins=50)
plt.xlabel("anamoly loss")
plt.ylabel("No of examples")
plt.show()

# The threshold (a hyperparameter) is chosen from the train-loss and anomaly-loss histograms above.
threshold = 0.03
reconstructions = autoencoder(xval)
loss = tf.keras.losses.mae(reconstructions, xval)
# A beat is predicted "normal" (True) when its reconstruction error is below the threshold
preds = tf.math.less(loss, threshold)


print("Accuracy = {}".format(accuracy_score(yval, preds)))
print("Precision = {}".format(precision_score(yval, preds)))
print("Recall = {}".format(recall_score(yval, preds)))
Accuracy = 0.967
Precision = 0.9878472222222222
Recall = 0.9563025210084034

reconstructions = autoencoder(xtest)
loss = tf.keras.losses.mae(reconstructions, xtest)
preds = tf.math.less(loss, threshold)


print("Accuracy = {}".format(accuracy_score(ytest, preds)))
print("Precision = {}".format(precision_score(ytest, preds)))
print("Recall = {}".format(recall_score(ytest, preds)))

Accuracy = 0.963
Precision = 0.9857142857142858
Recall = 0.9500860585197934
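The 0.03 threshold above was read off the loss histograms by hand. A common alternative, sketched here and not part of the original post, is to derive it from the training reconstruction error (e.g. mean plus one standard deviation) and wrap the evaluation in a small helper:

# Sketch: derive the threshold from the training reconstruction error
reconstructions = autoencoder.predict(normal_xtrain)
train_loss = tf.keras.losses.mae(reconstructions, normal_xtrain)
threshold = np.mean(train_loss) + np.std(train_loss)   # rule of thumb, not the post's 0.03
print("Threshold:", threshold)

# Sketch: reusable evaluation helper (predicts "normal" when loss < threshold)
def evaluate(model, x, y, threshold):
    loss = tf.keras.losses.mae(model(x), x)
    preds = tf.math.less(loss, threshold)
    print("Accuracy  =", accuracy_score(y, preds))
    print("Precision =", precision_score(y, preds))
    print("Recall    =", recall_score(y, preds))

evaluate(autoencoder, xtest, ytest, threshold)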


Autoencoder-based anomaly detection is an unsupervised learning approach: an autoencoder is trained to learn an internal representation of the data, and samples are judged anomalous when their reconstruction error is large. Below is a simple, self-contained example of this pattern:

```python
import numpy as np
import tensorflow as tf

# Build the autoencoder model
class Autoencoder(tf.keras.Model):
    def __init__(self, input_dim, encoding_dim):
        super(Autoencoder, self).__init__()
        self.encoder = tf.keras.layers.Dense(encoding_dim, activation='relu')
        self.decoder = tf.keras.layers.Dense(input_dim, activation='sigmoid')

    def call(self, inputs):
        encoded = self.encoder(inputs)
        decoded = self.decoder(encoded)
        return decoded

# Train the autoencoder
def train_autoencoder(data, encoding_dim, epochs):
    input_dim = data.shape[1]
    autoencoder = Autoencoder(input_dim, encoding_dim)
    autoencoder.compile(optimizer='adam', loss='mse')
    autoencoder.fit(data, data, epochs=epochs, batch_size=32)
    return autoencoder

# Use the autoencoder for anomaly detection
def detect_anomalies(data, autoencoder, threshold):
    reconstructions = autoencoder.predict(data)
    errors = np.mean(np.square(data - reconstructions), axis=1)
    anomalies = np.where(errors > threshold)[0]
    return anomalies

# Example data
data = np.random.normal(loc=0, scale=1, size=(1000, 10))

# Train the autoencoder
encoding_dim = 5
epochs = 50
autoencoder = train_autoencoder(data, encoding_dim, epochs)

# Detect anomalies with the trained autoencoder
threshold = 0.1
anomalies = detect_anomalies(data, autoencoder, threshold)
print("Indices of detected anomalies:", anomalies)
```

The code first defines an autoencoder model consisting of an encoder and a decoder, then trains it on the given data. After training, the model reconstructs the data, the reconstruction error is computed for each sample, and samples whose error exceeds the chosen threshold are returned as anomaly indices.
