import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from tqdm import tqdm
import PCAs_errorReconstruccion_primerCuarto
from PCAs_errorReconstruccion_primerCuarto import *
# Load the per-signal statistical features for channel 6, test 1.
df_stats_Ch6_test1 = pd.read_csv("estadisticos_test1_ch6.csv", sep=',')

# Feature matrix: one row per signal, one column per statistical indicator.
_FEATURE_COLS = ['Min', 'Max', 'Kurt', 'ImpFactor', 'RMS', 'MargFactor',
                 'Skewness', 'ShapeFactor', 'PeakToPeak', 'CrestFactor']
X_Ch6 = df_stats_Ch6_test1[_FEATURE_COLS].values
def _plot_varianza_acumulada(X):
    """Fit a StandardScaler+PCA pipeline on ``X`` and plot the cumulative
    explained-variance ratio, annotating each point with its value.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Feature matrix to fit the PCA on.

    Returns
    -------
    (pipeline, pca_model, cumulative_ratios, fig, ax)
    """
    pipeline = make_pipeline(StandardScaler(), PCA())
    pipeline.fit(X)
    pca = pipeline.named_steps['pca']
    acum = pca.explained_variance_ratio_.cumsum()

    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(9, 4))
    componentes = np.arange(pca.n_components_) + 1
    ax.plot(componentes, acum, marker='o')
    for x, y in zip(componentes, acum):
        ax.annotate(round(y, 2), (x, y), textcoords="offset points",
                    xytext=(0, 10), ha='center')
    ax.set_ylim(0, 1.2)
    ax.set_xticks(componentes)
    ax.set_title('Cumulative explained variance')
    ax.set_xlabel('Number of principal components')
    ax.set_ylabel('Explained variance')
    return pipeline, pca, acum, fig, ax


# Cumulative explained variance over the complete signal...
pca_pipeline, modelo_pca, prop_varianza_acum, fig, ax = _plot_varianza_acumulada(X_Ch6)

# ...and over the first quarter only (the region later used as the
# "healthy" training set).  Module-level names end up bound to this
# second fit, exactly as in the original duplicated code.
pca_pipeline, modelo_pca, prop_varianza_acum, fig, ax = _plot_varianza_acumulada(
    X_Ch6[:int(len(X_Ch6) / 4)]
)
# Reconstruction and per-signal reconstruction error, using a PCA model
# trained on the first quarter of the signal (project helper imported via
# PCAs_errorReconstruccion_primerCuarto).
reconstruccion, error_reconstruccion = pca_reconstruccion_error_reconstruccion_primerCuarto(
    df_stats_Ch6_test1, 6, imp=1
)
df_resultados = pd.DataFrame({
    'error_reconstruccion': error_reconstruccion,
})

# Box plot of the reconstruction-error distribution (log scale).
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5.5, 3.5))
sns.boxplot(
    y='error_reconstruccion',
    data=df_resultados,
    palette='tab10',
    ax=ax
)
ax.set_yscale("log")
ax.set_title('Distribución de los errores de reconstrucción (PCA)')

# 98th percentile of the reconstruction error (a candidate anomaly
# threshold).  `.iloc[0]` replaces the deprecated positional `[0]` on a
# Series; the original bare expression was notebook residue with no effect
# in a script, so the value is bound to a name instead.
umbral_p98 = df_resultados.quantile(0.98).iloc[0]
# Distribution of the reconstruction error (KDE with rug marks).
# ==============================================================================
# `sns.distplot` is deprecated (removed in seaborn >= 0.14); `kdeplot` +
# `rugplot` reproduce hist=False / rug=True / kde_kws={'shade': True,
# 'linewidth': 1}.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
sns.kdeplot(x=error_reconstruccion, fill=True, linewidth=1, color='blue', ax=ax)
sns.rugplot(x=error_reconstruccion, color='blue', ax=ax)
ax.set_title('Distribución de los errores de reconstrucción (PCA)')
ax.set_xlabel('Error de reconstrucción');
# Distribution of the reconstruction error (KDE only, English labels).
# ==============================================================================
# `sns.kdeplot(fill=True)` replaces the deprecated `sns.distplot`.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
sns.kdeplot(x=error_reconstruccion, fill=True, linewidth=1, color='blue', ax=ax)
ax.set_title('Distribution of reconstruction errors (PCA)')
ax.set_xlabel('Reconstruction error');
# Train a 6-component PCA (with standardisation) on the first quarter of
# the data, assumed to represent healthy behaviour.
X_primerCuarto = X_Ch6[:int(len(X_Ch6) / 4)]
pca_pipeline = make_pipeline(StandardScaler(), PCA(n_components=6))
pca_pipeline.fit(X_primerCuarto)

# Project the training data onto the principal components...
proyecciones_train = pca_pipeline.transform(X_primerCuarto)
# ...and map back to the original feature space.
reconstruccion_train = pca_pipeline.inverse_transform(X=proyecciones_train)

# Per-signal RMSE between original and reconstruction.
error_reconstruccion_train = np.sqrt(((reconstruccion_train - X_primerCuarto) ** 2).mean(axis=1))

# Side-by-side comparison: training error vs error over the complete signal.
# `sns.kdeplot(fill=True)` replaces the deprecated `sns.distplot`.
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(15, 6))
sns.kdeplot(x=error_reconstruccion_train, fill=True, linewidth=1, color='blue', ax=ax1)
ax1.set_title('Distribution of reconstruction errors (PCA) - Train')
ax1.set_xlabel('Reconstruction error');
sns.kdeplot(x=error_reconstruccion, fill=True, linewidth=1, color='red', ax=ax2)
ax2.set_title('Distribution of reconstruction errors (PCA) - Complete signal')
ax2.set_xlabel('Reconstruction error');
# Re-train the 6-component PCA on the first quarter (same computation as the
# previous cell — kept so this passage also stands alone) and overlay both
# error distributions on a single axis.
X_primerCuarto = X_Ch6[:int(len(X_Ch6) / 4)]
pca_pipeline = make_pipeline(StandardScaler(), PCA(n_components=6))
pca_pipeline.fit(X_primerCuarto)

# Project and reconstruct the training data.
proyecciones_train = pca_pipeline.transform(X_primerCuarto)
reconstruccion_train = pca_pipeline.inverse_transform(X=proyecciones_train)

# Per-signal RMSE between original and reconstruction.
error_reconstruccion_train = np.sqrt(((reconstruccion_train - X_primerCuarto) ** 2).mean(axis=1))

# Overlaid KDEs; `sns.kdeplot(fill=True)` replaces the deprecated distplot.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 6))
sns.kdeplot(x=error_reconstruccion, fill=True, linewidth=1, color='red',
            ax=ax, label='Complete signal')
sns.kdeplot(x=error_reconstruccion_train, fill=True, linewidth=1, color='blue',
            ax=ax, label='Train')
ax.set_title('Distribution of reconstruction errors (PCA) - Train vs Complete signal')
ax.set_xlabel('Reconstruction error');
ax.legend()
# Work with the raw ndarray from here on.
error_reconstruccion = error_reconstruccion.values

from sklearn.mixture import GaussianMixture

# Fit a 2-component Gaussian mixture on the reconstruction error of the last
# three quarters (the part NOT used to train the PCA): one component is
# expected to capture normal signals, the other anomalous ones.
error_3cuartos = error_reconstruccion[int(len(error_reconstruccion) / 4):]
gm = GaussianMixture(n_components=2, random_state=0).fit(error_3cuartos.reshape(-1, 1))

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(9, 6))
# `sns.kdeplot(fill=True)` replaces the deprecated `sns.distplot`.
sns.kdeplot(x=error_3cuartos, fill=True, linewidth=1, color='orange', ax=ax)
# Mark each mixture mean.  `gm.means_[i][0]` extracts the scalar: passing
# the 1-element row array to axvline only worked through deprecated
# implicit array-to-scalar conversion.
ax.axvline(gm.means_[0][0], ls='--', color='black')
# NOTE(review): annotation anchor coordinates below are hard-coded for one
# particular dataset/axis range — re-check if the data changes.
ax.annotate(str(round(gm.means_[0][0], 8)), xy=(0.037, 1), xytext=(1, 1.5),
            arrowprops=dict(facecolor='black', shrink=0.05)
            )
ax.axvline(gm.means_[1][0], ls=':', color='black')
ax.annotate(str(round(gm.means_[1][0], 8)), xy=(0.42, 1.75), xytext=(1.4, 2.25),
            arrowprops=dict(facecolor='black', shrink=0.05),
            )
ax.set_title('Distribution of reconstruction errors (PCA) - Complete signal except the first quarter')
ax.set_xlabel('Reconstruction error');
# Classify each signal of the last three quarters with the mixture model
# (0 = normal cluster, 1 = anomalous cluster).
pred_GM_3cuartos = gm.predict(
    error_reconstruccion[int(len(error_reconstruccion) / 4):].reshape(-1, 1)
)
# Count of signals flagged as anomalous (the original computed this as a
# bare `sum(pred_GM)` expression with no effect).
n_anomalias = int(pred_GM_3cuartos.sum())

# The training quarter is labelled normal (0) by construction; build the
# full label vector with a single concatenation instead of a Python
# append loop.
pred_GM = np.concatenate([
    np.zeros(int(len(error_reconstruccion) / 4), dtype=int),
    pred_GM_3cuartos,
])

colores = ["#00cc44", "#f73e05"]
n_signal = np.arange(len(pred_GM))

# Split signal indices and errors by predicted class for the scatter plot.
signals_0 = n_signal[pred_GM == 0]
error_rec_0 = error_reconstruccion[pred_GM == 0]
signals_1 = n_signal[pred_GM == 1]
error_rec_1 = error_reconstruccion[pred_GM == 1]

plt.figure(figsize=(10, 6))
plt.scatter(signals_0, error_rec_0, c="#00cc44", label='Normal')
plt.scatter(signals_1, error_rec_1, c="#f73e05", label='Anomalies')
plt.title('Reconstruction error (PCA) - Ch6 test1')
plt.xlabel('Signal')
plt.ylabel('Error')
plt.legend()
# Locate the first signal that starts a run of 6 consecutive anomalies
# (presumably 6 signals ~ 1 hour, assuming one signal every 10 minutes —
# TODO confirm against the dataset's sampling scheme).
comienzo_1hora_anomalias = None
for i in range(len(pred_GM) - 5):
    # Iterate only where a *full* 6-sample window exists: with the original
    # open-ended range, a short anomalous run at the very end of the array
    # (e.g. the slice [1] of the last element) made `.all()` True and
    # falsely triggered the detection.
    if pred_GM[i:i + 6].all():
        comienzo_1hora_anomalias = i
        break
if comienzo_1hora_anomalias is None:
    # The original 'NA' string sentinel crashed below with a cryptic
    # TypeError ([0] * 'NA'); fail loudly and clearly instead.
    raise ValueError("No run of 6 consecutive anomalous signals was found")

# Step labelling: everything before the onset is normal, everything from
# the onset on is considered anomalous.
pred_GM_1hora_anomalias = ([0] * comienzo_1hora_anomalias
                           + [1] * (len(pred_GM) - comienzo_1hora_anomalias))

colores = ["#00cc44", "#f73e05"]
# Dense x grid (slightly wider than the data range) for the shaded regions.
x = np.arange(-10, len(df_stats_Ch6_test1) + 10, 0.02)
n_signal = list(range(len(pred_GM_1hora_anomalias)))

plt.figure(figsize=(10, 6))
plt.scatter(n_signal, error_reconstruccion, c=np.take(colores, pred_GM_1hora_anomalias))
plt.axvline(comienzo_1hora_anomalias, color='r', label='Beginning of anomalies')
plt.fill_between(x, min(error_reconstruccion) - 0.5, max(error_reconstruccion) + 1,
                 where=x < comienzo_1hora_anomalias,
                 facecolor='green', alpha=0.2, label='Normal')
plt.fill_between(x, min(error_reconstruccion) - 0.5, max(error_reconstruccion) + 1,
                 where=x > comienzo_1hora_anomalias,
                 facecolor='red', alpha=0.5, label='Anomalies ')
plt.title('Reconstruction error (PCA) - Ch6 test1')
plt.xlabel('Signal')
plt.ylabel('Error')
plt.legend(loc=2)
import seaborn as sns
import warnings
# Globally silences ALL warnings — presumably to hide seaborn/sklearn
# deprecation noise; consider narrowing the filter.
warnings.filterwarnings("ignore")

# One KDE per statistical feature's reconstruction error.
# NOTE(review): error_min, error_max, ... are not defined in this file;
# they presumably come from `from PCAs_errorReconstruccion_primerCuarto
# import *` at the top — confirm.
# The ten copy-pasted `sns.distplot` calls (deprecated API) are replaced by
# a single loop over (series, display name) pairs using `sns.kdeplot`.
_errores_por_estadistico = [
    (error_min, 'Min'),
    (error_max, 'Max'),
    (error_kurt, 'Kurtosis'),
    (error_if, 'Impulse Factor'),
    (error_rms, 'RMS'),
    (error_mf, 'Margin Factor'),
    (error_skew, 'Skewness'),
    (error_sf, 'Shape Factor'),
    (error_ptp, 'Peak to Peak'),   # fixes the 'Peal to Peak' title typo
    (error_cf, 'Crest Factor'),
]

fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(20, 30))
for eje, (serie_error, nombre) in zip(axes.ravel(), _errores_por_estadistico):
    sns.kdeplot(x=serie_error, fill=True, linewidth=1, color='blue', ax=eje)
    eje.set_title(f'Distribución de los errores de reconstrucción - {nombre} (PCA)')
    eje.set_xlabel('Error de reconstrucción')
# --- Non-code trailing text, commented out: raw prose at module level is a
# --- Python SyntaxError.  Appears to be unrelated promotional material.
# 知乎学术咨询:
# https://www.zhihu.com/consult/people/792359672131756032?isMe=1
# 工学博士,担任《Mechanical System and Signal Processing》《中国电机工程学报》《控制与决策》等期刊审稿专家,擅长领域:现代信号处理,机器学习,深度学习,数字孪生,时间序列分析,设备缺陷检测、设备异常检测、设备智能故障诊断与健康管理PHM等。