import numpy as np
import ceemdan
import vmd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from transformers import TransformerModel, TFAutoModel
# ------------------------------------------------------------------
# Demo pipeline: CEEMDAN / VMD signal decomposition -> Transformer
# feature extraction -> LSTM regression on a toy multivariate series.
#
# NOTE(review): this script depends on third-party `ceemdan` and `vmd`
# packages, and `TransformerModel` is not a published export of the
# `transformers` library -- confirm the actual package APIs in use.
# ------------------------------------------------------------------

# Generate example data: a multivariate time series stored in a 2-D
# array with 100 time steps and 3 variables.
data = np.random.rand(100, 3)

# Scale every variable to [0, 1] so decomposition / network inputs are
# on a comparable range.
scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(data)

# Decompose the signal with CEEMDAN into intrinsic mode functions.
# NOTE(review): CEEMDAN implementations typically operate on 1-D
# signals; passing a (100, 3) array may require a per-column loop.
emd = ceemdan.CEEMDAN()
imfs = emd.ceemdan(scaled_data)

# Decompose the signal with VMD as well.  The instance is bound to
# `vmd_model` so the imported `vmd` module itself is not shadowed.
vmd_model = vmd.VMD()
modes = vmd_model.decompose(scaled_data.T)

# Recombine the decomposed signals into features and target: all
# CEEMDAN IMFs except the last are concatenated as features, and the
# last VMD mode is used as the target variable.
X = np.concatenate(imfs[:-1], axis=1)
y = modes[-1].T

# Chronological train/test split (shuffle=False preserves time order,
# which matters for time-series evaluation).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, shuffle=False
)

# Feature extraction with a pretrained Transformer.
# NOTE(review): BERT expects token-id input, not continuous signals;
# verify this step against the intended model / wrapper class.
transformer_model = TFAutoModel.from_pretrained("bert-base-uncased")
transformer = TransformerModel(transformer_model)
X_train_transformed = transformer.predict(X_train)
X_test_transformed = transformer.predict(X_test)

# LSTM regression head: one LSTM layer followed by a linear output
# layer matching the target dimensionality.
# NOTE(review): assumes the transformed features are 3-D
# (samples, timesteps, features) -- confirm the transformer output.
model = Sequential()
model.add(LSTM(64, input_shape=(X_train_transformed.shape[1],
                                X_train_transformed.shape[2])))
model.add(Dense(y_train.shape[1]))
model.compile(loss="mean_squared_error", optimizer="adam")

# Train the model.
model.fit(X_train_transformed, y_train, epochs=100, batch_size=32, verbose=2)

# Predict on the held-out test set.
predictions = model.predict(X_test_transformed)

# Invert the [0, 1] scaling back to the original units.
# NOTE(review): `scaler` was fit on the 3-column raw data; this only
# works if `predictions` has the same number of columns -- confirm.
scaled_predictions = scaler.inverse_transform(predictions)

# Print the (rescaled) predictions.
print(scaled_predictions)