import numpy as np
import matplotlib.pyplot as plt

from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# load_boston was removed in scikit-learn 1.2 (ethical concerns with the
# dataset); fall back to the California housing data so the script still
# runs on modern scikit-learn versions.
try:
    from sklearn.datasets import load_boston
except ImportError:
    from sklearn.datasets import fetch_california_housing as load_boston
# Extreme Learning Machine (ELM) regressor.
class ELMRegressor:
    """Single-hidden-layer feedforward network trained as an Extreme
    Learning Machine: the input-to-hidden weights are random and fixed,
    and only the hidden-to-output weights are learned, in closed form,
    by linear least squares.

    Parameters
    ----------
    n_hidden : int
        Number of hidden-layer units.
    random_state : int or None, optional
        Seed for the random hidden weights/biases. ``None`` (default)
        preserves the original behaviour of drawing from NumPy's global
        RNG; an int makes ``fit`` reproducible.
    """

    def __init__(self, n_hidden, random_state=None):
        self.n_hidden = n_hidden
        self.random_state = random_state
        self.W = None     # (n_features, n_hidden) input-to-hidden weights
        self.bias = None  # (n_hidden,) hidden-layer bias
        self.beta = None  # (n_hidden,) output weights (previously unset until fit)

    def relu(self, x):
        """Element-wise rectified linear activation max(x, 0)."""
        return np.maximum(x, 0)

    def fit(self, X, y):
        """Fit the model: draw random hidden weights, then solve the
        output weights by least squares.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
        y : ndarray of shape (n_samples,)

        Returns
        -------
        self
        """
        n_samples, n_features = X.shape
        # With random_state=None this is exactly np.random.randn, matching
        # the original global-RNG behaviour.
        rng = np.random if self.random_state is None else np.random.RandomState(self.random_state)
        # Random input-to-hidden weight matrix and bias vector.
        self.W = rng.randn(n_features, self.n_hidden)
        self.bias = rng.randn(self.n_hidden)
        # Hidden-layer activations.
        H = self.relu(np.dot(X, self.W) + self.bias)
        # Moore-Penrose pseudo-inverse gives the least-squares solution
        # for the output weights.
        self.beta = np.linalg.pinv(H).dot(y)
        return self

    def predict(self, X):
        """Predict targets for X using the fitted weights.

        Requires ``fit`` to have been called first.
        """
        H = self.relu(np.dot(X, self.W) + self.bias)
        return H.dot(self.beta)
# --- Demo: train and evaluate the ELM on a housing-price dataset ---

# Seed the global RNG so the ELM's random hidden weights (and therefore the
# reported MSE) are reproducible; previously only the train/test split was
# seeded, so every run printed a different score.
np.random.seed(42)

# Load the dataset.
boston = load_boston()
X, y = boston.data, boston.target

# Standardize features to zero mean / unit variance.
scaler = StandardScaler()
X = scaler.fit_transform(X)

# Hold out 20% of the samples for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build and train an ELM with 50 hidden units.
elm = ELMRegressor(n_hidden=50)
elm.fit(X_train, y_train)

# Predict on the test set and report the mean squared error (MSE).
y_pred = elm.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print("MSE: {:.3f}".format(mse))

# Plot predictions against the true test targets.
plt.figure(figsize=(8, 6))
plt.plot(y_test, 'b', label='True Values')
plt.plot(y_pred, 'r', label='Predictions')
plt.legend()
plt.show()