吴恩达机器学习课后习题5

该代码实现了一个基于线性回归的机器学习模型,用于预测大坝的出水量。通过数据预处理、特征工程(包括多项式特征升维和归一化)、正则化(L2范数)以及梯度下降优化算法来训练模型。文章探讨了不同正则化参数对训练和验证集误差的影响,寻找最优的正则化参数以平衡过拟合和欠拟合问题。
摘要由CSDN通过智能技术生成

# 预测大坝出水量

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.io as loadmat
from scipy.optimize import minimize


# Scatter plot of the raw training data
def plot_data():
    """Scatter-plot the training set: water-level change vs. dam outflow.

    Reads the module-level ``X_train`` / ``y_train`` arrays; column 0 of
    ``X_train`` is the bias column, so column 1 holds the raw feature.
    """
    fig, ax = plt.subplots()
    ax.scatter(X_train[:, 1], y_train)
    # BUG FIX: label previously read 'water flowing out og th dam(y)'.
    ax.set(xlabel='change in water level(x)',
           ylabel='water flowing out of the dam(y)')


def reg_cost(theta, X, y, lamda):
    """Regularized squared-error cost for linear regression.

    theta: (n,) parameter vector; X: (m, n) design matrix whose first
    column is the bias; y: (m, 1) targets; lamda: L2 strength.  The bias
    term theta[0] is excluded from the penalty.
    """
    residuals = X @ theta - y.flatten()
    squared_error = residuals @ residuals
    penalty = lamda * (theta[1:] @ theta[1:])
    return (squared_error + penalty) / (2 * len(X))


def reg_gradient(theta, X, y, lamda):
    """Gradient of the regularized cost with respect to theta.

    Returns an (n,) array.  The bias component theta[0] is not
    regularized, matching ``reg_cost``.
    """
    grad = (X @ theta - y.flatten()) @ X
    reg = lamda * theta
    reg[0] = 0  # bias term is excluded from regularization
    # BUG FIX: removed a leftover debug print of the array shapes that
    # fired on every optimizer iteration inside minimize().
    return (grad + reg) / (len(X))


# Fit the model for a given regularization strength
def train_model(X, y, lamda):
    """Minimize the regularized cost with TNC and return the optimal theta."""
    initial_theta = np.ones(X.shape[1])
    result = minimize(fun=reg_cost,
                      x0=initial_theta,
                      args=(X, y, lamda),
                      method='TNC',
                      jac=reg_gradient)
    return result.x


# Learning curve: training vs. cross-validation cost as the training set grows
def plot_learining_curve(X_train, y_train, X_val, y_val, lamda):
    """Train on the first i examples for i = 1..m and plot both errors.

    The validation cost is always computed on the full validation set.
    """
    sizes = range(1, len(X_train) + 1)
    train_errors = []
    val_errors = []

    for m in sizes:
        theta_m = train_model(X_train[:m, :], y_train[:m, :], lamda)
        train_errors.append(reg_cost(theta_m, X_train[:m, :], y_train[:m, :], lamda))
        val_errors.append(reg_cost(theta_m, X_val, y_val, lamda))

    plt.plot(sizes, train_errors, label='training cost', c='r')
    plt.plot(sizes, val_errors, label='cv cost', c='b')
    plt.legend()
    plt.xlabel("number of training examples")
    plt.ylabel("error")
    plt.show()


# Polynomial feature expansion
def poly_feature(X, power):
    """Append columns x^2 .. x^power of the feature column X[:, 1].

    Column 0 is assumed to be the bias column and is left untouched.
    """
    for degree in range(2, power + 1):
        new_col = np.power(X[:, 1], degree)[:, None]
        X = np.hstack([X, new_col])
    return X


# Column-wise mean and standard deviation (used for z-score normalization)
def get_means_stds(X):
    """Return the per-column mean and population std of X."""
    return X.mean(axis=0), X.std(axis=0)


# Z-score normalization (skips the bias column)
def feature_normailze(X, means, stds):
    """Normalize every column of X except the bias (first) column.

    Mutates X in place and also returns it; callers rely on both.
    """
    scaled = (X[:, 1:] - means[1:]) / stds[1:]
    X[:, 1:] = scaled
    return X


# Overlay the fitted polynomial curve on the raw-data scatter plot
def plot_ploy_fit():
    """Plot the polynomial fit over [-60, 60].

    Relies on the module-level ``power``, ``train_means``, ``train_stds``
    and ``theta_fit`` being set before it is called.
    """
    plot_data()

    grid = np.linspace(-60, 60, 100)
    design = grid.reshape(100, 1)
    design = np.insert(design, 0, 1, axis=1)        # bias column
    design = poly_feature(design, power)            # same expansion as training
    design = feature_normailze(design, train_means, train_stds)

    plt.plot(grid, design @ theta_fit, 'r--')


# Load the exercise data (train / validation / test splits) from the .mat file.
# NOTE: scipy.io was imported under the alias `loadmat`, hence loadmat.loadmat.
data = loadmat.loadmat('ex5data1.mat')  ##得写两遍
print(data.keys())

# Cross-validation set
X_val, y_val = data['Xval'], data['yval']
# Test set
X_test, y_test = data['Xtest'], data['ytest']
# Training set
X_train, y_train = data['X'], data['y']
print(X_train.shape)
# Prepend a bias column of ones to every design matrix.
X_train = np.insert(X_train, 0, 1, axis=1)
X_val = np.insert(X_val, 0, 1, axis=1)
X_test = np.insert(X_test, 0, 1, axis=1)

# Sanity-check cost and gradient at theta = ones with lambda = 1.
theta = np.ones(X_train.shape[1])
lamda = 1
reg_cost1 = reg_cost(theta, X_train, y_train, lamda)
print(reg_cost1)

reg_grad = reg_gradient(theta, X_train, y_train, lamda)
print(reg_grad)
print(X_train.shape)
# Fit an unregularized straight line and draw it over the scatter plot.
theta_final = train_model(X_train, y_train, lamda=0)
print(theta_final)
plot_data()
plt.plot(X_train[:, 1], X_train @ theta_final, c='r')
plt.show()
# Learning curve for the plain linear model.
plot_learining_curve(X_train, y_train, X_val, y_val, lamda)
plt.show()

# Polynomial feature expansion (powers 2..6 of the single input feature).
power = 6
X_train_poly = poly_feature(X_train, power)
X_val_poly = poly_feature(X_val, power)
X_test_poly = poly_feature(X_test, power)
# Normalize ALL splits with the TRAINING set statistics to avoid leakage.
train_means, train_stds = get_means_stds(X_train_poly)
# NOTE(review): feature_normailze mutates its argument in place, so
# X_train_poly and X_train_norm end up being the same normalized array.
X_train_norm = feature_normailze(X_train_poly, train_means, train_stds)
X_val_norm = feature_normailze(X_val_poly, train_means, train_stds)
X_test_norm = feature_normailze(X_test_poly, train_means, train_stds)
# Unregularized (lambda = 0) high-order fit, then plot the curve.
theta_fit = train_model(X_train_norm, y_train, lamda=0)
plot_ploy_fit()
plt.show()
# With lambda = 100 the penalty is too strong -> underfitting.
plot_learining_curve(X_train_norm, y_train, X_val_norm, y_val, lamda=100)
plt.show()
# Sweep candidate regularization strengths and compare train / CV error.
# Costs are evaluated with lamda=0 so the penalty term does not distort
# the comparison between models.
lamdas = [0, 0.001, 0.003, 0.03, 0.1, 0.3, 1, 3, 10]
training_cost = []
cv_cost = []

for lamda in lamdas:
    res = train_model(X_train_norm, y_train, lamda)

    tc = reg_cost(res, X_train_norm, y_train, lamda=0)
    cv = reg_cost(res, X_val_norm, y_val, lamda=0)

    training_cost.append(tc)
    cv_cost.append(cv)

plt.plot(lamdas, training_cost, label='training cost')
plt.plot(lamdas, cv_cost, label='cv cost')
plt.legend()
plt.show()

# BUG FIX: the best lambda was computed but the expression result was
# discarded; capture it, report it, and use it for the final fit instead
# of a duplicated hard-coded value (3 for this dataset).
best_lamda = lamdas[np.argmin(cv_cost)]
print('best lambda:', best_lamda)

# Final evaluation on the held-out test set.
res = train_model(X_train_norm, y_train, lamda=best_lamda)
test_cost = reg_cost(res, X_test_norm, y_test, lamda=0)
print(test_cost)

在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
最优正则化参数为 3,此时 cost: 4.397616217271634

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值