# 24. Overfitting (dropout demo)

'''
Description: overfitting
Author: 365JHWZGo
Date: 2021-11-14 22:22:39
LastEditors: 365JHWZGo
LastEditTime: 2021-11-14 23:08:40
'''
import torch
import numpy as np
import matplotlib.pyplot as plt

# hyper parameters
N_SAMPLES = 20   # number of points in each of the train and test sets
N_HIDDENS = 300  # hidden-layer width; deliberately large so the plain net can overfit 20 points

# training data: y = x plus Gaussian noise (std 0.3), x evenly spaced in [-1, 1]
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), dim=1)  # shape (N_SAMPLES, 1)
y = x+0.3*torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))

# test data: same underlying line y = x, but an independent noise draw
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), dim=1)
test_y = test_x+0.3 * \
    torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
# show data
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=50, alpha=0.5, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()
# create network
# overfitting baseline: a plain 1-300-300-1 fully-connected net with no
# regularisation, wide enough to memorise the noisy training points
_plain_layers = [
    torch.nn.Linear(1, N_HIDDENS),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDENS, N_HIDDENS),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDENS, 1),
]
net_overfitting = torch.nn.Sequential(*_plain_layers)

# dropout
# regularised twin of net_overfitting: identical layer sizes, but with a
# Dropout(p=0.5) inserted after each Linear hidden layer
_dropout_layers = [
    torch.nn.Linear(1, N_HIDDENS),
    torch.nn.Dropout(0.5),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDENS, N_HIDDENS),
    torch.nn.Dropout(0.5),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDENS, 1),
]
net_dropout = torch.nn.Sequential(*_dropout_layers)

# create optimizer
# one independent Adam optimizer per network so the two models train side by side
optimizer1 = torch.optim.Adam(net_overfitting.parameters(), lr=0.01)
optimizer2 = torch.optim.Adam(net_dropout.parameters(), lr=0.01)

# loss_fun
loss_func = torch.nn.MSELoss()  # mean-squared error for this regression task

plt.ion()  # interactive plotting: lets the training loop redraw the figure live

# training
if __name__ == '__main__':
    for i in range(500):
        # forward pass on the training set for both models
        pred_overfitting = net_overfitting(x)
        pred_dropout = net_dropout(x)

        loss_overfitting = loss_func(pred_overfitting, y)
        loss_dropout = loss_func(pred_dropout, y)

        # standard zero-grad / backward / step cycle, once per optimizer
        optimizer1.zero_grad()
        optimizer2.zero_grad()
        loss_overfitting.backward()
        loss_dropout.backward()
        optimizer1.step()
        optimizer2.step()

        # every 10 steps, plot both fits against the held-out test points
        if i % 10 == 0:
            net_overfitting.eval()
            # eval() switches the nets to evaluation mode (disables dropout);
            # they are switched back to training mode after the plot is drawn
            net_dropout.eval()

            plt.cla()

            # predictions on the test inputs, with dropout inactive
            test_pred_ofit = net_overfitting(test_x)
            test_pred_dout = net_dropout(test_x)

            plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta',
                        s=50, alpha=0.3, label='train points')
            plt.scatter(test_x.data.numpy(), test_y.data.numpy(),
                        c='cyan', s=50, alpha=0.3, label='test points')
            plt.plot(test_x.data.numpy(), test_pred_ofit.data.numpy(),
                     'r-', lw=3, label='overfitting line')
            plt.plot(test_x.data.numpy(), test_pred_dout.data.numpy(),
                     'b--', lw=3, label='dropout line')

            # annotate the current test losses of both models on the figure
            plt.text(0, -1.2, 'overfitting loss%.2f' % loss_func(test_pred_ofit,
                     test_y).data.numpy(), fontdict={'size': 20, 'color':  'red'})
            plt.text(0, -1.5, 'dropout loss%.2f' % loss_func(test_pred_dout,
                     test_y).data.numpy(), fontdict={'size': 20, 'color':  'black'})
            plt.legend(loc='best')
            plt.ylim((-2.5, 2.5))
            plt.pause(0.1)

            # back to training mode so dropout is active for the next steps
            net_overfitting.train()
            net_dropout.train()
    plt.ioff()
    plt.show()

# (Removed: CSDN page footer — like/favourite/donation and payment UI text
#  scraped along with the article; it was never part of the code.)