多项式函数拟合实验
注:t.my_ones_packages 是我根据《动手学深度学习》目前学习过程中出现的所有自定义函数集中整理得到的模块。
《动手学深度学习》也向读者提供了一个包含所有自定义函数的包 "d2lzh",大家可以自行下载(侵删)。链接如下:link(提取码:un5p)。时间设置是永久的,如果失效可以联系我更新。
import t.my_ones_packages as mop
from mxnet import autograd, gluon, nd
from mxnet.gluon import data as gdata, loss as gloss, nn
# Generate the dataset: y = 1.2*x - 3.4*x^2 + 5.6*x^3 + 5 + noise
num_train, num_test, true_w, true_b = 100, 100, [1.2, -3.4, 5.6], 5 # train/test sample counts, true weights and true bias
features = nd.random.normal(shape=(num_train + num_test, 1)) # raw inputs x: a (200, 1) matrix of standard-normal samples
ploy_feature = nd.concat(features, nd.power(features, 2), nd.power(features, 3)) # column-concat [x, x^2, x^3] -> shape (200, 3)
# Corresponds to w0*x0 + w1*x1 + w2*x2 + b
labels = (true_w[0] * ploy_feature[:, 0] + true_w[1] * ploy_feature[:, 1] + true_w[2] * ploy_feature[:, 2] + true_b)
labels += nd.random.normal(scale=0.1, shape=labels.shape)  # add Gaussian observation noise (std 0.1)
# Plotting helper
def semilogy(x_val, y_val, x_lab, y_lab, x2_val=None, y2_val=None, legend=None, figsize=(7.5, 5.5)):
    """Plot y versus x with a log-scaled y axis, optionally with a second (dotted) curve.

    Parameters
    ----------
    x_val, y_val : data for the first curve
    x_lab, y_lab : axis labels
    x2_val, y2_val : optional data for a second curve (drawn when both are truthy)
    legend : list of legend labels, e.g. ['train', 'test']
    figsize : figure size passed to mop.set_figsize
    """
    mop.set_figsize(figsize)
    mop.plt.xlabel(x_lab)
    mop.plt.ylabel(y_lab)
    # matplotlib's semilogy (log-scale y axis) — NOT a recursive call, despite
    # sharing this function's name
    mop.plt.semilogy(x_val, y_val)
    if x2_val and y2_val:  # truthiness check: None or an empty sequence skips the second curve
        mop.plt.semilogy(x2_val, y2_val, linestyle=':')
        # legend(loc, frameon, edgecolor, facecolor, title, labels)
        mop.plt.legend(legend)
    mop.plt.show()  # render the figure
# Number of training epochs shared by every experiment below
num_epochs = 100
loss = gloss.L2Loss() # squared (L2) loss
def fit_and_plot(train_features, test_features, train_labels, test_labels):
    """Fit a single linear output layer by mini-batch SGD, then report and plot
    the per-epoch training/test losses and the learned weight and bias."""
    # Model: one Dense(1) output unit inside a Sequential container
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize()
    # Cap the batch size so tiny training sets (e.g. 2 samples) still work
    batch_size = min(10, train_labels.shape[0])
    dataset = gdata.ArrayDataset(train_features, train_labels)
    train_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)  # reshuffle every epoch
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})
    train_loss_hist, test_loss_hist = [], []
    for _ in range(num_epochs):
        for data, target in train_iter:
            with autograd.record():  # record the forward pass for autodiff
                batch_loss = loss(net(data), target)  # mini-batch L2 loss
            batch_loss.backward()  # back-propagate to compute gradients
            trainer.step(batch_size)  # SGD step; gradients normalized by batch_size
        # Record full-dataset train/test losses once per epoch
        train_loss_hist.append(loss(net(train_features), train_labels).mean().asscalar())
        test_loss_hist.append(loss(net(test_features), test_labels).mean().asscalar())
    print('final epoch :train loss', train_loss_hist[-1], 'test loss', test_loss_hist[-1])
    epochs = range(1, num_epochs + 1)
    semilogy(epochs, train_loss_hist, 'epochs', 'loss',
             epochs, test_loss_hist, ['train', 'test'])
    print('weight', net[0].weight.data().asnumpy(), '\nbias', net[0].bias.data().asnumpy())
# Experiment 1: cubic-polynomial fit (well-specified model)
fit_and_plot(ploy_feature[:num_train, :], ploy_feature[num_train:, :], labels[:num_train], labels[num_train:])
# Experiment 2: linear fit (underfitting) — uncomment to run
'''
fit_and_plot(features[:num_train, :], features[num_train:, :], labels[:num_train], labels[num_train:])
'''
# Experiment 3: only 2 training samples (overfitting) — uncomment to run
'''
fit_and_plot(ploy_feature[0:2, :], ploy_feature[num_train:, :], labels[0:2], labels[num_train:])
'''