[MXNet逐梦之旅]练习二·使用MXNet拟合直线简洁实现
- code
#%%
#%matplotlib inline
from matplotlib import pyplot as plt
from mxnet import autograd, nd
import random
#%%
# Synthetic regression data: y = true_w * x + true_b + Gaussian noise.
num_inputs = 1
num_examples = 100
true_w = 1.56
true_b = 1.24
# 100 evenly spaced inputs on [0, 10), shaped as a column vector.
features = nd.arange(0, 10, 0.1).reshape((-1, 1))
# Clean targets plus additive noise with standard deviation 0.2.
noise = nd.random.normal(scale=0.2, shape=(features.shape[0], 1))
labels = true_w * features + true_b + noise
# Peek at the first sample (notebook-style cell output).
features[0], labels[0]
#%%
# Mini-batch data pipeline (this helper is also saved in the d2lzh package).
from mxnet import gluon as gl
from mxnet.gluon import data as gdata
batch_size = 10
# Bundle the training features with their labels.
dataset = gdata.ArrayDataset(features, labels)
# Shuffle and serve random mini-batches of size 10.
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
# Inspect a single batch, then stop.
for X, y in data_iter:
    print(X, y)
    break
#%%
# Build the network: a Sequential container with a single Dense(1) layer,
# i.e. plain linear regression y = w * x + b.
model = gl.nn.Sequential()
#%%
model.add(gl.nn.Dense(1))
model
#%%
import mxnet as mx
# Weights drawn from N(0, 0.01^2); biases default to zero.
model.initialize(mx.init.Normal(sigma=0.01))
#%%
loss = gl.loss.L2Loss() # squared loss, also known as L2-norm loss
#%%
# Adam optimizer over all trainable parameters.
# NOTE(review): lr=0.5 is unusually large for Adam — converges on this toy
# problem (see the captured output) but verify before reusing elsewhere.
trainer = gl.Trainer(model.collect_params(), 'adam', {'learning_rate': 0.5})
num_epochs = 10
# Standard gluon training loop: forward pass under autograd.record(),
# backward pass, then an optimizer step scaled by the batch size.
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        with autograd.record():
            l = loss(model(X), y)  # per-example losses for this mini-batch
        l.backward()
        trainer.step(batch_size)
    # Epoch-level loss over the full dataset, kept in its own variable so it
    # doesn't shadow the batch loss `l` above.
    # Fix: use asscalar() instead of asnumpy() — passing a size-1 ndarray to
    # %f relies on implicit array-to-scalar conversion, which is deprecated
    # and raises on modern NumPy.
    train_l = loss(model(features), labels)
    print('epoch %d, loss: %f' % (epoch, train_l.mean().asscalar()))
#%%
# Predictions over the full input range.
preds = model(features)
preds
# Scatter the noisy ground truth and the fitted values (point size 1).
x_np = features.asnumpy()
plt.scatter(x_np, labels.asnumpy(), 1)
plt.scatter(x_np, preds.asnumpy(), 1)
plt.show()
#%%
# Learned parameters — compare against true_w=1.56 and true_b=1.24.
print(model)
params = model.collect_params()
print("w:", params["dense0_weight"].data())
print("b:", params["dense0_bias"].data())
- out
<NDArray 10x1 @cpu(0)>
epoch 1, loss: 5.570210
epoch 2, loss: 2.831637
epoch 3, loss: 0.995476
epoch 4, loss: 0.332262
epoch 5, loss: 0.060224
epoch 6, loss: 0.027413
epoch 7, loss: 0.031316
epoch 8, loss: 0.030222
epoch 9, loss: 0.027907
epoch 10, loss: 0.032840
Sequential(
(0): Dense(1 -> 1, linear)
)
w:
[[1.5745053]]
<NDArray 1x1 @cpu(0)>
b:
[1.2476798]
<NDArray 1 @cpu(0)>
蓝色是原始数据
黄色为拟合数据