本次使用mxnet的gluon实现线性回归,更简洁
1.生成数据集
# 1. Generate a synthetic dataset: y = 2*x1 - 3.4*x2 + 4.2 + Gaussian noise
from mxnet import nd, autograd
from mxnet.gluon import data as gdata

num_input = 2       # features per example
num_example = 1000  # number of synthetic examples
true_w = [2, -3.4]  # ground-truth weights
true_b = 4.2        # ground-truth bias

features = nd.random.normal(scale=1, shape=(num_example, num_input))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
noise = nd.random.normal(scale=0.01, shape=labels.shape)  # small observation noise
labels += noise

batch_size = 10
dataset = gdata.ArrayDataset(features, labels)
# DataLoader yields shuffled mini-batches of (X, y)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)

# Peek at one mini-batch to confirm the loader works, then stop.
for X, y in data_iter:
    print(X)
    print(y)
    break
2.定义模型及初始化
先从mxnet库中导入nn(神经网络)模块,定义net变量,可以看做串联各层的容器,把每一层的输出作为下一层的输入。线性回归只有一个输入层和一个输出层,这种层叫全连接层,在gluon中为Dense的实例,这里定义输出个数为1
# 2. Define the model and initialize its parameters
from mxnet import init
from mxnet.gluon import nn
from mxnet.gluon import loss as gloss

# Sequential is a container that chains layers, feeding each layer's
# output into the next. Linear regression is a single fully-connected
# layer with one output, which gluon provides as Dense(1).
net = nn.Sequential()
net.add(nn.Dense(1))

# Initialize weights from a normal distribution with mean 0, std 0.01.
net.initialize(init.Normal(sigma=0.01))

# Squared (L2) loss as the loss function.
loss = gloss.L2Loss()
3.训练模型并比较结果
# 3. Train the model and compare the learned parameters with the truth
from mxnet import gluon

# SGD over all model parameters with learning rate 0.03
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})

num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        with autograd.record():   # record operations for autodiff
            l = loss(net(X), y)
        l.backward()              # back-propagate the mini-batch loss
        trainer.step(batch_size)  # update params, averaging grads over the batch
    # Report the loss over the full dataset after each epoch
    # (previously computed but never printed — dead assignment).
    l = loss(net(features), labels)
    print('epoch %d, loss: %f' % (epoch, l.mean().asnumpy()))

# The learned weight/bias should be close to true_w / true_b.
print(true_w, net[0].weight.data())
print(true_b, net[0].bias.data())
输出:
[2, -3.4]
[[ 2.0001616 -3.3998892]]
<NDArray 1x2 @cpu(0)>
4.2
[4.20024]
<NDArray 1 @cpu(0)>
可以看出,使用gluon模块可以更简洁实现模型,最后附上全部代码,已经运行通过:
# Full program: concise linear regression with the mxnet gluon API.
from mxnet import nd, autograd, init, gluon
from mxnet.gluon import data as gdata
from mxnet.gluon import nn
from mxnet.gluon import loss as gloss

# ---- 1. Synthetic dataset: y = 2*x1 - 3.4*x2 + 4.2 + noise ----
num_input = 2
num_example = 1000
true_w = [2, -3.4]
true_b = 4.2
features = nd.random.normal(scale=1, shape=(num_example, num_input))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += nd.random.normal(scale=0.01, shape=labels.shape)  # Gaussian noise

batch_size = 10
dataset = gdata.ArrayDataset(features, labels)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)

# ---- 2. Model: one fully-connected (Dense) output unit ----
net = nn.Sequential()
net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01))  # weights ~ N(0, 0.01^2)
loss = gloss.L2Loss()                    # squared (L2) loss

# ---- 3. Training loop ----
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        with autograd.record():
            l = loss(net(X), y)
        l.backward()
        trainer.step(batch_size)  # average gradients over the mini-batch

# Learned parameters should approximate true_w / true_b.
print(true_w, net[0].weight.data())
print(true_b, net[0].bias.data())