1. Pure MXNet implementation (from scratch)
import matplotlib.pyplot as plt
from mxnet import autograd, nd
import d2lzh
import random
# Generate the dataset: labels = 2*x1 - 3.4*x2 + 4.2 plus Gaussian noise
w = [2, -3.4]
b = 4.2
features = nd.random.normal(shape=(1000, 2))
labels = w[0] * features[:, 0] + w[1] * features[:, 1] + b
labels += nd.random.normal(scale=0.01, shape=labels.shape)  # noise term
print(features[0], labels[0])
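The matplotlib import above suggests the book's scatter-plot step, which this note otherwise omits; a minimal sketch to visualize the second feature against the labels:

plt.scatter(features[:, 1].asnumpy(), labels.asnumpy(), s=1)  # linear trend with slope -3.4
plt.show()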
# Read the data in mini-batches (batch_size is also used by the training loop below)
batch_size = 10
# Peek at the first mini-batch (optional):
# for X1, y1 in d2lzh.data_iter(batch_size, features, labels):
#     print(X1, y1)
#     break
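d2lzh.data_iter is a small helper from the book's package; if the package is unavailable, a minimal sketch matching the book's definition (it relies on the random and nd imports above):

def data_iter(batch_size, features, labels):
    # Yield shuffled mini-batches of (features, labels)
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # visit the examples in random order
    for i in range(0, num_examples, batch_size):
        j = nd.array(indices[i:min(i + batch_size, num_examples)])
        yield features.take(j), labels.take(j)  # take() indexes along axis 0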
# Initialize model parameters
w_train = nd.random.normal(scale=0.01, shape=(2, 1))
b_train = nd.zeros(shape=(1,))
# Allocate gradient buffers so autograd can store gradients for the parameters
w_train.attach_grad()
b_train.attach_grad()
# Train the model
lr = 0.03
num_epochs = 3
net = d2lzh.linreg           # the model: linear regression
loss = d2lzh.squared_loss    # the loss: squared loss
for epoch in range(num_epochs):
    for X, y in d2lzh.data_iter(batch_size, features, labels):
        with autograd.record():
            l = loss(net(X, w_train, b_train), y)
        l.backward()  # gradient of the summed mini-batch loss
        d2lzh.sgd([w_train, b_train], lr, batch_size)
    train_l = loss(net(features, w_train, b_train), labels)
    print("epoch %d, loss %f" % (epoch + 1, train_l.mean().asnumpy()))
# Compare the true parameters with the learned ones
print(w, w_train)
print(b, b_train)
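The other d2lzh helpers used above (the model, the loss, and the optimizer) are equally short; sketches matching the book's definitions, in case the package is unavailable:

def linreg(X, w, b):
    # Linear model: y_hat = Xw + b
    return nd.dot(X, w) + b

def squared_loss(y_hat, y):
    # Squared loss; reshape y so broadcasting cannot silently misalign shapes
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):
    # Mini-batch SGD: param.grad holds the gradient of the *summed*
    # mini-batch loss, so divide by batch_size to average it
    for param in params:
        param[:] = param - lr * param.grad / batch_size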
2. Implementation with the Gluon interface
from mxnet import autograd, nd, gluon, init
# Alias the gluon submodules so the names data and loss stay free for use below
from mxnet.gluon import data as gdata, loss as gloss, nn
# Generate the dataset (the same synthetic data as in section 1)
num_inputs = 2
num_examples = 1000
w = [2, -3.4]
b = 4.2
features = nd.random.normal(shape=(num_examples, num_inputs))
labels = w[0] * features[:, 0] + w[1] * features[:, 1] + b
labels += nd.random.normal(scale=0.01, shape=labels.shape)
print(features[0], labels[0])
# Read the data with Gluon's data utilities
batch_size = 10
dataset = gdata.ArrayDataset(features, labels)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
# Peek at the first mini-batch
for X, y in data_iter:
    print(X, y)
    break
# Define the model: a Sequential container holding one fully connected layer
net = nn.Sequential()
net.add(nn.Dense(1))  # output dimension 1; the input dimension is inferred
# Initialize model parameters: weights ~ N(0, 0.01^2), biases default to zero
net.initialize(init.Normal(sigma=0.01))
# Define the loss function: L2Loss is the squared loss (halved)
loss = gloss.L2Loss()
# Define the optimization algorithm: mini-batch SGD over all model parameters
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})
# Train the model
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        with autograd.record():
            l = loss(net(X), y)
        l.backward()
        trainer.step(batch_size)  # step(batch_size) averages the gradient
    l = loss(net(features), labels)
    print("epoch %d, loss: %f" % (epoch, l.mean().asnumpy()))
# Compare the true parameters with the learned ones
dense = net[0]
print(w, dense.weight.data())
print(b, dense.bias.data())
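To quantify how close the learned parameters are to the true ones, one can also print the elementwise error, in the spirit of the book's comparison (a small addition, not part of the original note):

learned_w = dense.weight.data()
print('error in w:', nd.array(w).reshape(learned_w.shape) - learned_w)
print('error in b:', b - dense.bias.data())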