from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon
# Generate a synthetic dataset: y = true_w[0]*x1 + true_w[1]*x2 + true_b + noise
num_examples = 1000
num_inputs = 2
true_w = [2, -3.4]
true_b = 4.2
x = nd.random_normal(shape=(num_examples, num_inputs))
y = true_w[0] * x[:, 0] + true_w[1] * x[:, 1] + true_b
y += 0.01 * nd.random_normal(shape=y.shape)  # add small Gaussian noise
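# Optional sanity check (my addition, not in the original script): print one
# sample to confirm the data follows the linear model above.
print(x[0], y[0])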
# Wrap the arrays in a Dataset and read them in shuffled mini-batches
batch_size = 10
dataset = gluon.data.ArrayDataset(x, y)
data_iter = gluon.data.DataLoader(dataset, batch_size, shuffle=True)
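# Optional (my addition): peek at a single mini-batch to verify the loader.
# With batch_size = 10 the shapes should be (10, 2) and (10,).
for data, label in data_iter:
    print(data.shape, label.shape)
    break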
# Define the model: a single Dense(1) layer is exactly linear regression.
# Gluon defers inferring the input size until the first forward pass.
net = gluon.nn.Sequential()
net.add(gluon.nn.Dense(1))
net.initialize()
square_loss = gluon.loss.L2Loss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
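# Optional (my addition): collect_params() returns the parameter dict the
# trainer will update; printing it shows the weight's input dimension is
# still unknown (0) until the first batch flows through the network.
print(net.collect_params())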
epochs = 5
for e in range(epochs):
    total_loss = 0
    for data, label in data_iter:
        with autograd.record():  # record the forward pass for gradient computation
            output = net(data)
            loss = square_loss(output, label)
        loss.backward()
        # The trainer was given the model's parameters above, so a single
        # step() is all that is needed here; no hand-written sgd as before.
        trainer.step(batch_size)
        total_loss += nd.sum(loss).asscalar()
    print('Epoch %d, average loss: %f' % (e, total_loss / num_examples))
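# Optional check (my addition): after training, the learned parameters should
# be close to the true ones used to generate the data. net[0] is the Dense
# layer added above.
dense = net[0]
print(true_w, dense.weight.data())
print(true_b, dense.bias.data())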