Implementing linear regression with MXNet

[Reference: Li Mu's "Dive into Deep Learning" video (Bilibili)](https://www.bilibili.com/video/BV1tx41147Bp)
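The script below draws 1000 two-feature examples from a linear model with Gaussian noise and then recovers the parameters with minibatch SGD. From the constants in the code, the data-generating model is

$$ y = \mathbf{x}^\top \mathbf{w} + b + \epsilon, \qquad \mathbf{w} = (5,\,2)^\top,\quad b = 3,\quad \epsilon \sim \mathcal{N}(0,\,0.001^2). $$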
First, generate the synthetic dataset:

```python
from mxnet import ndarray as nd
from mxnet import autograd as ag
import random

# Ground-truth parameters of the linear model
w_true = nd.array([5, 2])
b_true = nd.array([3])
num_input = 1000  # number of examples
num_dim = 2       # number of features

# Features and noisy labels: y = Xw + b + noise
x_input = nd.random.normal(shape=(num_input, num_dim))
y_input = nd.dot(x_input, w_true) + b_true
y_input += .001 * nd.random.normal(shape=(num_input,))
```
A generator that yields shuffled minibatches:

```python
def data_iter(batch_size=10):
    '''Yield (features, labels) minibatches in shuffled order.'''
    idx = list(range(num_input))
    random.shuffle(idx)
    for i in range(0, num_input, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_input)])
        yield nd.take(x_input, j), nd.take(y_input, j)
```
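Before training, it can help to peek at one minibatch and confirm the shapes. This check is an addition, not part of the original script:

```python
# Sanity check (not in the original): one default-size minibatch
batch_x, batch_y = next(data_iter())
print(batch_x.shape, batch_y.shape)  # expected: (10, 2) (10,)
```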
Initialize the learnable parameters and attach gradient buffers:

```python
w = nd.random.normal(shape=(num_dim,))
w.attach_grad()  # allocate space for dloss/dw
b = nd.ones(shape=(1,))
b.attach_grad()  # allocate space for dloss/db
params = [w, b]
```
The model and the loss:

```python
def net(x):
    '''Linear model: y_hat = xw + b.'''
    return nd.dot(x, w) + b

def mse_loss(y_hat, y):
    '''Per-example squared error; backward() sums it over the batch.'''
    return (y_hat - y) ** 2
```
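For reference, differentiating the squared error of a single example gives the gradients that `loss.backward()` accumulates (summed over the minibatch):

$$ \frac{\partial \ell}{\partial \mathbf{w}} = 2\,(\hat{y} - y)\,\mathbf{x}, \qquad \frac{\partial \ell}{\partial b} = 2\,(\hat{y} - y). $$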
One step of minibatch SGD:

```python
def step(params, lr=0.001):
    '''SGD update: move each parameter against its gradient.
    Gradients must already be populated by loss.backward().'''
    for param in params:
        param[:] = param - lr * param.grad  # in-place, so the grad buffer stays attached
```
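Because `loss.backward()` sums gradients over the minibatch, the effective step size grows with the batch size. A common variant normalizes by the batch size so the learning rate is batch-size independent; this sketch is an assumption, not part of the original script, and `step_mean` is a hypothetical name:

```python
def step_mean(params, batch_size, lr=0.001):
    '''Hypothetical variant: SGD on the mean (rather than summed) gradient.'''
    for param in params:
        param[:] = param - lr * param.grad / batch_size
```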
The training loop:

```python
def train(epoch, batch_size=10, lr=0.001):
    for e in range(epoch):
        total_loss = 0
        for minibatch_x, minibatch_y in data_iter(batch_size):
            with ag.record():  # record the forward pass for autograd
                output = net(minibatch_x)
                loss = mse_loss(output, minibatch_y)
            loss.backward()   # populate param.grad, summed over the batch
            step(params, lr)
            total_loss += nd.sum(loss).asscalar()
        print(f"epoch {e}, average loss: {total_loss / num_input}")

train(epoch=5)
```
Finally, compare the learned parameters with the ground truth:

```python
print(f'w_true: {w_true.asnumpy().tolist()}, w_pred: {w.asnumpy().tolist()}')
print(f'b_true: {b_true.asnumpy().tolist()}, b_pred: {b.asnumpy().tolist()}')
```
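For comparison, here is a minimal sketch of the same model using MXNet's higher-level Gluon API, assuming MXNet 1.x; the learning rate is illustrative, and `Trainer.step` divides the summed gradient by the batch size:

```python
from mxnet import autograd
from mxnet.gluon import nn, Trainer, loss as gloss

gnet = nn.Dense(1)   # linear layer; input size inferred on first call
gnet.initialize()
l2 = gloss.L2Loss()  # (1/2) * (y_hat - y)^2
trainer = Trainer(gnet.collect_params(), 'sgd', {'learning_rate': 0.03})

for e in range(5):
    for X, y in data_iter():
        with autograd.record():
            loss = l2(gnet(X), y)
        loss.backward()
        trainer.step(batch_size=10)  # normalizes the summed gradient
```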