# Linear regression model based on MXNet (implemented from scratch)
from mxnet import nd,autograd
import random
# Ground-truth parameters used to synthesize the data
true_w = [2, -3.14]
true_b = -2
# Generate a synthetic dataset: num_examples rows, num_input features each
num_input = 2
num_examples = 1000
features = nd.random.normal(scale=1, shape=(num_examples, num_input))
# Gaussian noise (std 0.01) added to the labels (fixed typo: was "norse")
noise = nd.random.normal(scale=0.01, shape=(num_examples,))
# labels = X · true_w + true_b + noise, computed feature-column by feature-column
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b + noise
# Mini-batch data iterator
def data_inter(features, labels, batch_size):
    """Yield (features, labels) mini-batches in random order.

    Bug fix: the slice end was min(num_examples - 1, i + batch_size),
    which silently dropped the last example of every epoch; the correct
    exclusive upper bound is num_examples.
    """
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # random read order => i.i.d. mini-batches
    for i in range(0, num_examples, batch_size):
        # last batch may be smaller than batch_size
        j = nd.array(indices[i:min(i + batch_size, num_examples)])
        yield features.take(j), labels.take(j)
# Linear regression model
def linreg(X, w, b):
    """Return the affine prediction X @ w + b (b broadcasts over rows)."""
    return nd.dot(X, w) + b
# Loss function
def squared_loss(y_hat, y):
    """Elementwise squared loss 0.5 * (y_hat - y)**2.

    y is reshaped to y_hat's shape so a column-vector prediction can be
    compared against a flat label vector.
    """
    residual = y_hat - y.reshape(y_hat.shape)
    return residual ** 2 / 2
# Optimizer: mini-batch stochastic gradient descent (learning rate lr)
def sgd(params, lr, batch_size):
    """Update each parameter in place: param -= (lr / batch_size) * param.grad.

    Dividing by batch_size averages the gradient of the summed batch loss.
    """
    step = lr / batch_size  # loop-invariant scale factor
    for param in params:
        param[:] = param - step * param.grad
# Initialize model parameters with small random values
w=nd.random.normal(scale=0.01,shape=(2,))
b=nd.random.normal(scale=0.1,shape=(1,))
# Attach gradient buffers so autograd can store gradients for w and b
w.attach_grad()
b.attach_grad()
# Training configuration (learning rate, model, loss, schedule)
lr=0.1
loss=squared_loss
net=linreg
num_epochs=20
batch_size=10
# Training loop: one full sweep over the shuffled dataset per epoch
# (restored the loop-body indentation lost in the pasted source)
for epoch in range(num_epochs):
    for X, y in data_inter(features, labels, batch_size):
        with autograd.record():  # record ops so backward() can compute grads
            l = loss(net(X, w, b), y)
        l.backward()  # gradient of the summed mini-batch loss
        sgd([w, b], lr, batch_size)
    # Report the average loss over the whole training set after each epoch
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().asnumpy()))