mxnet 实现线性回归

[参考李沐动手学习深度学习视频](https://www.bilibili.com/video/BV1tx41147Bp)
from mxnet import ndarray as nd
from mxnet import autograd as ag
import random
# Build a synthetic regression dataset: y = x . w_true + b_true + small Gaussian noise
w_true=nd.array([5,2])
b_true=nd.array([3])
num_input=1000
num_dim=2
x_input = nd.random_normal(shape=(num_input,num_dim))
y_input = nd.dot(x_input,w_true)+b_true# dot(): vector-vector inner product; also works for vector-matrix and matrix-matrix
y_input+=.001*nd.random_normal(shape=(num_input,))
# print(y_input)


def data_iter(batch_size=10):
    """Yield shuffled (features, labels) minibatches from the global dataset.

    Each yielded pair holds up to ``batch_size`` rows of ``x_input`` /
    ``y_input``, drawn without replacement in a fresh random order.
    """
    order = list(range(num_input))
    random.shuffle(order)
    for start in range(0, num_input, batch_size):
        # list slicing already clips at the end, so no explicit min() is needed;
        # nd.take requires the row indices as an NDArray
        batch_idx = nd.array(order[start:start + batch_size])
        yield nd.take(x_input, batch_idx), nd.take(y_input, batch_idx)
# Initialize learnable parameters and attach gradient buffers for autograd
w=nd.random_normal(shape=(num_dim,))
w.attach_grad()
b=nd.ones(shape=(1,))
b.attach_grad()
params=[w,b]

#定义模型
def net(x):
    """Linear model: return x @ w + b using the global parameters."""
    projection = nd.dot(x, w)
    return projection + b

#定义 loss 

def mse_loss(y_hat, y):
    """Element-wise squared error.

    The result is not reduced to a scalar; on ``backward()`` MXNet
    implicitly sums a non-scalar loss before differentiating.
    """
    residual = y_hat - y
    return nd.power(residual, 2)

#定义反向传播
def step(params, lr=0.001):
    """Apply one SGD update to every parameter using its ``.grad``."""
    for p in params:
        # slice-assignment writes in place, so the NDArray (and its
        # attached gradient buffer) is reused instead of rebound
        p[:] = p - lr * p.grad
    
#定义train

def train(epoch, batch_size=10, lr=0.001):
    """Train the linear model with minibatch SGD.

    Args:
        epoch: number of full passes over the dataset.
        batch_size: minibatch size, forwarded to data_iter.
        lr: learning rate for the SGD update.
    """
    for e in range(epoch):
        total_loss = 0
        # BUG FIX: batch_size was previously not forwarded to data_iter(),
        # so the parameter silently had no effect.
        for minibatch_x, minibatch_y in data_iter(batch_size):
            with ag.record():  # record the forward ops so autograd can build the graph
                output = net(minibatch_x)
                loss = mse_loss(output, minibatch_y)
            loss.backward()  # non-scalar loss: mxnet sums it before differentiating
            step(params, lr)
            total_loss += nd.sum(loss).asscalar()
        print(f"epoch {e}, average loss: {total_loss/num_input}")

# Run training and compare the learned parameters against the ground truth
train(epoch=5)
print(f'w_true:{w_true.asnumpy().tolist()}, w_pre={w.asnumpy().tolist()}')
print(f'b_true:{b_true.asnumpy().tolist()}, b_pre={b.asnumpy().tolist()}')
#epoch 0, average loss: 14.304927275657654
#epoch 1, average loss: 0.21985875668004154
#epoch 2, average loss: 0.003455003286479041
#epoch 3, average loss: 5.654240959120216e-05
#epoch 4, average loss: 1.9240497645114375e-06
#w_true:[5.0, 2.0], w_pre=[4.999731063842773, 1.9999667406082153]
#b_true:[3.0], b_pre=[2.999972343444824]