import random

from mxnet import autograd, nd
lr=0.01
input_dim=2
input_len=100
output_dim=1
input_signal=nd.random.normal(scale=1,shape=(input_len,input_dim))
output_ideal=nd.random.normal(scale=1,shape=(input_len, output_dim))
# print(input_signal)
w=nd.random.normal(scale=1,shape=(input_dim,output_dim))
b=2.5
epochs_num=10
batch_size=5
def linreg(x,w,b):
return nd.dot(x,w)+b
def squared_loss(y_hat,y):
return (y_hat-y.reshape(y_hat.shape))**2/2
def sgd (params,lr,batch_size):
for param in params:
param[:]=param-lr*param.grad/batch_size
def data_iter(batch_size, features, labels):
num_examples = len(features)
indices = list(range(num_examples))
random.shuffle(indices) # 样本的读取顺序是随机的
for i in range(0, num_examples, batch_size):
j = nd.array(indices[i: min(i + batch_size, num_examples)])
yield features.take(j), labels.take(j) # take函数根据索引返回对应元素
net=linreg
loss=squared_loss
for epoch in range(epochs_num):
i=0
for fea,lab in data_iter(batch_size,input_signal,output_ideal):
with autograd.record():
# output_hat = net(fea, w, b)
l=loss(net(fea, w, b),lab)
l.backward()
sgd([w,b],lr,batch_size)
i = i + 1
print('training',epoch,i)
运行上述代码时出现如下报错：
mxnet.base.MXNetError: [15:30:46] c:\jenkins\workspace\mxnet-tag\mxnet\src\operator\tensor\./dot-inl.h:1241: Check failed: L[!Ta].Size() == R[Tb].Size() (2 vs. 1000) : dot shape error: [10,2] X [1000,1]
修复方法：在调用 backward() 之前，应先为参数的梯度分配内存（调用 attach_grad()）：
b.attach_grad()
w.attach_grad()
此外，b 不能是 Python 的 float 类型——只有 NDArray 才有 attach_grad() 方法，例如：
b=nd.zeros(shape=(1,))