from mxnet import gluon
from mxnet import ndarray as nd
from mxnet import autograd
def transform(data, label):
    """Normalize image pixels from [0, 255] to [0.0, 1.0] floats.

    Returns the image scaled into float32 and the label cast to float32,
    the dtypes the Gluon loss/metric code below expects.
    """
    scaled = data.astype('float32') / 255
    return scaled, label.astype('float32')
# Download (on first use) and wrap the FashionMNIST splits, applying the
# normalizing `transform` defined above to every (image, label) pair.
mnist_train=gluon.data.vision.FashionMNIST(train=True,transform=transform)
mnist_test=gluon.data.vision.FashionMNIST(train=False,transform=transform)
batch_size=256
# Shuffle only the training stream; evaluation order does not matter but is
# kept deterministic for the test split.
train_data=gluon.data.DataLoader(mnist_train,batch_size,shuffle=True)
test_data=gluon.data.DataLoader(mnist_test,batch_size,shuffle=False)
# 28 x 28 pixels per image; Flatten below infers this at first forward pass.
num_inputs = 784

# Softmax regression as a one-layer network: flatten each image to a
# (batch_size, 784) matrix, then a single linear layer with one output
# score per clothing class.
net = gluon.nn.Sequential()
with net.name_scope():
    net.add(gluon.nn.Flatten())
    net.add(gluon.nn.Dense(10))
net.initialize()

# Fused softmax + cross-entropy loss (numerically stabler than applying
# softmax and cross-entropy separately).
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()

# Plain SGD over all model parameters.
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
def accuracy(output, label):
    """Fraction of rows in `output` whose argmax equals `label` (a scalar)."""
    predicted = output.argmax(axis=1)
    return nd.mean(predicted == label).asscalar()
def evaluate_accuracy(test_data, net):
    """Mean per-batch accuracy of `net` over every batch in `test_data`.

    Note this averages batch accuracies, so a smaller final batch is
    weighted the same as the full ones (same behavior as the training
    statistics below).
    """
    running = 0.0
    for batch, labels in test_data:
        running += accuracy(net(batch), labels)
    return running / len(test_data)
epochs = 15
for epoch in range(epochs):
    total_loss = .0
    total_acc = .0
    for data, label in train_data:
        # Record the forward pass so autograd can build the graph.
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        # step(batch_size) divides the summed gradient by the batch size.
        trainer.step(batch_size)
        total_loss += nd.mean(loss).asscalar()
        total_acc += accuracy(output, label)
    # BUG FIX: previously called evaluate_accuracy(train_data, net), which
    # re-measured training accuracy but printed it under the "Test acc"
    # label. Evaluate on the held-out test split instead.
    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch,
        total_loss / len(train_data),
        total_acc / len(train_data),
        test_acc,
    ))