Deep learning: softmax regression on a GPU

The book loads the Fashion-MNIST dataset by downloading it over the network, but we already have a local copy, so we use our own loading function. Likewise, since the book's train_ch3 cannot run on the GPU, we rewrite it as well.

import d2lzh as d2l
import mxnet as mx
from mxnet import gluon, init, autograd
from mxnet.gluon import loss as gloss, nn
from mxnet.gluon import data as gdata
import sys

# Load the data
def load_data_fashion_mnist(batch_size):
    root = r'C:/Users/Wu/AppData/Roaming/mxnet/datasets/fashion-mnist/'
    mnist_train = gdata.vision.FashionMNIST(root=root, train=True)
    mnist_test = gdata.vision.FashionMNIST(root=root, train=False)
    transformer = gdata.vision.transforms.ToTensor()
    if sys.platform.startswith('win'):
        num_workers = 0  # do not use extra worker processes to speed up data reading on Windows
    else:
        num_workers = 4

    train_iter = gdata.DataLoader(mnist_train.transform_first(transformer),
                                  batch_size, shuffle=True, num_workers=num_workers)
    test_iter = gdata.DataLoader(mnist_test.transform_first(transformer),
                                 batch_size, shuffle=False, num_workers=num_workers)
    return train_iter, test_iter

# Mini-batch size
batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)
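
As a quick sanity check (a minimal sketch; the shapes in the comments assume the loader above), we can inspect one mini-batch:

# Inspect the shape and dtype of one mini-batch
for X, y in train_iter:
    print(X.shape, X.dtype)  # (256, 1, 28, 28) float32: ToTensor moves channels first and scales to [0, 1]
    print(y.shape, y.dtype)  # (256,) int32 class labels
    break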


# Define and initialize the model
net = nn.Sequential()
net.add(nn.Dense(10))  # 10 output classes
net.initialize(init.Normal(sigma=0.01), ctx=mx.gpu())  # initialize the weights on the GPU
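
Note that nn.Dense flattens everything beyond the batch dimension by default, so each (1, 28, 28) image is treated as a 784-dimensional vector. A minimal sketch to verify (the random input here is purely illustrative):

# Dense flattens (batch, 1, 28, 28) to (batch, 784) before the affine transform
X = mx.nd.random.uniform(shape=(2, 1, 28, 28), ctx=mx.gpu())
print(net(X).shape)  # (2, 10): one raw score per class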

# Cross-entropy loss
loss = gloss.SoftmaxCrossEntropyLoss()
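
SoftmaxCrossEntropyLoss fuses softmax and cross-entropy into a single numerically stable operator. Conceptually it computes the following (a naive sketch for illustration only; the real operator avoids the overflow-prone exponentials):

# Conceptual equivalent: -log of the softmax probability assigned to the true class
def naive_softmax_ce(y_hat, y):
    probs = y_hat.exp() / y_hat.exp().sum(axis=1, keepdims=True)  # softmax over classes
    return -probs.pick(y, axis=1).log()  # per-example cross-entropy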

# Optimization algorithm
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
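
With 'sgd' and no momentum, each trainer.step(batch_size) updates every parameter as w := w - lr * grad / batch_size; since we sum the loss over the mini-batch before calling backward, dividing by batch_size turns the summed gradient into an average. d2l.sgd, used in the CPU branch below, does the same thing by hand:

# Hand-written equivalent of trainer.step(batch_size) for plain SGD
def sgd(params, lr, batch_size):
    for param in params:
        param[:] = param - lr * param.grad / batch_size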

# Train the model
num_epochs = 5
# CPU version from the book:
# d2l.train_ch3(net,train_iter,test_iter,loss,num_epochs,batch_size,None,None,trainer)

# GPU version: rewrite train_ch3

# Evaluate the accuracy of model net on a data set
def evaluate_accuracy(data_iter, net):
    # number of correctly classified examples divided by the total
    acc_sum, n = 0.0, 0
    for x, y in data_iter:
        x = x.copyto(mx.gpu())
        y = y.copyto(mx.gpu())
        y = y.astype('float32')
        acc_sum += (net(x).argmax(axis=1) == y).sum().asscalar()
        n += y.size
    return acc_sum / n
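
Before training, the test accuracy should sit near chance, about 0.1 for ten balanced classes, which makes a quick sanity check for the function above:

# With randomly initialized weights, accuracy should be close to 1/10
print(evaluate_accuracy(test_iter, net))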
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, trainer=None):
    for epoch in range(num_epochs):
        # accumulated training loss, accumulated correct predictions, example count
        train_loss_sum , train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            X = X.copyto(mx.gpu())
            y = y.copyto(mx.gpu())
            with autograd.record():
                y_hat = net(X)
                los = loss(y_hat, y).sum()
            # compute the gradients of the summed loss
            los.backward()
            if trainer is None:
                # mini-batch stochastic gradient descent (hand-written)
                d2l.sgd(params, lr, batch_size)
            else:
                trainer.step(batch_size)
            train_loss_sum += los.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y.astype('float32')).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter,net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_loss_sum / n, train_acc_sum / n,test_acc))

train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, trainer)
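
After training, the model can be used for prediction. A minimal sketch (the text_labels list below is the standard Fashion-MNIST class order):

# Predict labels for one test batch and compare with the ground truth
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
               'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
for X, y in test_iter:
    preds = net(X.copyto(mx.gpu())).argmax(axis=1).asnumpy()
    print('pred:', [text_labels[int(i)] for i in preds[:5]])
    print('true:', [text_labels[int(i)] for i in y.asnumpy()[:5]])
    break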

 
