卷积神经网络LeNet的实现--mxnet

#卷积神经网络LeNet的实现

#1导入库
from mxnet import autograd,nd,gluon,init
from mxnet.gluon import loss as gloss ,nn ,data as gdata
import mxnet as mx
import time
import d2l.mxnet as d2l

#2模型创建
# 2. Model definition: a LeNet-style CNN for 28x28 single-channel images.
# Two conv + max-pool stages extract features; three dense layers classify
# into 10 categories (Gluon's Dense flattens its input automatically).
net = nn.Sequential()
net.add(nn.Conv2D(channels=6, kernel_size=5, activation='sigmoid'))
net.add(nn.MaxPool2D(pool_size=2, strides=2))
net.add(nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'))
net.add(nn.MaxPool2D(pool_size=2, strides=2))
net.add(nn.Dense(120, activation='relu'))
net.add(nn.Dense(84, activation='relu'))
net.add(nn.Dense(10))

# Debug aid (disabled): initialize the net, push a dummy (1, 1, 28, 28)
# input through it layer by layer, and print each layer's name and
# output shape to verify the architecture.
# X = nd.random.uniform(shape=(1, 1, 28, 28))
# net.initialize()
# for layer in net:
#     X = layer(X)
#     print(layer.name, ':', X.shape)

# 3. Load the Fashion-MNIST dataset as mini-batch iterators.
batch_size = 256
# Manual dataset construction (superseded by the d2l helper below):
# mnist_train = gdata.vision.FashionMNIST(train=True)
# mnist_test = gdata.vision.FashionMNIST(train=False)

train_iter ,test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)

# 4. Select the compute device: a GPU if one is available, otherwise CPU.
ctx = d2l.try_gpu()
def evaluate_accuracy(data_iter, net, ctx):
    """Return the classification accuracy of `net` over `data_iter` on `ctx`."""
    correct = nd.array([0], ctx=ctx)
    total = 0
    for features, labels in data_iter:
        # Move each batch to the target device before the forward pass.
        features = features.as_in_context(ctx)
        labels = labels.as_in_context(ctx).astype('float32')
        predictions = net(features).argmax(axis=1)
        correct += (predictions == labels).sum()
        total += labels.size
    return correct.asscalar() / total

#训练函数
# Training loop.
def leNettrain(net, train_iter, test_iter, trainer, batch_size, num_epochs, ctx):
    """Train `net` on `train_iter`, reporting loss/accuracy each epoch.

    Parameters
    ----------
    net : gluon.Block        -- the model to train (already initialized on `ctx`).
    train_iter, test_iter    -- mini-batch iterators of (X, y) pairs.
    trainer : gluon.Trainer  -- optimizer wrapper around net's parameters.
    batch_size : int         -- batch size, used to scale the gradient step.
    num_epochs : int         -- number of passes over the training data.
    ctx                      -- device (CPU/GPU context) to run on.
    """
    print("train on ", ctx)
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        for X, y in train_iter:
            # Move the batch to the compute device; labels become float32
            # so they can be compared against argmax output below.
            X, y = X.as_in_context(ctx), y.as_in_context(ctx).astype('float32')
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            # Step scales by batch_size because the loss was summed, not meaned.
            trainer.step(batch_size)
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        # Fixes: "epcoh" typo corrected, and the per-sample training loss
        # (accumulated above but previously never reported) is now printed.
        print("epoch:%d ,loss:%.4f ,train_acc:%.4f ,test_acc:%.4f time_cost:%.6f" % (
            epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc, time.time() - start
        ))

# Hyperparameters: learning rate and number of training epochs.
lr , num_epochs = 0.9 , 10
# Xavier initialization on the selected device (GPU if available).
net.initialize(force_reinit=True,ctx=ctx,init=init.Xavier())
# Plain SGD over all model parameters.
trainer = gluon.Trainer(net.collect_params(),'sgd',{'learning_rate':lr})
leNettrain(net,train_iter,test_iter,trainer,batch_size,num_epochs,ctx)
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值