Notes on Using the MXNet Framework

Building a Neural Network

import gluonbook as gb
import mxnet as mx
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import loss as gloss, nn
import time

net = nn.Sequential()
net.add(nn.Conv2D(channels=6, kernel_size=5, activation='sigmoid'),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Dense(120, activation='sigmoid'),
        nn.Dense(84, activation='sigmoid'),
        nn.Dense(10))

# X = nd.random.uniform(shape=(1, 1, 28, 28))
# net.initialize()
# for layer in net:
#     X = layer(X)
#     print(layer.name, 'output shape:', X.shape)
batch_size = 256
train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size)


def try_gpu():
    """Return mx.gpu() if a GPU is available, otherwise fall back to mx.cpu()."""
    try:
        ctx = mx.gpu()
        _ = nd.zeros((1,), ctx=ctx)
    except mx.base.MXNetError:
        ctx = mx.cpu()
    return ctx


ctx = try_gpu()
lr, num_epochs = 0.9, 5

net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})


def evaluate_accuracy(data_iter, net, ctx):
    acc = nd.array([0], ctx=ctx)
    for X, y in data_iter:
        # If ctx is a GPU, copy the data to the GPU.
        X, y = X.as_in_context(ctx), y.as_in_context(ctx)
        acc += gb.accuracy(net(X), y)
    return acc.asscalar() / len(data_iter)


def train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs):
    print('training on', ctx)
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, start = 0, 0, time.time()
        for X, y in train_iter:
            X, y = X.as_in_context(ctx), y.as_in_context(ctx)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y)
            l.backward()
            trainer.step(batch_size)
            train_l_sum += l.mean().asscalar()
            train_acc_sum += gb.accuracy(y_hat, y)
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec' % (epoch + 1, train_l_sum / len(train_iter),
                                 train_acc_sum / len(train_iter),
                                 test_acc, time.time() - start))


train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)
The same MNIST task can also be trained with MXNet's Symbol API. The standalone script below builds an MLP, a LeNet, or a LeNet with a spatial transformer (lenet-stn), downloads the data set if needed, and hands training off to train_model.fit:

import find_mxnet
import mxnet as mx
import argparse
import os, sys
import train_model

def _download(data_dir):
    if not os.path.isdir(data_dir):
        os.system("mkdir " + data_dir)
    os.chdir(data_dir)
    if (not os.path.exists('train-images-idx3-ubyte')) or \
       (not os.path.exists('train-labels-idx1-ubyte')) or \
       (not os.path.exists('t10k-images-idx3-ubyte')) or \
       (not os.path.exists('t10k-labels-idx1-ubyte')):
        os.system("wget http://data.dmlc.ml/mxnet/data/mnist.zip")
        os.system("unzip -u mnist.zip; rm mnist.zip")
    os.chdir("..")

def get_loc(data, attr={'lr_mult':'0.01'}):
    """
    the localisation network in lenet-stn, it will increase acc about more than 1%,
    when num-epoch >=15
    """
    loc = mx.symbol.Convolution(data=data, num_filter=30, kernel=(5, 5), stride=(2,2))
    loc = mx.symbol.Activation(data = loc, act_type='relu')
    loc = mx.symbol.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
    loc = mx.symbol.Convolution(data=loc, num_filter=60, kernel=(3, 3), stride=(1,1), pad=(1, 1))
    loc = mx.symbol.Activation(data = loc, act_type='relu')
    loc = mx.symbol.Pooling(data=loc, global_pool=True, kernel=(2, 2), pool_type='avg')
    loc = mx.symbol.Flatten(data=loc)
    loc = mx.symbol.FullyConnected(data=loc, num_hidden=6, name="stn_loc", attr=attr)
    return loc

def get_mlp():
    """
    multi-layer perceptron
    """
    data = mx.symbol.Variable('data')
    fc1  = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
    act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
    fc2  = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
    act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
    fc3  = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
    mlp  = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
    return mlp

def get_lenet(add_stn=False):
    """
    LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
    Haffner. "Gradient-based learning applied to document recognition."
    Proceedings of the IEEE (1998)
    """
    data = mx.symbol.Variable('data')
    if add_stn:
        data = mx.sym.SpatialTransformer(data=data, loc=get_loc(data), target_shape = (28,28),
                                         transform_type="affine", sampler_type="bilinear")
    # first conv
    conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=20)
    tanh1 = mx.symbol.Activation(data=conv1, act_type="tanh")
    pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max",
                              kernel=(2,2), stride=(2,2))
    # second conv
    conv2 = mx.symbol.Convolution(data=pool1, kernel=(5,5), num_filter=50)
    tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh")
    pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max",
                              kernel=(2,2), stride=(2,2))
    # first fullc
    flatten = mx.symbol.Flatten(data=pool2)
    fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
    tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh")
    # second fullc
    fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=10)
    # loss
    lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
    return lenet

def get_iterator(data_shape):
    def get_iterator_impl(args, kv):
        data_dir = args.data_dir
        if '://' not in args.data_dir:
            _download(args.data_dir)
        flat = len(data_shape) != 3

        train           = mx.io.MNISTIter(
            image       = data_dir + "train-images-idx3-ubyte",
            label       = data_dir + "train-labels-idx1-ubyte",
            input_shape = data_shape,
            batch_size  = args.batch_size,
            shuffle     = True,
            flat        = flat,
            num_parts   = kv.num_workers,
            part_index  = kv.rank)

        val = mx.io.MNISTIter(
            image       = data_dir + "t10k-images-idx3-ubyte",
            label       = data_dir + "t10k-labels-idx1-ubyte",
            input_shape = data_shape,
            batch_size  = args.batch_size,
            flat        = flat,
            num_parts   = kv.num_workers,
            part_index  = kv.rank)

        return (train, val)
    return get_iterator_impl

def parse_args():
    parser = argparse.ArgumentParser(description='train an image classifier on mnist')
    parser.add_argument('--network', type=str, default='mlp',
                        choices = ['mlp', 'lenet', 'lenet-stn'],
                        help = 'the cnn to use')
    parser.add_argument('--data-dir', type=str, default='mnist/',
                        help='the input data directory')
    parser.add_argument('--gpus', type=str,
                        help='the gpus to use, e.g. "0,1,2,3"')
    parser.add_argument('--num-examples', type=int, default=60000,
                        help='the number of training examples')
    parser.add_argument('--batch-size', type=int, default=128,
                        help='the batch size')
    parser.add_argument('--lr', type=float, default=.1,
                        help='the initial learning rate')
    parser.add_argument('--model-prefix', type=str,
                        help='the prefix of the model to load/save')
    parser.add_argument('--save-model-prefix', type=str,
                        help='the prefix of the model to save')
    parser.add_argument('--num-epochs', type=int, default=10,
                        help='the number of training epochs')
    parser.add_argument('--load-epoch', type=int,
                        help="load the model on an epoch using the model-prefix")
    parser.add_argument('--kv-store', type=str, default='local',
                        help='the kvstore type')
    parser.add_argument('--lr-factor', type=float, default=1,
                        help='multiply the lr by this factor every lr-factor-epoch epochs')
    parser.add_argument('--lr-factor-epoch', type=float, default=1,
                        help='the number of epochs between lr reductions; can be fractional, e.g. .5')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()


    if args.network == 'mlp':
        data_shape = (784, )
        net = get_mlp()
    elif args.network == 'lenet-stn':
        data_shape = (1, 28, 28)
        net = get_lenet(True)
    else:
        data_shape = (1, 28, 28)
        net = get_lenet()

    # train
    train_model.fit(args, net, get_iterator(data_shape))

Saving and Loading Models

To save both the model parameters and the network structure to files, change nn.Sequential to nn.HybridSequential, call LeNet.hybridize() after building the network, and finally call the export method. If only the parameters need to be saved, the save_parameters method is enough, and a plain nn.Sequential can still be used.
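As a minimal sketch of the parameters-only route, assuming a toy two-layer network and an illustrative file name params_file.params (neither comes from the training code below), saving and restoring with save_parameters/load_parameters could look like this:

from mxnet import init, nd
from mxnet.gluon import nn

# A plain nn.Sequential is fine when only the parameters are saved.
net = nn.Sequential()
net.add(nn.Dense(120, activation='relu'),
        nn.Dense(10))
net.initialize(init=init.Xavier())
net(nd.random.uniform(shape=(1, 784)))      # one forward pass so deferred shapes are set

# Write only the weights (no network structure) to disk.
net.save_parameters('params_file.params')   # illustrative file name

# To load, the same structure must be rebuilt in code first.
new_net = nn.Sequential()
new_net.add(nn.Dense(120, activation='relu'),
            nn.Dense(10))
new_net.load_parameters('params_file.params')

Unlike export, this route writes no -symbol.json file, so the Python code that defines the network has to be available again at load time; that is exactly why the export route below switches to nn.HybridSequential.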

import mxnet as mx
from mxnet.gluon import nn
from mxnet import gluon,nd,autograd,init
from mxnet.gluon.data.vision import datasets,transforms
from IPython import display
import matplotlib.pyplot as plt
import time
import numpy as np

# Download the FashionMNIST dataset
fashion_train_data = datasets.FashionMNIST(train=True)
# Get the image data and the corresponding labels
images,labels = fashion_train_data[:]

# Chain the data transforms together
transformer = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(0.13,0.31)])
# Apply the transforms
fashion_data = fashion_train_data.transform_first(transformer)

# Set the batch size
batch_size = 256
# On Windows, set num_workers to 0, otherwise threading errors occur
train_data = gluon.data.DataLoader(fashion_data,batch_size=batch_size,shuffle=True,num_workers=0)

# Load the validation data
fashion_val_data = gluon.data.vision.FashionMNIST(train=False)
val_data = gluon.data.DataLoader(fashion_val_data.transform_first(transformer),
                                   batch_size=batch_size,num_workers=0)
# Define the GPU(s) used to accelerate training; list several if more than one is available
gpu_devices = [mx.gpu(0)]
# Define the network structure
LeNet = nn.HybridSequential()
# Build a LeNet-style network
LeNet.add(
    nn.Conv2D(channels=6,kernel_size=5,activation="relu"),
    nn.MaxPool2D(pool_size=2,strides=2),
    nn.Conv2D(channels=16,kernel_size=3,activation="relu"),
    nn.MaxPool2D(pool_size=2,strides=2),
    nn.Flatten(),
    nn.Dense(120,activation="relu"),
    nn.Dense(84,activation="relu"),
    nn.Dense(10)
)
LeNet.hybridize()
# Initialize the network weights, placing them on the GPU(s)
LeNet.collect_params().initialize(force_reinit=True,ctx=gpu_devices)
# Define the softmax cross-entropy loss
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# Use stochastic gradient descent (SGD) with a learning rate of 0.1
trainer = gluon.Trainer(LeNet.collect_params(),"sgd",{"learning_rate":0.1})
# Compute the accuracy of a batch of predictions
def acc(output,label):
    return (output.argmax(axis=1) == label.astype("float32")).mean().asscalar()

# Number of training epochs
epochs = 10
# Train the model
for epoch in range(epochs):
    train_loss,train_acc,val_acc = 0,0,0
    epoch_start_time = time.time()
    for data,label in train_data:
        # Split the batch across the GPU(s)
        data_list = gluon.utils.split_and_load(data,gpu_devices)
        label_list = gluon.utils.split_and_load(label,gpu_devices)
        # Forward pass
        with autograd.record():
            # Get the predictions on each GPU
            pred_Y = [LeNet(x) for x in data_list]
            # Compute the loss of the predictions on each GPU
            losses = [softmax_cross_entropy(pred_y,Y) for pred_y,Y in zip(pred_Y,label_list)]
        # Backward pass and parameter update
        for l in losses:
            l.backward()
        trainer.step(batch_size)
        # Accumulate the total training loss
        train_loss += sum([l.sum().asscalar() for l in losses])
        # Accumulate the training accuracy
        train_acc += sum([acc(output_y,y) for output_y,y in zip(pred_Y,label_list)])

    for data,label in val_data:
        data_list = gluon.utils.split_and_load(data,ctx_list=gpu_devices)
        label_list = gluon.utils.split_and_load(label,ctx_list=gpu_devices)
        # Accumulate the validation accuracy
        val_acc += sum(acc(LeNet(val_X),val_Y) for val_X,val_Y in zip(data_list,label_list))

    print("epoch %d,loss:%.3f,train acc:%.3f,test acc:%.3f,in %.1f sec"%
          (epoch+1,train_loss/len(labels),train_acc/len(train_data),val_acc/len(val_data),time.time()-epoch_start_time))
# Export both the network structure (lenet-symbol.json) and the parameters (lenet-0001.params)
LeNet.export("lenet",epoch=1)

# Load the model back from the exported files
LeNet = gluon.nn.SymbolBlock.imports("lenet-symbol.json",["data"],"lenet-0001.params")
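The imported SymbolBlock behaves like any other Gluon block. As a quick sanity check (the random tensor below is just a stand-in for a real preprocessed image batch), it can be called directly:

# By default SymbolBlock.imports loads onto the CPU; pass ctx=mx.gpu(0) to load onto a GPU instead.
sample = nd.random.uniform(shape=(1, 1, 28, 28))  # one 1-channel 28x28 image
out = LeNet(sample)
print(out.argmax(axis=1))                         # predicted class index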
