MindSpore LeNet & ResNet50 Assignment

Use the Tsinghua mirror: https://mirrors.tuna.tsinghua.edu.cn/anaconda/archive/

  • Create a clean conda environment
conda create -n mindspore python=3.7.5
  • Install MindSpore

https://www.mindspore.cn/install

pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.1.0/MindSpore/cpu/windows_x64/mindspore-1.1.0-cp37-cp37m-win_amd64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple
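To verify the installation, print the installed version from the new environment (it should report 1.1.0 for the wheel above):

python -c "import mindspore; print(mindspore.__version__)"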
  • Install Jupyter Notebook
conda install jupyter notebook
  • Download and unzip the dataset
wget -N https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/MNIST_Data.zip 

unzip -o MNIST_Data.zip -d ./datasets
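
After unzipping, the directory layout should look like this (file names as shipped in the official MindSpore MNIST archive):

./datasets/MNIST_Data
├── test
│   ├── t10k-images-idx3-ubyte
│   └── t10k-labels-idx1-ubyte
└── train
    ├── train-images-idx3-ubyte
    └── train-labels-idx1-ubyte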

 

  • Install matplotlib
pip install matplotlib
  • Define the data processing function
import mindspore.dataset.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.vision import Inter
from mindspore import dtype as mstype
import matplotlib
import mindspore.dataset as ds


def create_dataset(data_path, batch_size=32, repeat_size=1,
                   num_parallel_workers=1):
    """ 
    create dataset for train or test
    
    Args:
        data_path (str): Data path
        batch_size (int): The number of data records in each group
        repeat_size (int): The number of replicated data records
        num_parallel_workers (int): The number of parallel workers
    """
    # define dataset
    mnist_ds = ds.MnistDataset(data_path)

    # define the parameters needed for data augmentation and normalization
    resize_height, resize_width = 32, 32
    rescale = 1.0 / 255.0
    shift = 0.0
    # 0.1307 and 0.3081 are the mean and standard deviation of the MNIST training set
    rescale_nml = 1 / 0.3081
    shift_nml = -1 * 0.1307 / 0.3081

    # generate the corresponding data augmentation operations from the parameters above
    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)
    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
    rescale_op = CV.Rescale(rescale, shift)
    hwc2chw_op = CV.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)

    # using map to apply operations to a dataset
    mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    
    # process the generated dataset
    buffer_size = 10000
    mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size)
    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
    mnist_ds = mnist_ds.repeat(repeat_size)

    return mnist_ds
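
With create_dataset defined, it is worth pulling one batch and plotting a few digits to sanity-check the pipeline (this is also where the matplotlib package installed above comes in). A minimal sketch, assuming the ./datasets path from the unzip step; adjust it to wherever your data lives:

import matplotlib.pyplot as plt

# preview one batch from the pipeline
ds_preview = create_dataset("./datasets/MNIST_Data/train")
batch = next(ds_preview.create_dict_iterator(output_numpy=True))
print("image batch shape:", batch["image"].shape)  # expected: (32, 1, 32, 32)
print("label batch shape:", batch["label"].shape)  # expected: (32,)
for i in range(4):
    plt.subplot(1, 4, i + 1)
    plt.imshow(batch["image"][i][0], cmap="gray")
    plt.title(int(batch["label"][i]))
    plt.axis("off")
plt.show()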
  • Build the LeNet5 network
import mindspore.nn as nn
from mindspore.common.initializer import Normal

class LeNet5(nn.Cell):
    """Lenet network structure."""
    # define the operator required
    def __init__(self, num_class=10, num_channel=1):
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
        self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
        self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
        self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    # use the preceding operators to construct networks
    def construct(self, x):
        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.max_pool2d(self.relu(self.conv2(x)))
        x = self.flatten(x)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x) 
        return x
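
Before wiring the network into training, a quick forward pass on random data confirms the layer shapes line up. A minimal sketch (any float32 NCHW tensor of shape (batch, 1, 32, 32) works):

import numpy as np
from mindspore import Tensor

# run a dummy forward pass to check the output shape
net = LeNet5()
dummy = Tensor(np.random.rand(1, 1, 32, 32).astype(np.float32))
print(net(dummy).shape)  # expected: (1, 10), one logit per digit class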
  • Set up the training network
import os
from mindspore.nn import SoftmaxCrossEntropyWithLogits
from mindspore.nn import Accuracy
from mindspore import context, Model

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

lr = 0.01
momentum = 0.9 

# create the network
network = LeNet5()

# define the optimizer
net_opt = nn.Momentum(network.trainable_params(), lr, momentum)

# define the loss function
net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

# define the model
model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

epoch_size = 1
mnist_path = r"D:\mindsporedata\MNIST_Data"

eval_dataset = create_dataset(r"D:\mindsporedata\MNIST_Data\test")

repeat_size = 1
print("========== The Training Model is Defined. ==========")
  • Run training once and save the model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig

model_path = './models/ckpt/mindspore_save_model/'
# clean up checkpoint files left over from previous runs (rm is a Linux/macOS command; delete the files manually on Windows)
os.system('rm -f {}*.ckpt {}*.meta {}*.pb'.format(model_path, model_path, model_path))

# define config_ck for specifying the steps to save the checkpoint and the maximum file numbers
config_ck = CheckpointConfig(save_checkpoint_steps=375, keep_checkpoint_max=10)
# define ckpoint_cb for specifying the prefix of the file and the saving directory
ckpoint_cb = ModelCheckpoint(prefix='lenet', directory=model_path, config=config_ck)
# load the training dataset
ds_train = create_dataset(os.path.join(mnist_path, "train"), 32, repeat_size)
print("========== The Training is Starting. ==========")
model.train(epoch_size, ds_train, callbacks=ckpoint_cb, dataset_sink_mode=False)
print("========== The Training is Completed and the Checkpoint Files are Saved. ==========")
  • Run training multiple times and save the model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
import os

# clean up the second run's old checkpoint files (Linux/macOS; see the note above for Windows)
os.system('rm -f {}lenet_2*.ckpt'.format(model_path))

config_ck = CheckpointConfig(save_checkpoint_steps=375, keep_checkpoint_max=10)
# use a new prefix to mark that this is the second run of the script
ckpoint_cb = ModelCheckpoint(prefix='lenet_2', directory='./models/ckpt/mindspore_save_model', config=config_ck)
ds_train = create_dataset(os.path.join(mnist_path, "train"), 32, repeat_size)
print("========== The Training is Starting. ==========")
model.train(epoch_size, ds_train, callbacks=ckpoint_cb, dataset_sink_mode=False)
print("========== The Training is Completed and the Checkpoint Files are Saved. ==========")
  • Configure a time-based policy for saving the model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
import os

os.system('rm -f {}lenet_3*.ckpt'.format(model_path))
# define config_ck to save a checkpoint every 10 seconds and keep one checkpoint per minute
config_ck = CheckpointConfig(save_checkpoint_steps=None, save_checkpoint_seconds=10, keep_checkpoint_max=None, keep_checkpoint_per_n_minutes=1)
# define ckpoint_cb for specifying the prefix of the file and the saving directory
ckpoint_cb = ModelCheckpoint(prefix='lenet_3', directory='./models/ckpt/mindspore_save_model', config=config_ck)
# load the training dataset
epoch_size = 2
ds_train = create_dataset(os.path.join(mnist_path, "train"), 32, repeat_size)
print("========== The Training is Starting. ==========")
model.train(epoch_size, ds_train, callbacks=ckpoint_cb, dataset_sink_mode=False)
print("========== The Training is Completed and the Checkpoint Files are Saved. ==========")
  • Export MindIR
from mindspore import export, load_checkpoint, load_param_into_net
from mindspore import Tensor
import numpy as np

lenet = LeNet5()
# return a parameter dict for model
param_dict = load_checkpoint("./models/ckpt/mindspore_save_model/lenet-1_1875.ckpt")
# load the parameter into net
load_param_into_net(lenet, param_dict)
# use a random tensor with the training input shape as the export example
inputs = np.random.uniform(0.0, 1.0, size=[32, 1, 32, 32]).astype(np.float32)
# export the file with the specified name and format
export(lenet, Tensor(inputs), file_name='lenet-1_1875', file_format='MINDIR')
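
The exported MindIR file can be loaded back for inference. The sketch below assumes MindSpore 1.2 or later, where mindspore.load and nn.GraphCell are available (on 1.1 you would reload the .ckpt into LeNet5 instead):

import mindspore as ms

# load the exported graph and wrap it in a GraphCell for execution
graph = ms.load('lenet-1_1875.mindir')
net = nn.GraphCell(graph)
output = net(Tensor(inputs))
print(output.shape)  # expected: (32, 10)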

 
