实验(二)
代码段:
# Experiment environment: MindSpore, Python 3.7, aarch64
# Loads the local MNIST training set into a MindSpore dataset pipeline.
import os
# os.environ['DEVICE_ID'] = '0'  # optionally pin a specific Ascend device ID
import mindspore as ms
import mindspore.context as context
# NOTE(review): C, CV, nn, Model, and LossMonitor are imported but unused in this
# visible portion of the script — presumably intended for the (missing) transform,
# network, and training steps.
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as CV
from mindspore import nn
from mindspore.train import Model
from mindspore.train.callback import LossMonitor
# Run in graph (compiled) mode on an Ascend accelerator; alternatives: CPU, GPU
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend') # Ascend, CPU, GPU
data_train = os.path.join("MNIST", 'train') # path to the MNIST training files
data_test = os.path.join("MNIST", 'test') # path to the MNIST test files
# Build a dataset from the raw MNIST training files on disk
# (expects the MindSpore MNIST binary layout under MNIST/train)
ds = ms.dataset.MnistDataset(data_train)
print(data_train)
以上代码在华为云环境中不断报错,且无法排查出错误原因。于是我参考了网上的视频教程,改用基于 TensorFlow 框架的 Keras 第三方库,在本地电脑上运行:
# Preparation: install the required third-party libraries
pip install keras
pip install tensorflow
第一次代码:
# Experiment environment: PyCharm, Python 3.8
# Framework: TensorFlow (via the Keras high-level API)
# Trains a small fully-connected network on MNIST digit classification.
from keras.utils import to_categorical
from keras import models, layers, regularizers
from keras.optimizers import RMSprop
from keras.datasets import mnist # MNIST dataset loader
import matplotlib.pyplot as plt
# Load the dataset (downloaded automatically on first use)
(train_images, train_labels), (test_images, test_labels) = mnist.load_data() # download the dataset
# print(train_images.shape, test_images.shape) # print the array shapes
# print(train_images[0])
# print(train_labels[0])
# plt.imshow(train_images[0])
# plt.show()
# Flatten each 28x28 image into a 1-D vector of 784 float values
train_images = train_images.reshape((60000, 28*28)).astype('float') # 784 pixels -> 784 input neurons
test_images = test_images.reshape((10000, 28*28)).astype('float')
# One-hot encode the integer class labels (needed for categorical cross-entropy)
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# print(train_labels[0])
# Build the neural network
# Input layer: 28*28 neurons; hidden layer: 15 neurons; output layer: 10 neurons (digits 0-9)
network = models.Sequential() # sequential (layer-by-layer) model
network.add(layers.Dense(units=15, activation='relu', input_shape=(28*28, ), )) # hidden layer
# Dense: fully-connected layer
# units: 15 neurons
# Activation: ReLU (sigmoid and tanh suffer from vanishing gradients — for large inputs
# their curves are nearly flat, so gradient descent becomes very slow)
# NOTE(review): the statement below is cut off at the end of this chunk — the closing
# parenthesis (and the rest of the script: compile/fit/evaluate) presumably follows.
network.add(layers.Dense(units=10, activation='softmax')