import paddle
import os
import numpy as np
import matplotlib.pyplot as plt
from paddle.nn import Conv2D, MaxPool2D, Linear
import paddle.nn.functional as F
# Work around the "duplicate OpenMP runtime" abort that can occur when multiple
# libraries (e.g. paddle and matplotlib/numpy via MKL) each load their own copy
# of libiomp; this lets the process continue instead of exiting immediately.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
def showImg(data, is_arr=False, title=''):
    """Display a single image, or a row of images, with matplotlib.

    Args:
        data: a single 2-D image (when ``is_arr`` is False) or a sequence
            of 2-D images (when ``is_arr`` is True).
        is_arr: when True, render every image in ``data`` side by side
            in a single row of subplots.
        title: figure title; in array mode each subplot is titled
            ``title-<index>`` (1-based).
    """
    if is_arr:
        count = len(data)
        plt.figure(figsize=(50, 20))
        for idx in range(count):
            # One row with `count` columns; subplot positions are 1-based.
            plt.subplot(1, count, idx + 1)
            plt.imshow(data[idx], cmap=plt.cm.binary)
            plt.title(title + '-' + str(idx + 1))
            plt.axis('off')
    else:
        plt.figure(figsize=(10, 10))
        plt.imshow(data, cmap=plt.cm.binary)
        plt.title(title)
        plt.axis('off')
    plt.show()
# Alternative synthetic input (a 28x28 diagonal at intensity 255), kept for debugging:
# data=np.dot(np.eye(28),255)
# x_data = np.array(data, dtype='float32').reshape(-1, 1, 28, 28)
# Set up the data reader: the API reads the MNIST training set automatically.
train_dataset = paddle.vision.datasets.MNIST(mode='train')
# train_dataset[0] is an (image, label) pair; take the first image only.
data = np.array(train_dataset[0][0])
# Reshape to NCHW layout: [batch=1, channels=1, height=28, width=28], float32.
x_data = np.array(data, dtype='float32').reshape(-1, 1, 28, 28)
# Visualize the raw digit before any processing.
showImg(x_data[0][0],title='raw')
# Build the convolution and pooling layers (a LeNet-style feature extractor).
# First convolution layer: 1 input channel -> 8 feature maps, 3x3 kernel.
conv1 = Conv2D(in_channels=1, out_channels=8, kernel_size=3)
max_pool1 = MaxPool2D(kernel_size=2, stride=2)
# Size logic: pooling does not change the channel count; it is still 8 here.
# (The original comment said 6, which contradicts conv1's out_channels=8.)
# Second convolution layer: 8 -> 16 feature maps, 5x5 kernel.
conv2 = Conv2D(in_channels=8, out_channels=16, kernel_size=5)
max_pool2 = MaxPool2D(kernel_size=2, stride=2)
# Third convolution layer: 16 -> 120 feature maps, 4x4 kernel.
conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)
# Size logic: the input to the fully connected stage is flattened
# [B, C, H, W] -> [B, C*H*W]. Starting from a 28x28 input, after three
# convolutions and two poolings the spatial size is 1x1, so C*H*W == 120.
fc1 = Linear(in_features=120, out_features=64)
# Fully connected layers: the first outputs 64 neurons, the second outputs
# one neuron per class label (10 digits).
fc2 = Linear(in_features=64, out_features=10)
x=paddle.to_tensor(x_data)
# x[0] below selects the first (and only) item in the batch for display.
x = conv1(x);showImg(x[0].numpy(),is_arr=True,title='c1')
# NOTE(review): the original comment claimed each conv layer uses a Sigmoid
# activation, but the code actually applies ReLU (F.relu) throughout.
x = F.relu(x);showImg(x[0].numpy(),is_arr=True,title='s1')
x = max_pool1(x);showImg(x[0].numpy(),is_arr=True,title='m1')
# NOTE(review): this second ReLU is a no-op — the values are already
# non-negative after the first ReLU and max-pooling. Confirm it is intended.
x = F.relu(x);showImg(x[0].numpy(),is_arr=True,title='s2')
x = conv2(x);showImg(x[0].numpy(),is_arr=True,title='c2')
x = max_pool2(x);showImg(x[0].numpy(),is_arr=True,title='m2')
x = conv3(x);showImg(x[0].numpy(),is_arr=True,title='c3')
# Flatten [B, C, H, W] -> [B, C*H*W] before the fully connected layers.
x = paddle.reshape(x, [x.shape[0], -1])
x = fc1(x)
x = F.relu(x)
x = fc2(x)
# Print the logits: shape [1, 10], one score per digit class.
print(x.shape)
print(x)
# Example output pasted from a previous run — kept as a comment so the script
# remains runnable (as bare statements, `Tensor`/`float32`/`CPUPlace` are
# undefined names and would raise a NameError):
# [1, 10]
# Tensor(shape=[1, 10], dtype=float32, place=CPUPlace, stop_gradient=False,
#        [[-60.85184097 , 4.83908606 , 31.84540939 , 81.17543030 , 281.08218384, -43.87710953 , -364.31637573, -0.46464920 , 22.34646797 , -181.85520935]])