MNIST
each digit class has 7,000 images (10 digits × 7,000 = 70,000 total)
train/test splitting: 60k 10k 训练集和测试集
数字图片识别
对于一张灰度图片信息来说,可以用长宽都为28的矩阵表示
[28,28]=>[784](化为一个一维矩阵)=>[1,784]
-
X = [1,2,…,784]
x:[1,dx]
-
H1 = XW1 + b1
w1:[d1,dx]
b1:[d1]
X是[1,784] [1,784] * [d1,784]的转置 + [d1] => [1,d1] + [d1] =>[1,d1]
-
H2 = H1W2 + b2 [1,d1] * [d2,d1]的转置 + [d2] => [1,d2]
W2: [d2, d1]
b2: [d2]
-
H3 = H2W3 + b3 [1,d2] * [d3=10, d2]的转置 + [d3] =>[1,d3=10]
W3: [10,d2]
b3:[10]
怎么计算loss
-
H3: [1,d3] 第三层迭代出来
-
Y:[10] one-hot表示
- 例如 把数字1表示为[0,1,0,0,0,0,0,0,0,0]
- 把数字3表示为[0,0,0,1,0,0,0,0,0,0]
-
Euclidean Distance : H3 和 Y比较
-
pred = W3 * {W2[W1X + b1] + b2 } + b3
objective = (pred - Y)^2
-
找到最小的objective
-
pred = W3 * {W2[W1X + b1] + b2 } + b3
[0.1,0.8,0.01,…] 显然这个测试结果为1
-
argmax(pred)
max = 0.8
argmax = 1 等于0.8对应的数字1
取概率最大的数字作为预测值
数字识别实战
步骤
- load data (下载数据集)
- Build Model (建立三层模型)
- Train
- Test
辅助的工具函数 utils.py
import torch
from matplotlib import pyplot as plt
def plot_curve(data):
    """Plot a 1-D sequence of scalars (e.g. the training-loss history).

    Args:
        data: sequence of scalar values; plotted against their index ("step").
    """
    # plt.figure() is kept to start a fresh figure, but the unused
    # `fig` binding from the original has been dropped.
    plt.figure()
    plt.plot(range(len(data)), data, color='blue')
    plt.legend(['value'], loc='upper right')
    plt.xlabel('step')
    plt.ylabel('value')
    plt.show()
def plot_image(img, label, name):
    """Show the first six images of a batch in a 2x3 grid, titled with labels.

    Args:
        img:   image batch; img[i][0] is one 28x28 grayscale channel
               (assumes shape [B, 1, 28, 28] with B >= 6 — TODO confirm callers).
        label: per-image labels; label[i].item() is shown in each title.
        name:  title prefix, e.g. 'image sample' or 'test'.
    """
    plt.figure()
    for i in range(6):
        plt.subplot(2, 3, i + 1)
        plt.tight_layout()
        # Undo the Normalize((0.1307,), (0.3081,)) transform applied at load
        # time, so pixel intensities are back in a displayable range.
        plt.imshow(img[i][0] * 0.3081 + 0.1307, cmap='gray', interpolation='none')
        plt.title("{}: {}".format(name, label[i].item()))
        plt.xticks([])
        plt.yticks([])
    plt.show()
def one_hot(label, depth=10):
    """Convert a 1-D tensor of class indices to a one-hot float matrix.

    Args:
        label: tensor of integer class indices, shape [N].
        depth: number of classes (default 10, the MNIST digits).

    Returns:
        FloatTensor of shape [N, depth] with exactly one 1 per row.
    """
    out = torch.zeros(label.size(0), depth)
    # `label.long()` replaces the legacy torch.LongTensor(label) constructor:
    # it is a no-op when the input is already int64 and avoids an extra copy.
    idx = label.long().view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1)
    return out
核心代码 mnist_main.py
import torch
from torch import nn #循环神经网络的基本工作
from torch.nn import functional as F #常用函数
from torch import optim #优化工具包
import torchvision #视觉
from matplotlib import pyplot as plt #画图
from utils import plot_image, plot_curve, one_hot #模块函数
batch_size = 512
# Step 1: load the MNIST dataset (downloads on first run).
# Normalize uses MNIST's dataset-wide mean/std (0.1307, 0.3081) so pixels
# are roughly zero-mean / unit-variance, which helps training converge.
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,)
                                   )
                               ])),
    batch_size = batch_size, shuffle = True
)
# Test split: same transform; no shuffling needed for evaluation.
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,)
                                   )
                               ])),
    batch_size = batch_size, shuffle = False
)
# Peek at one batch to sanity-check shapes and the normalized value range.
x,y = next(iter(train_loader))
print(x.shape, y.shape,x.min(),x.max())
plot_image(x, y, 'image sample')
# Step 2: build the model — three stacked linear layers with nonlinearities.
class Net(nn.Module):
    """Three-layer fully-connected network for MNIST: 784 -> 256 -> 64 -> 10."""

    def __init__(self):
        super().__init__()
        # One affine map (xW + b) per stage; widths shrink toward 10 classes.
        self.fc1 = nn.Linear(28 * 28, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        """Map a flattened batch [B, 784] to raw class scores [B, 10]."""
        h1 = F.relu(self.fc1(x))   # h1 = relu(x·W1 + b1)
        h2 = F.relu(self.fc2(h1))  # h2 = relu(h1·W2 + b2)
        # Final layer has no activation: raw scores are compared against
        # a one-hot target with MSE by the caller.
        return self.fc3(h2)
# Step 3: train.
net = Net()  # create a network instance
# Trainable parameters, in registration order: [w1, b1, w2, b2, w3, b3].
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)  # learning rate + momentum
train_loss = []  # per-batch loss history, plotted after training
for epoch in range(3):  # three full passes over the training set
    for batch_idx, (x, y) in enumerate(train_loader):
        # x: [b, 1, 28, 28], y: [b] (b == batch_size except the last batch)
        # Flatten the images: [b, 1, 28, 28] => [b, 784]
        x = x.view(x.size(0), 28*28)
        # Forward pass => raw scores [b, 10]
        out = net(x)
        # One-hot encode the integer labels => [b, 10]
        y_onehot = one_hot(y)
        # loss = mse(out, y_onehot): mean squared error against the one-hot target
        loss = F.mse_loss(out,y_onehot)
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        loss.backward()        # backpropagate: compute d(loss)/d(param) for all params
        # w = w - lr * grad (plus momentum): apply the parameter update
        optimizer.step()
        train_loss.append(loss.item())
        if batch_idx % 10==0:
            print(epoch, batch_idx, loss.item())
# After training we hold a reasonably good [w1, b1, w2, b2, w3, b3].
plot_curve(train_loss)
# Step 4: evaluate accuracy on the held-out test set.
total_correct = 0
# no_grad(): inference only — skip building the autograd graph, which the
# original version did needlessly (wasted memory, no behavioral difference).
with torch.no_grad():
    for x,y in test_loader:
        x = x.view(x.size(0), 28*28)
        out = net(x)
        # out: [b, 10] => pred: [b]; take the highest-scoring class per image
        pred = out.argmax(dim=1)
        # Number of correct predictions in this batch.
        correct = pred.eq(y).sum().float().item()
        total_correct += correct
total_num = len(test_loader.dataset)  # total number of test samples
acc = total_correct/total_num
print('test acc:', acc)
# Visualize predictions on one test batch.
x,y = next(iter(test_loader))
with torch.no_grad():
    out = net(x.view(x.size(0), 28*28))
pred = out.argmax(dim=1)
plot_image(x, pred, 'test')