实战手写数字识别
- torch和torchvision
- PyTorch之torchvision.transforms
- torchvision.transforms.Resize
- Scale
- CenterCrop中心裁剪；RandomHorizontalFlip按随机概率进行水平翻转
- RandomVerticalFlip随机概率进行垂直翻转
- ToTensor类型转换
- ToPILImage转换成PIL图片
- 数据预览和数据装载
- 模型搭建和参数优化
import torch
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch.autograd import Variable
# Preprocessing: convert PIL image -> tensor in [0, 1], then normalise the
# single grey channel from [0, 1] to [-1, 1].
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5], std=[0.5]),
])

# Download the MNIST train/test splits into ./data/ (download only on first run).
data_train = datasets.MNIST(root="./data/", train=True, transform=transform, download=True)
data_test = datasets.MNIST(root="./data/", train=False, transform=transform)

# Wrap both splits in shuffled mini-batch loaders of 64 images each.
data_loader_train = torch.utils.data.DataLoader(dataset=data_train, batch_size=64, shuffle=True)
data_loader_test = torch.utils.data.DataLoader(dataset=data_test, batch_size=64, shuffle=True)
# NOTE(review): the triple-quoted string below is disabled data-preview code,
# kept for reference only — the string value is discarded at runtime.
# If re-enabled: make_grid replicates the grey channel to 3 channels, which is
# presumably why std/mean have 3 entries here — confirm before relying on it.
'''
#预览数据
images,labels = next(iter(data_loader_train))#获取了一个批次的图片数据和对应图片标签
img = torchvision.utils.make_grid(images)#构造成网格模式
img = img.numpy().transpose(1,2,0)
std = [0.5,0.5,0.5]
mean = [0.5,0.5,0.5]
img = img*std+mean
print([labels[i] for i in range(64)])
plt.imshow(img)
'''
class Model(torch.nn.Module):
    """Small CNN for 28x28 MNIST digits.

    Two 3x3 convolutions (1->64->128 channels) with ReLU, one 2x2 max-pool
    (28x28 -> 14x14), then a two-layer fully-connected head producing raw
    logits for the 10 digit classes.
    """

    def __init__(self):
        super(Model, self).__init__()
        # Feature extractor; padding=1 keeps spatial size until the pool halves it.
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(stride=2, kernel_size=2),
        )
        # Classifier head; dropout zeroes activations with p=0.5 to curb overfitting.
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(14 * 14 * 128, 1024),
            torch.nn.ReLU(),
            torch.nn.Dropout(p=0.5),
            torch.nn.Linear(1024, 10),
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input of shape (batch, 1, 28, 28)."""
        features = self.conv1(x)
        flat = features.view(-1, 14 * 14 * 128)
        return self.dense(flat)
# Model training and parameter optimisation.
model = Model()
# CrossEntropyLoss expects raw logits; it applies log-softmax internally.
cost = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())

n_epochs = 5
for epoch in range(n_epochs):
    running_loss = 0.0
    running_correct = 0
    print("Epoch{}/{}".format(epoch, n_epochs))
    print("-" * 10)

    # Training pass (dropout active). Variable() wrappers are gone: since
    # PyTorch 0.4 plain tensors carry autograd state.
    model.train()
    for X_train, y_train in data_loader_train:
        outputs = model(X_train)
        _, pred = torch.max(outputs, 1)
        optimizer.zero_grad()
        loss = cost(outputs, y_train)
        loss.backward()
        optimizer.step()
        # .item() extracts Python numbers; accumulating tensors (loss.data)
        # keeps graph references alive and formats unreliably in print below.
        running_loss += loss.item()
        running_correct += torch.sum(pred == y_train).item()

    # Evaluation pass: eval() disables dropout (the original measured test
    # accuracy with dropout still on) and no_grad() skips autograd bookkeeping.
    testing_correct = 0
    model.eval()
    with torch.no_grad():
        for X_test, y_test in data_loader_test:
            outputs = model(X_test)
            _, pred = torch.max(outputs, 1)
            testing_correct += torch.sum(pred == y_test).item()

    # NOTE: loss is averaged per batch by CrossEntropyLoss, then divided by the
    # dataset size here — kept as-is so output matches the original logs.
    print("Loss is :{:.4f},Train Accuracy is:{:.4f}%,Test Accuracy is :{:.4f}".format(
        running_loss / len(data_train),
        100 * running_correct / len(data_train),
        100 * testing_correct / len(data_test)))
我在CPU上训练的,有点慢,差不多一个小时,建议用GPU来训。
Epoch0/5
----------
Loss is :0.0020,Train Accuracy is:96.1850%,Test Accuracy is :98.0700
Epoch1/5
----------
Loss is :0.0007,Train Accuracy is:98.6517%,Test Accuracy is :98.6500
Epoch2/5
----------
Loss is :0.0004,Train Accuracy is:99.0967%,Test Accuracy is :98.0600
Epoch3/5
----------
Loss is :0.0003,Train Accuracy is:99.2867%,Test Accuracy is :98.8200
Epoch4/5
----------
Loss is :0.0003,Train Accuracy is:99.4633%,Test Accuracy is :98.6900