# MNIST handwritten-digit demo — to run on CPU, remove the .cuda() calls
import torch
import time
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torchvision #数据库模块
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
torch.manual_seed(1)  # fix the RNG seed so weight init / shuffling are reproducible

# Hyper-parameters
EPOCH = 1               # number of passes over the training set
BATCH_SIZE = 50
LR = 0.001              # learning rate for Adam
DOWNLOAD_MNIST = False  # set True on the first run to fetch the dataset

train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),  # HxW uint8 [0,255] -> 1xHxW float [0,1]
    download=DOWNLOAD_MNIST,  # download only if requested
)
# Test split is loaded without a transform; raw uint8 tensors are converted manually below.
test_data = torchvision.datasets.MNIST(root='./mnist', train=False)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

with torch.no_grad():
    # Add a channel dimension, cast to float, rescale to [0,1]; keep the first
    # 50000 test samples on the GPU. (Variable() is deprecated since PyTorch 0.4 —
    # plain tensors participate in autograd directly.)
    test_x = (torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:50000] / 255.).cuda()
    test_y = test_data.targets[:50000].cuda()
# CNN model: two conv blocks followed by a linear classifier
class CNN(nn.Module):
    """Small convolutional network for 28x28 grayscale MNIST digits.

    Input:  float tensor of shape (batch, 1, 28, 28)
    Output: raw class scores (logits) of shape (batch, 10)
    """

    def __init__(self):
        super(CNN, self).__init__()
        # (1, 28, 28) -> conv -> (16, 28, 28) -> 2x2 max-pool -> (16, 14, 14)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16,
                      kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # (16, 14, 14) -> conv -> (32, 14, 14) -> 2x2 max-pool -> (32, 7, 7)
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Flatten the 32*7*7 feature map into 10 class scores.
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        """Run the network; returns logits of shape (batch, 10)."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # keep batch dim, flatten the rest
        return self.out(flat)
cnn = CNN()
cnn.cuda()  # move the model to GPU; drop the .cuda() calls to run on CPU
# print(cnn)

# Training: Adam on cross-entropy. CrossEntropyLoss applies log-softmax
# internally, so the network correctly outputs raw logits.
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()

t_start = time.time()
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        # Variable() is deprecated since PyTorch 0.4 — tensors track gradients directly.
        b_x = x.cuda()
        b_y = y.cuda()
        output = cnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()  # clear gradients accumulated on the previous step
        loss.backward()
        optimizer.step()

# Quick sanity check on the first 10 test images; no_grad skips building
# the autograd graph during inference.
with torch.no_grad():
    test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, dim=1)[1].cpu()  # argmax over class scores
print(pred_y.numpy(), 'prediction number')
print(test_y[:10].cpu().numpy(), 'real number')  # move to CPU before converting to numpy
t_end = time.time()
print('time:', t_end - t_start)