# 1.处理数据

1. 使用torchvision加载并且归一化CIFAR10的训练和测试数据集
2. 定义一个卷积神经网络
3. 定义一个损失函数
4. 在训练样本数据上训练网络
5. 在测试样本数据上测试网络

# 2.加载并归一化 CIFAR10

# -*- coding:utf-8 -*-
import torch
import torchvision
import torchvision.transforms as transforms

# torchvision datasets yield PIL images with pixel values in [0, 1];
# convert them to tensors and normalize each RGB channel into [-1, 1].
_mean = (0.5, 0.5, 0.5)
_std = (0.5, 0.5, 0.5)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(_mean, _std),
])

# The ten CIFAR-10 category names, indexed by integer label.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# -*- coding:utf-8 -*-
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

# torchvision datasets yield PIL images with pixel values in [0, 1];
# convert them to tensors and normalize every RGB channel into [-1, 1].
_normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
transform = transforms.Compose([
    transforms.ToTensor(),
    _normalize,
])

# The ten CIFAR-10 category names, indexed by integer label.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

def imshow(img):
    """Un-normalize an image tensor and display it with matplotlib.

    The input pipeline normalized pixels into [-1, 1]; ``img / 2 + 0.5``
    maps them back into [0, 1] for display.
    """
    # BUG FIX: the scraped article lost the function body's indentation,
    # which made this a SyntaxError; restored here.
    img = img / 2 + 0.5  # undo Normalize((0.5, ...), (0.5, ...))
    npimg = img.numpy()
    # Tensors are laid out (C, H, W); matplotlib expects (H, W, C).
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()

# BUG FIX: ``dataiter`` was never defined in the article, and ``it.next()``
# is Python-2-only syntax — use the builtin ``next()`` instead.
# NOTE(review): ``trainloader`` must have been built beforehand, e.g.
#   trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
#                                           download=True, transform=transform)
#   trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True)
dataiter = iter(trainloader)
images, labels = next(dataiter)

# show images: render the 4-image batch as one grid
imshow(torchvision.utils.make_grid(images))
# print labels: one ground-truth class name per image in the batch
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

frog horse bird bird

# 3.定义一个卷积神经网络

# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# BUG FIX: ``transforms`` was referenced below without torchvision being
# imported, so this standalone snippet failed with NameError.
import torchvision
import torchvision.transforms as transforms

# torchvision datasets yield PIL images in [0, 1]; normalize each RGB
# channel into [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# CIFAR-10 class names, indexed by integer label.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

class Net(nn.Module):
    """LeNet-style CNN mapping (N, 3, 32, 32) CIFAR-10 batches to (N, 10) logits."""

    # BUG FIX: the scraped article lost all indentation inside the class,
    # making it a SyntaxError; structure restored here.
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)        # 3 -> 6 maps, 5x5 kernel: 32x32 -> 28x28
        self.pool = nn.MaxPool2d(2, 2)         # halves H and W; reused after each conv
        self.conv2 = nn.Conv2d(6, 16, 5)       # 6 -> 16 maps: 14x14 -> 10x10
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 16*5*5 = 400 flattened features
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)           # one score per CIFAR-10 class

    def forward(self, x):
        """Return raw class scores (logits) for a batch of 32x32 RGB images."""
        x = self.pool(F.relu(self.conv1(x)))   # -> (N, 6, 14, 14)
        x = self.pool(F.relu(self.conv2(x)))   # -> (N, 16, 5, 5)
        x = x.view(-1, 16 * 5 * 5)             # flatten to (N, 400)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)                        # logits; CrossEntropyLoss applies softmax
        return x


net = Net()

# 4.定义损失函数和优化器

# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# BUG FIX: ``transforms`` was used below without importing torchvision,
# which raises NameError when this snippet runs standalone.
import torchvision
import torchvision.transforms as transforms

# torchvision datasets yield PIL images in [0, 1]; normalize into [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# CIFAR-10 class names, indexed by integer label.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


class Net(nn.Module):
    """LeNet-style CNN mapping (N, 3, 32, 32) CIFAR-10 batches to (N, 10) logits."""

    # BUG FIX: indentation restored (the scraped article flattened the class body).
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)        # 32x32 -> 28x28, 6 maps
        self.pool = nn.MaxPool2d(2, 2)         # halves H and W; reused twice
        self.conv2 = nn.Conv2d(6, 16, 5)       # 14x14 -> 10x10, 16 maps
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 400 flattened features
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # logits; CrossEntropyLoss applies log-softmax internally
        return x


net = Net()

# Loss and optimizer: classification cross-entropy with momentum SGD.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# 5.在训练样本数据上训练网络

# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# BUG FIX: torchvision was used below without being imported.
import torchvision
import torchvision.transforms as transforms

# torchvision datasets yield PIL images in [0, 1]; normalize into [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# CIFAR-10 class names, indexed by integer label.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# NOTE(review): the original article never constructed the DataLoader this
# section iterates over; reconstructed from the loss log (12000 iterations
# per epoch at batch_size=4 over the 50000-image training set).
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True)


class Net(nn.Module):
    """LeNet-style CNN mapping (N, 3, 32, 32) CIFAR-10 batches to (N, 10) logits."""

    # BUG FIX: indentation restored (flattened by the scrape).
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # logits; CrossEntropyLoss applies log-softmax internally
        return x


net = Net()

# Loss and optimizer: classification cross-entropy with momentum SGD.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# Train the network: loop over the data iterator, feed each batch to the
# network, and take one optimizer step per batch.
for epoch in range(2):  # two full passes over the training set
    running_loss = 0.0
    # BUG FIX: the article lost the inner loop over the DataLoader, leaving
    # ``data`` and ``i`` undefined.
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data  # get the inputs

        # BUG FIX: gradients accumulate by default; clear them each step.
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d,%5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print("Finished Training!")

[1, 2000] loss: 2.192
[1, 4000] loss: 1.840
[1, 6000] loss: 1.679
[1, 8000] loss: 1.566
[1,10000] loss: 1.503
[1,12000] loss: 1.451
[2, 2000] loss: 1.386
[2, 4000] loss: 1.358
[2, 6000] loss: 1.324
[2, 8000] loss: 1.305
[2,10000] loss: 1.274
[2,12000] loss: 1.240
Finished Training!

# 6.在测试样本数据上测试网络

# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# BUG FIX: torchvision was used below without being imported.
import torchvision
import torchvision.transforms as transforms

# torchvision datasets yield PIL images in [0, 1]; normalize into [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# CIFAR-10 class names, indexed by integer label.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# NOTE(review): the article never built its DataLoaders; reconstructed here
# (batch_size=4 matches the 4-image prints and the 12000-iteration loss log).
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False)


class Net(nn.Module):
    """LeNet-style CNN mapping (N, 3, 32, 32) CIFAR-10 batches to (N, 10) logits."""

    # BUG FIX: indentation restored (flattened by the scrape).
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # logits; CrossEntropyLoss applies log-softmax internally
        return x


net = Net()

# Loss and optimizer: classification cross-entropy with momentum SGD.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# Train the network on the training set (two epochs).
for epoch in range(2):
    running_loss = 0.0
    # BUG FIX: restored the lost inner loop over the DataLoader.
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data  # get the inputs

        # BUG FIX: clear accumulated gradients before each step.
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d,%5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print("Finished Training!")

# BUG FIX: ``images`` was undefined in the article; take one batch of test
# images to predict on.
dataiter = iter(testloader)
images, labels = next(dataiter)

outputs = net(images)

# The outputs are per-class scores; the class with the highest score is the
# network's prediction for each image.
_, predicted = torch.max(outputs, 1)

print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))

[1, 2000] loss: 2.237
[1, 4000] loss: 1.861
[1, 6000] loss: 1.687
[1, 8000] loss: 1.581
[1,10000] loss: 1.533
[1,12000] loss: 1.489
[2, 2000] loss: 1.444
[2, 4000] loss: 1.396
[2, 6000] loss: 1.375
[2, 8000] loss: 1.358
[2,10000] loss: 1.336
[2,12000] loss: 1.324
Finished Training!
Predicted: frog dog frog deer

# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# BUG FIX: torchvision was used below without being imported.
import torchvision
import torchvision.transforms as transforms

# torchvision datasets yield PIL images in [0, 1]; normalize into [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# CIFAR-10 class names, indexed by integer label.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# NOTE(review): the article never built its DataLoaders; reconstructed here.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False)


class Net(nn.Module):
    """LeNet-style CNN mapping (N, 3, 32, 32) CIFAR-10 batches to (N, 10) logits."""

    # BUG FIX: indentation restored (flattened by the scrape).
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # logits; CrossEntropyLoss applies log-softmax internally
        return x


net = Net()

# Loss and optimizer: classification cross-entropy with momentum SGD.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# Train the network on the training set (two epochs).
for epoch in range(2):
    running_loss = 0.0
    # BUG FIX: restored the lost inner loop over the DataLoader.
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data  # get the inputs

        # BUG FIX: clear accumulated gradients before each step.
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d,%5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print("Finished Training!")

# Measure accuracy over the whole 10000-image test set.
correct = 0
total = 0
# BUG FIX: the article lost the loop over the test loader (``data`` was
# undefined); also disable autograd during evaluation.
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))

[1, 2000] loss: 2.237
[1, 4000] loss: 1.861
[1, 6000] loss: 1.687
[1, 8000] loss: 1.581
[1,10000] loss: 1.533
[1,12000] loss: 1.489
[2, 2000] loss: 1.444
[2, 4000] loss: 1.396
[2, 6000] loss: 1.375
[2, 8000] loss: 1.358
[2,10000] loss: 1.336
[2,12000] loss: 1.324
Finished Training!
Accuracy of the network on the 10000 test images: 53 %

# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# BUG FIX: torchvision was used below without being imported.
import torchvision
import torchvision.transforms as transforms

# torchvision datasets yield PIL images in [0, 1]; normalize into [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# CIFAR-10 class names, indexed by integer label.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# NOTE(review): the article never built its DataLoaders; reconstructed here.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False)


class Net(nn.Module):
    """LeNet-style CNN mapping (N, 3, 32, 32) CIFAR-10 batches to (N, 10) logits."""

    # BUG FIX: indentation restored (flattened by the scrape).
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # logits; CrossEntropyLoss applies log-softmax internally
        return x


net = Net()

# Loss and optimizer: classification cross-entropy with momentum SGD.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# Train the network on the training set (two epochs).
for epoch in range(2):
    running_loss = 0.0
    # BUG FIX: restored the lost inner loop over the DataLoader.
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data  # get the inputs

        # BUG FIX: clear accumulated gradients before each step.
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d,%5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print("Finished Training!")

# Per-class accuracy over the test set: which categories the network learned
# well and which it confuses.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
# BUG FIX: the article lost the loop over the test loader (``data`` was
# undefined — only one batch would have been counted); also disable autograd
# during evaluation.
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(4):  # batch_size is 4
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1

for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))

[1, 2000] loss: 2.237
[1, 4000] loss: 1.861
[1, 6000] loss: 1.687
[1, 8000] loss: 1.581
[1,10000] loss: 1.533
[1,12000] loss: 1.489
[2, 2000] loss: 1.444
[2, 4000] loss: 1.396
[2, 6000] loss: 1.375
[2, 8000] loss: 1.358
[2,10000] loss: 1.336
[2,12000] loss: 1.324
Finished Training!
Accuracy of plane : 57 %
Accuracy of car : 51 %
Accuracy of bird : 23 %
Accuracy of cat : 17 %
Accuracy of deer : 46 %
Accuracy of dog : 58 %
Accuracy of frog : 80 %
Accuracy of horse : 58 %
Accuracy of ship : 76 %
Accuracy of truck : 67 %

03-02 272

06-06 2万+

06-11 307

12-19 2万+

01-30 3万+

07-29 8711

05-23 29

06-10 1820

04-16 6494

01-20 8万+

08-16 52

04-01 9445

08-22 69

09-12 50

02-27 8万+

03-05 1万+

03-01 14万+

03-03 7398

03-19 82万+

02-28 4万+

04-25 7万+

07-18 289

03-04 14万+

03-10 13万+

03-08 2万+

03-12 12万+

03-16 1万+

03-19 9万+

03-08 7万+

03-10 7217

#### 新一代神器STM32CubeMonitor介绍、下载、安装和使用教程

©️2019 CSDN 皮肤主题: 技术黑板 设计师: CSDN官方博客