目录
1. 下载并了解样本
2. 制作批次数据集
3. 定义神经网络模型
4. 打印网络,指定设备 / 定义损失和优化器 / 训练模型 / 保存模型 / 载入模型,加入训练集,预测 / 评估模型,准确率都列出来
1. 下载并了解样本
1.1 导入模组并下载数据集,图片转为torch(简略教程)
import torchvision
import torchvision.transforms as tranforms
import torch.nn.functional as F

# Local directory where the FashionMNIST files are cached.
data_dir = 'C:/Users/liyzc/Desktop/学习资料/第一卷资源/fashion_mnist'

# torchvision.transforms is the image-preprocessing toolbox; here we only
# convert PIL images into torch tensors.
tranform = tranforms.Compose([tranforms.ToTensor()])

# Training split (downloaded on first run).
train_dataset = torchvision.datasets.FashionMNIST(
    data_dir, train=True, transform=tranform, download=True)
print('训练数据条数:\n', len(train_dataset))

# Test split (files already present after the download above).
val_dataset = torchvision.datasets.FashionMNIST(
    root=data_dir, train=False, transform=tranform)
print('测试数据条数:\n', len(val_dataset))
1.2 随便找个图片看看
import pylab

# Grab the first (image, label) pair and display it.
sample_img, sample_label = train_dataset[0]
# Collapse the (1, 28, 28) tensor into a 28x28 matrix for plotting.
sample_img = sample_img.reshape(-1, 28)
pylab.imshow(sample_img)
pylab.show()
print("该图片的标签为:", sample_label)
2. 制作批次数据集
2.1 用dataloader封装
import torch

# Wrap both splits in batched loaders: shuffle the training data each
# epoch, keep the test order fixed for reproducible evaluation.
batch_size = 10
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False)
2.2 按批次读取,并显示出来
from matplotlib import pyplot as plt
import numpy as np


def imshow(img):
    """Display a CHW torch tensor as an image, without axes."""
    print("图片形状:", np.shape(img))
    npimg = img.numpy()
    plt.axis('off')
    # matplotlib expects HWC ordering; torch tensors are CHW.
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
# Human-readable names for FashionMNIST's 10 label indices.
classes = ('T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle_Boot')
# Turn the loader into an iterator ...
sample = iter(train_loader)
# ... and pull out one batch of samples.
# BUG FIX: Python 3 iterators have no .next() method (and recent PyTorch
# loader iterators dropped it too); use the builtin next() instead.
images, labels = next(sample)
# Print this batch's shape and labels, then show the images as a grid.
print('样本形状:', np.shape(images))
print('样本标签:', labels)
imshow(torchvision.utils.make_grid(images, nrow=batch_size))
print(','.join('%5s' % classes[labels[j]] for j in range(len(images))))
3. 定义神经网络模型
# Usual boilerplate: subclass nn.Module, define layers in __init__,
# wire them together in forward().
class myConNet(torch.nn.Module):
    """Small all-convolutional classifier for 1x28x28 FashionMNIST images.

    Three conv layers; the last emits 10 channels (one per class) which
    are globally average-pooled down to the (batch, 10) score matrix.
    """

    def __init__(self):
        super(myConNet, self).__init__()
        # Channel progression: 1 (grayscale input) -> 6 -> 12 -> 10 classes.
        self.conv1 = torch.nn.Conv2d(in_channels=1, out_channels=6, kernel_size=3)
        self.conv2 = torch.nn.Conv2d(in_channels=6, out_channels=12, kernel_size=3)
        self.conv3 = torch.nn.Conv2d(in_channels=12, out_channels=10, kernel_size=3)

    def forward(self, t):
        # Stage 1: conv -> ReLU -> 2x2 max pool.
        t = F.max_pool2d(F.relu(self.conv1(t)), kernel_size=2, stride=2)
        # Stage 2: same conv/ReLU/pool pattern.
        t = F.max_pool2d(F.relu(self.conv2(t)), kernel_size=2, stride=2)
        # Stage 3: conv, then average-pool over the whole remaining map
        # (global average pooling -> 1x1 spatial size).
        t = self.conv3(t)
        t = F.avg_pool2d(t, kernel_size=t.shape[-2:], stride=t.shape[-2:])
        # Drop the trailing 1x1 spatial dims: (N, 10, 1, 1) -> (N, 10).
        return t.reshape(t.shape[:2])
4. 打印网络,指定设备 / 定义损失和优化器 /训练 / 保存模型 / 载入模型,加入训练集,预测 / 评估模型,准确率都列出来
############################4.1,打印网络并指定设备
if __name__ == '__main__':
    # ---- 4.1 Build the network and pick a device ----
    network = myConNet()
    print(network)  # show the layer structure

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    network.to(device)

    # ---- 4.2 Loss and optimizer ----
    criterion = torch.nn.CrossEntropyLoss()  # instantiate the loss
    optimizer = torch.optim.Adam(network.parameters(), lr=.01)

    # ---- 4.3 Train the model ----
    for epoch in range(10):  # 10 passes over the dataset
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):  # one batch at a time
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()  # clear gradients from the last step
            outputs = network(inputs)
            loss = criterion(outputs, labels)  # compute the loss
            loss.backward()   # backpropagate
            optimizer.step()  # update parameters
            running_loss += loss.item()
            if i % 1000 == 999:
                # BUG FIX: running_loss is reset right after printing, so it
                # accumulates over 1000 batches — divide by 1000 (the
                # original /2000 under-reported the mean batch loss by half).
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 1000))
                running_loss = 0.0
    print('Finished Training')

    # ---- 4.4 Save the model ----
    torch.save(network.state_dict(), './CNNFashionMNIST.pth')

    # ---- 4.5 Accuracy on the last training batch, via sklearn ----
    from sklearn.metrics import accuracy_score
    outputs = network(inputs)
    _, predicted = torch.max(outputs, 1)
    # accuracy_score(y_true, y_pred): ground truth comes first (the value
    # is symmetric for plain accuracy, but this is the documented order).
    print("训练时的准确:", accuracy_score(labels.cpu().numpy(), predicted.cpu().numpy()))

    # ---- 4.6 Reload the model and predict on one test batch ----
    network.load_state_dict(torch.load('./CNNFashionMNIST.pth'))
    dataiter = iter(test_loader)
    # BUG FIX: Python 3 iterators have no .next() method; use next().
    images, labels = next(dataiter)
    inputs, labels = images.to(device), labels.to(device)
    imshow(torchvision.utils.make_grid(images, nrow=batch_size))
    print('真实标签: ', ' '.join('%5s' % classes[labels[j]] for j in range(len(images))))
    outputs = network(inputs)  # run the model
    # The class with the highest score along dim 1 is the prediction.
    _, predicted = torch.max(outputs, 1)
    print('预测结果: ', ' '.join('%5s' % classes[predicted[j]]
                             for j in range(len(images))))

    # ---- 4.7 Evaluate: per-class accuracy over the whole test set ----
    class_correct = list(0. for i in range(10))  # correct count per class
    class_total = list(0. for i in range(10))    # total count per class
    with torch.no_grad():
        for data in test_loader:  # iterate the full test set
            images, labels = data
            inputs, labels = images.to(device), labels.to(device)
            outputs = network(inputs)
            _, predicted = torch.max(outputs, 1)
            predicted = predicted.to(device)
            c = (predicted == labels).squeeze()  # per-sample correctness
            # BUG FIX: iterate the actual batch size instead of a hard-coded
            # 10, so a final partial batch cannot index out of range
            # (identical behavior when batch_size == 10 divides the set).
            for i in range(len(labels)):
                label = labels[i]
                class_correct[label] += c[i].item()  # +1 when correct
                class_total[label] += 1
    sumacc = 0
    for i in range(10):  # report each class
        Accuracy = 100 * class_correct[i] / class_total[i]
        print('Accuracy of %5s : %2d %%' % (classes[i], Accuracy))
        sumacc = sumacc + Accuracy
    # Mean of the ten per-class accuracies (balanced accuracy).
    print('Accuracy of all : %2d %%' % (sumacc / 10.))
还没做的:回头用sklearn把第四部分封装打出来试试。