# 【PyTorch】PyTorch搭建神经网络处理图片分类的完整代码

## 1. 导入库
import torch
import torchvision
from torchvision import datasets, transforms
import os            # os包集成了一些对文件路径和目录进行操作的类
import matplotlib.pyplot as plt
import time


# 读取数据
# 这一步类似预处理，将图片裁剪成64*64大小
data_dir = 'C:/Users/17865/Desktop/Sort/Data'
data_transform = {x:transforms.Compose([transforms.Scale([64,64]),
transforms.ToTensor()]) for x in ['train', 'valid']}
# 这一步相当于读取数据
image_datasets = {x:datasets.ImageFolder(root = os.path.join(data_dir,x),
transform = data_transform[x]) for x in ['train', 'valid']}
# 读取完数据后，对数据进行装载
batch_size = 4,
shuffle = True) for x in ['train', 'valid']}


class Models(torch.nn.Module):
def __init__(self):
super(Models, self).__init__()
self.Conv = torch.nn.Sequential(
torch.nn.ReLU(),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2, stride=2),

torch.nn.ReLU(),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2, stride=2))

self.Classes = torch.nn.Sequential(
torch.nn.Linear(16 * 16 * 256, 512),
torch.nn.ReLU(),
torch.nn.Dropout(p=0.5),
torch.nn.Linear(512, 3))

def forward(self, inputs):
x = self.Conv(inputs)
x = x.view(-1, 16 * 16 * 256)
x = self.Classes(x)
return x

model = Models()
print(model)

1. 定义损失和优化：交叉熵损失（适合多分类）和自适应优化（首选的优化函数）函数
loss_f = torch.nn.CrossEntropyLoss()
# 使用GPU训练
Use_gpu = torch.cuda.is_available()
# 将模型放置在GPU上
if Use_gpu:
model = model.cuda()


1. 训练及测试网络
# 设置5个epoch
epoch_n = 5
time_open = time.time()
# 每执行一个epoch输出一次
for epoch in range(epoch_n):
print('epoch {}/{}'.format(epoch, epoch_n - 1))
print('-' * 10)
# 判断是训练还是测试
for phase in ['train', 'valid']:
if phase == 'train':
# 设置为True，会进行Dropout并使用batch mean和batch var
print('training...')
model.train(True)
else:
# 设置为False，不会进行Dropout并使用running mean和running var
print('validing...')
model.train(False)
# 初始化loss和corrects
running_loss = 0.0
running_corrects = 0.0
# 输出标号 和对应图片，下标从1开始
for batch, data in enumerate(dataloader[phase], 1):
X, Y = data
# 将数据放在GPU上训练
X, Y = Variable(X).cuda(), Variable(Y).cuda()
# 模型预测概率
y_pred = model(X)
# pred，概率较大值对应的索引值，可看做预测结果，1表示行
_, pred = torch.max(y_pred.data, 1)
# 梯度归零
# 计算损失
loss = loss_f(y_pred, Y)
# 训练 需要反向传播及梯度更新
if phase == 'train':
# 反向传播出现问题
loss.backward()
optimizer.step()
# 损失和
running_loss += loss.data.item()
# 预测正确的图片个数
running_corrects += torch.sum(pred == Y.data)
# 训练时，每500个batch输出一次，训练loss和acc
if batch % 500 == 0 and phase == 'train':
print('batch{},trainLoss:{:.4f},trainAcc:{:.4f}'.format(batch, running_loss / batch,
100 * running_corrects / (4 * batch)))
# 输出每个epoch的loss和acc
epoch_loss = running_loss * 4 / len(image_datasets[phase])
epoch_acc = 100 * running_corrects / len(image_datasets[phase])
print('{} Loss:{:.4f} Acc:{:.4f}%'.format(phase, epoch_loss, epoch_acc))
time_end = time.time() - time_open
print(time_end)


1. 保存模型及参数
# 保存和加载整个模型
torch.save(model, 'model.pth')
print(model_1)

# 仅保存和加载模型参数
torch.save(model.state_dict(), 'params.pth')
print(dic)


import torch
import torchvision
from torchvision import datasets, transforms
import os            # os包集成了一些对文件路径和目录进行操作的类
import matplotlib.pyplot as plt
import time

# 读取数据
data_dir = 'C:/Users/17865/Desktop/超声医疗/sjb的数据集/Data'
data_transform = {x:transforms.Compose([transforms.Scale([64,64]),
transforms.ToTensor()]) for x in ['train', 'valid']}   # 这一步类似预处理
image_datasets = {x:datasets.ImageFolder(root = os.path.join(data_dir,x),
transform = data_transform[x]) for x in ['train', 'valid']}  # 这一步相当于读取数据
batch_size = 4,
shuffle = True) for x in ['train', 'valid']}  # 读取完数据后，对数据进行装载

# 模型搭建
class Models(torch.nn.Module):
def __init__(self):
super(Models, self).__init__()
self.Conv = torch.nn.Sequential(
torch.nn.ReLU(),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2, stride=2),

torch.nn.ReLU(),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2, stride=2))

self.Classes = torch.nn.Sequential(
torch.nn.Linear(16 * 16 * 256, 512),
torch.nn.ReLU(),
torch.nn.Dropout(p=0.5),
torch.nn.Linear(512, 3))

def forward(self, inputs):
x = self.Conv(inputs)
x = x.view(-1, 16 * 16 * 256)
x = self.Classes(x)
return x

model = Models()
print(model)
'''
# 保存和加载整个模型
torch.save(model, 'model.pth')
print(model_1)

# 仅保存和加载模型参数
torch.save(model.state_dict(), 'params.pth')
print(dic)
'''
loss_f = torch.nn.CrossEntropyLoss()

Use_gpu = torch.cuda.is_available()
if Use_gpu:
model = model.cuda()

epoch_n = 5
time_open = time.time()

for epoch in range(epoch_n):
print('epoch {}/{}'.format(epoch, epoch_n - 1))
print('-' * 10)

for phase in ['train', 'valid']:
if phase == 'train':
# # 设置为True，会进行Dropout并使用batch mean和batch var
print('training...')
model.train(True)
else:
# # 设置为False，不会进行Dropout并使用running mean和running var
print('validing...')
model.train(False)

running_loss = 0.0
running_corrects = 0.0
# 输出标号 和对应图片，下标从1开始
for batch, data in enumerate(dataloader[phase], 1):
X, Y = data
# 将数据放在GPU上训练
X, Y = Variable(X).cuda(), Variable(Y).cuda()
# 模型预测概率
y_pred = model(X)
# pred，概率较大值对应的索引值，可看做预测结果，1表示行
_, pred = torch.max(y_pred.data, 1)
# 梯度归零
# 计算损失
loss = loss_f(y_pred, Y)
# 训练 需要反向传播及梯度更新
if phase == 'train':
# 反向传播出现问题
loss.backward()
optimizer.step()
# 损失和
running_loss += loss.data.item()
# 预测正确的图片个数
running_corrects += torch.sum(pred == Y.data)
# 训练时，每500个batch输出一次，训练loss和acc
if batch % 500 == 0 and phase == 'train':
print('batch{},trainLoss:{:.4f},trainAcc:{:.4f}'.format(batch, running_loss / batch,
100 * running_corrects / (4 * batch)))
# 输出每个epoch的loss和acc
epoch_loss = running_loss * 4 / len(image_datasets[phase])
epoch_acc = 100 * running_corrects / len(image_datasets[phase])
print('{} Loss:{:.4f} Acc:{:.4f}%'.format(phase, epoch_loss, epoch_acc))
time_end = time.time() - time_open
print(time_end)


02-05 2万+
05-10 1万+

07-16 5591
03-23 176
06-13 3814