import torch
import torch.nn as nn
import torch.nn.functional as F
import torch .optim as optim
from torch import device
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms,models
import torchvision
import os
from net import simpleconv3
from tensorboardX import SummaryWriter
# Module-level TensorBoard writer; train() logs per-epoch loss/accuracy
# scalars through it into the ./logs directory.
writer = SummaryWriter('logs')
from torch.optim.lr_scheduler import StepLR
def train(model, criterion, optimizer, lr_scheduler=None, epochs=None,
          scheduler=None, num_epochs=None, dataloaders=None,
          sizes=None, summary_writer=None):
    """Train ``model`` and evaluate it on the validation split each epoch.

    Args:
        model: network to optimize (already on GPU if the caller moved it).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        optimizer: optimizer updating ``model.parameters()``.
        lr_scheduler: LR scheduler, stepped once per epoch after training.
        epochs: number of epochs to run.
        scheduler / num_epochs: backward-compatible aliases for
            ``lr_scheduler`` / ``epochs`` — the original call site used these
            keyword names, which raised ``TypeError`` against the old
            signature.
        dataloaders: dict with 'train' and 'val' loaders; defaults to the
            module-level ``dataloader``.
        sizes: dict of per-split dataset sizes; defaults to the module-level
            ``dataset_sizes``.
        summary_writer: TensorBoard writer; defaults to the module-level
            ``writer``. It is closed before returning.

    Returns:
        The trained model.
    """
    # Resolve keyword aliases, then fall back to the module-level globals
    # only when no explicit argument was given (lazy: the globals are not
    # touched if the caller supplies everything).
    lr_scheduler = lr_scheduler if lr_scheduler is not None else scheduler
    epochs = epochs if epochs is not None else num_epochs
    dataloaders = dataloaders if dataloaders is not None else dataloader
    sizes = sizes if sizes is not None else dataset_sizes
    summary_writer = summary_writer if summary_writer is not None else writer
    use_gpu = torch.cuda.is_available()  # hoisted: was queried every batch
    for epoch in range(epochs):
        print("epoch time:{epoch}/{epochs}".format(epoch=epoch, epochs=epochs))
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss = 0.0  # accumulated loss over the epoch
            running_accs = 0.0  # accumulated count of correct predictions
            number_batch = 0
            for images, labels in dataloaders[phase]:
                optimizer.zero_grad()
                if use_gpu:
                    images = images.cuda()
                    labels = labels.cuda()
                outputs = model(images)
                loss = criterion(outputs, labels)
                _, preds = torch.max(outputs, 1)
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()  # .data.item() is dated
                running_accs += torch.sum(preds == labels).item()
                number_batch += 1
            # Step the scheduler AFTER this epoch's optimizer updates:
            # PyTorch >= 1.1 requires optimizer.step() before
            # lr_scheduler.step(); the original stepped it first.
            if phase == 'train':
                lr_scheduler.step()
            # Per-epoch average loss and accuracy (guard an empty loader).
            epoch_loss = running_loss / max(number_batch, 1)
            epoch_acc = running_accs / sizes[phase]
            # Collect loss/accuracy for TensorBoard visualization.
            if phase == 'train':
                summary_writer.add_scalar('data/trainloss', epoch_loss, epoch)
                summary_writer.add_scalar('data/trainacc', epoch_acc, epoch)
            else:
                summary_writer.add_scalar('data/valloss', epoch_loss, epoch)
                summary_writer.add_scalar('data/valacc', epoch_acc, epoch)
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
    summary_writer.close()
    return model
if __name__ == '__main__':
    image_size = 60   # kept for reference; val preprocessing resizes to 64
    crop_size = 48    # network input size (48x48)
    num_classes = 2
    model = simpleconv3(num_classes)  # create the model
    data_dir = './data'
    if not os.path.exists('models'):
        os.mkdir('models')
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        # Move the model to GPU. The original did ``models = models.cuda()``,
        # which called .cuda() on the torchvision ``models`` MODULE and left
        # the network on the CPU.
        model = model.cuda()
    # Preprocessing: training uses random resized crop + horizontal flip +
    # normalization; validation uses resize + center crop + normalization.
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(crop_size),  # random crop, scaled to 48x48
            transforms.RandomHorizontalFlip(),        # random horizontal flip
            transforms.ToTensor(),                    # PIL/ndarray -> tensor in [0, 1]
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ]),
        'val': transforms.Compose([
            transforms.Resize(64),                    # resize shorter side to 64
            transforms.CenterCrop(crop_size),         # central 48x48 region
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ]),
    }
    # One ImageFolder per split (the original built this dict twice, under
    # two different names, and never used the first one).
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                              data_transforms[x])
                      for x in ['train', 'val']}
    # DataLoaders keyed 'train'/'val' to match the phases train() iterates.
    # The original indexed the ``datasets`` MODULE (``datasets[x]``, a
    # TypeError) and used a 'test' key the train loop never reads.
    dataloader = {x: DataLoader(dataset=image_datasets[x],
                                batch_size=13,
                                shuffle=True,
                                num_workers=4)
                  for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    exp_lr_scheduler = StepLR(optimizer_ft, step_size=100, gamma=0.1)
    # Keyword names match train()'s signature; the original passed
    # ``scheduler=`` / ``num_epochs=``, which raised TypeError.
    model = train(model=model,
                  criterion=criterion,
                  optimizer=optimizer_ft,
                  lr_scheduler=exp_lr_scheduler,
                  epochs=300)
    torch.save(model.state_dict(), 'models/model.pt')
# NOTE(review): the two lines below were non-code residue from the blog page
# this script was copied from; kept as comments so the file parses.
# "Handwrite a training pipeline — follow-up review pending."
# "Latest recommended article published 2024-08-10 23:14:03."