# Model training (模型的训练)
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader

import major_config
from major_dataset import LoadDataset
from major_utils import set_seed
# Reproducibility and training hyper-parameters.
set_seed()  # fix RNG seeds so runs are repeatable

MAX_EPOCH = major_config.num_epoch    # total number of training epochs
BATCH_SIZE = major_config.batchsize   # samples per mini-batch
LR = major_config.learning_rate       # initial SGD learning rate
log_interval = 10                     # log training stats every N iterations
val_interval = 1                      # run validation every N epochs
def _build_transform():
    """Shared preprocessing: resize to 32x32, tensorize, normalize."""
    return transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(major_config.norm_mean, major_config.norm_std),
    ])


# Both splits use identical preprocessing.
train_transform = _build_transform()
valid_transform = _build_transform()

# Datasets and loaders; only the training loader shuffles.
train_data = LoadDataset(data_dir=major_config.train_image, transform=train_transform)
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_data = LoadDataset(data_dir=major_config.val_image, transform=valid_transform)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)
# Model, loss, optimizer, and learning-rate schedule.
net = major_config.model
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)
# Decay the learning rate by 10x every 10 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# Loss histories for plotting: per-iteration (train), per-validation-run (valid).
train_curve = []
valid_curve = []
# Main optimization loop: train one epoch, step the LR schedule, then
# (every `val_interval` epochs) evaluate on the validation set.
for epoch in range(MAX_EPOCH):

    loss_mean = 0.0  # running loss over the last `log_interval` iterations
    correct = 0.0    # correctly classified training samples this epoch
    total = 0.0      # training samples seen this epoch

    net.train()
    for i, data in enumerate(train_loader):
        inputs, labels = data

        # Forward / backward / parameter update.
        outputs = net(inputs)
        optimizer.zero_grad()
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Running accuracy statistics.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        # .item() instead of .numpy(): device-agnostic scalar extraction
        # (.numpy() raises on CUDA tensors).
        correct += (predicted == labels).squeeze().sum().item()

        loss_mean += loss.item()
        train_curve.append(loss.item())
        if (i + 1) % log_interval == 0:
            loss_mean = loss_mean / log_interval
            msg = "Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i + 1, len(train_loader), loss_mean, correct / total)
            print(msg)
            # `with` closes the handle each time; the original opened the log
            # file on every log interval and never closed it (fd leak).
            with open("log_training.txt", 'a') as f:
                f.write(msg + "\n")
            loss_mean = 0.0

    scheduler.step()  # per-epoch LR decay

    # Validation pass (no gradient tracking).
    if (epoch + 1) % val_interval == 0:
        correct_val = 0.0
        total_val = 0.0
        loss_val = 0.0
        net.eval()
        with torch.no_grad():
            for j, data in enumerate(valid_loader):
                inputs, labels = data
                outputs = net(inputs)
                loss = criterion(outputs, labels)

                _, predicted = torch.max(outputs.data, 1)
                total_val += labels.size(0)
                correct_val += (predicted == labels).squeeze().sum().item()
                loss_val += loss.item()

        # The curve stores the mean validation loss; the log line reports the
        # summed loss_val, matching the original output format.
        valid_curve.append(loss_val / len(valid_loader))
        msg = "Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
            epoch, MAX_EPOCH, j + 1, len(valid_loader), loss_val, correct_val / total_val)
        print(msg)
        with open("log_training.txt", 'a') as f:
            f.write(msg + "\n")
# Plot the per-iteration training loss against the validation loss; each
# validation point is placed at its global iteration index so both curves
# share the x-axis.
iters_per_epoch = len(train_loader)
train_x = range(len(train_curve))
valid_x = np.arange(1, len(valid_curve) + 1) * iters_per_epoch * val_interval

plt.plot(train_x, train_curve, label='Train')
plt.plot(valid_x, valid_curve, label='Valid')
plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()