图像识别实战(三)----网络的训练

图像识别实战(三)----网络的训练

9.设置优化器

# Optimizer setup: Adam over only the parameters marked trainable (params_to_update).
optimizer_ft = optim.Adam(params_to_update, lr=1e-2)
scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=7,gamma=0.1)# decay the LR to 1/10 of its value every 7 epochs
# The model's last layer already applies LogSoftmax(), so nn.CrossEntropyLoss() must not
# be used here: CrossEntropyLoss() = log_softmax() + NLLLoss(), which would double-apply it.
criterion = nn.NLLLoss()

10.训练模块

#训练模块

def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, filename=filename):
    """Train and validate a model, checkpointing the best-validation weights.

    Relies on module-level globals: ``device`` (target device) and
    ``scheduler`` (LR scheduler, stepped once per epoch).

    Args:
        model: network to train; moved onto ``device`` in place.
        dataloaders: dict with 'train' and 'valid' DataLoader entries.
        criterion: loss function (NLLLoss here — the model ends in LogSoftmax).
        optimizer: optimizer over the trainable parameters.
        num_epochs: number of epochs to run.
        is_inception: if True, add Inception's auxiliary-logits loss during training.
        filename: path where the best checkpoint is saved via torch.save.

    Returns:
        (model, val_acc_history, train_acc_history, valid_losses,
         train_losses, LRs) — model carries the best-validation weights.
        NOTE: the original returned only 5 values, omitting ``train_losses``
        even though the call site unpacked 6; fixed here.
    """
    since = time.time()  # wall-clock start, for elapsed-time reporting
    best_acc = 0
    # To resume from a checkpoint, uncomment:
    # checkpoint = torch.load(filename)
    # best_acc = checkpoint['best_acc']
    # model.load_state_dict(checkpoint['state_dict'])  # fixed: was loading 'optimizer' into the model
    model.to(device)

    val_acc_history = []
    train_acc_history = []
    train_losses = []
    valid_losses = []
    # Record the initial learning rate; param_groups[0] keys include
    # 'params', 'lr', 'betas', 'weight_decay', 'eps', 'amsgrad'.
    LRs = [optimizer.param_groups[0]['lr']]

    best_model_wts = copy.deepcopy(model.state_dict())
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch: a training pass followed by a validation pass.
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            # Iterate over the whole dataset for this phase.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Reset accumulated gradients.
                optimizer.zero_grad()
                # Compute/track gradients only during training.
                with torch.set_grad_enabled(phase == 'train'):
                    if is_inception and phase == 'train':
                        # Inception returns (main, aux) logits in train mode;
                        # the auxiliary loss is weighted 0.4 per the paper.
                        outputs, aux_outputs = model(inputs)
                        loss = criterion(outputs, labels) + 0.4 * criterion(aux_outputs, labels)
                    else:  # e.g. ResNet takes this branch
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)

                    # Update weights only in the training phase.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Accumulate loss weighted by batch size, plus correct predictions.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            time_elapsed = time.time() - since
            print('Time elapsed {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # Keep the weights from the best validation epoch and checkpoint them.
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                state = {
                    "state_dict": model.state_dict(),
                    "best_acc": best_acc,
                    "optimizer": optimizer.state_dict(),
                }
                torch.save(state, filename)
            if phase == 'valid':
                val_acc_history.append(epoch_acc)
                valid_losses.append(epoch_loss)
            if phase == 'train':
                train_acc_history.append(epoch_acc)
                train_losses.append(epoch_loss)

        # FIX: StepLR.step() takes no metric argument. The original called
        # scheduler.step(epoch_loss), which old PyTorch versions interpreted
        # as an epoch index, silently corrupting the decay schedule.
        # Step once per epoch, after both phases.
        scheduler.step()
        print('Optimizer learning rate:{:.7f}'.format(optimizer.param_groups[0]['lr']))
        LRs.append(optimizer.param_groups[0]['lr'])
        print()

    time_elapsed = time.time() - since
    print("Training complete in {:.0f}m{:.0f}s".format(time_elapsed // 60, time_elapsed % 60))
    print('best val Acc: {:4f}'.format(best_acc))

    # Restore the best-validation weights before returning the model.
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, train_acc_history, valid_losses, train_losses, LRs

11.调用模型

mdoel_ft, val_acc_history, train_acc_history, valid_losses, train_losses, LRs = train_model(model_ft, dataloaders, criterion, optimizer_ft, num_epochs=25, is_inception=False, filename=filename)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

NAND_LU

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值