Week TR5: Transformer in Practice: Text Classification

Tasks:
● Understand the code logic in this article and run it successfully
● Tune the code based on your own understanding so that the accuracy reaches 70%

1. Preparation

1.1. Environment Setup

This is a hands-on example of simple text classification implemented with a Transformer model in PyTorch.

import torch, torchvision
print(torch.__version__)  # note the double underscores
print(torchvision.__version__)

Code output

2.0.0+cpu
0.15.1+cpu

1.2. Loading the Data

import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os,PIL,pathlib,warnings

warnings.filterwarnings("ignore") # suppress warning messages

# Windows 10 system
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device

Code output

device(type='cpu')

import pandas as pd

# load the custom Chinese dataset
train_data = pd.read_csv('./TR5/train.csv', sep='\t', header=None)
train_data.head()

Code output

                         0              1
0       还有双鸭山到淮阴的汽车票吗13号的   Travel-Query
1                 从这里怎么回家   Travel-Query
2       随便播放一首专辑阁楼里的佛里的歌     Music-Play
3               给看一下墓王之王嘛  FilmTele-Play
4  我想看挑战两把s686打突变团竞的游戏视频     Video-Play

# build a dataset iterator
def coustom_data_iter(texts, labels):
    for x, y in zip(texts, labels):
        yield x, y
        
train_iter = coustom_data_iter(train_data[0].values[:], train_data[1].values[:])
train_data[0].values[:]

Code output

array(['还有双鸭山到淮阴的汽车票吗13号的', '从这里怎么回家', '随便播放一首专辑阁楼里的佛里的歌', ...,
       '黎耀祥陈豪邓萃雯畲诗曼陈法拉敖嘉年杨怡马浚伟等到场出席', '百事盖世群星星光演唱会有谁', '下周一视频会议的闹钟帮我开开'],
      dtype=object)

train_data[1].values[:]

Code output

array(['Travel-Query', 'Travel-Query', 'Music-Play', ..., 'Radio-Listen',
       'Video-Play', 'Alarm-Update'], dtype=object)

2. Data Preprocessing

2.1. Building the Vocabulary

The jieba word-segmentation library needs to be installed separately. The install command is:
● cmd command: pip install jieba

from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
import jieba

# Chinese word-segmentation function
tokenizer = jieba.lcut

def yield_tokens(data_iter):
    for text,_ in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"]) # set the default index; if a token is not in the vocabulary, this default index is used

Code output

Building prefix dict from the default dictionary ...
Loading model from cache C:\Users\xzy\AppData\Local\Temp\jieba.cache
Loading model cost 0.953 seconds.
Prefix dict has been built successfully.

vocab(['我','想','看','和平','精英','上','战神','必备','技巧','的','游戏','视频'])

Code output

[2, 10, 13, 973, 1079, 146, 7724, 7574, 7793, 1, 186, 28]

label_name = list(set(train_data[1].values[:]))  # note: set() ordering may differ between runs
print(label_name)

Code output

['Audio-Play', 'Music-Play', 'Weather-Query', 'Alarm-Update', 'Radio-Listen', 'TVProgram-Play', 'Travel-Query', 'FilmTele-Play', 'Calendar-Query', 'HomeAppliance-Control', 'Video-Play', 'Other']

text_pipeline  = lambda x: vocab(tokenizer(x))
label_pipeline = lambda x: label_name.index(x)

print(text_pipeline('我想看和平精英上战神必备技巧的游戏视频'))
print(label_pipeline('Video-Play'))

Code output

[2, 10, 13, 973, 1079, 146, 7724, 7574, 7793, 1, 186, 28]
10

2.2. Generating Data Batches and Iterators

from torch.utils.data import DataLoader

def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    
    for (_text,_label) in batch:
        # label list
        label_list.append(label_pipeline(_label))
        
        # text list
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        
        # record each sentence's length (number of tokens); these lengths become the offsets below
        offsets.append(processed_text.size(0))
        
    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list  = torch.cat(text_list)
    offsets    = torch.tensor(offsets[:-1]).cumsum(dim=0) # cumulative sum gives each sentence's start position (see the small example after this function)
    
    return text_list.to(device),label_list.to(device), offsets.to(device)
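
To make the role of offsets concrete, here is a minimal sketch (with made-up sentence lengths) of the same cumsum logic used in collate_batch; the resulting start positions are exactly what nn.EmbeddingBag expects in its offsets argument:

import torch

lengths = [3, 5, 2]                                        # token counts of three sentences
offsets = torch.tensor([0] + lengths[:-1]).cumsum(dim=0)   # start position of each sentence
print(offsets)                                             # tensor([0, 3, 8])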

2.3. Building the Dataset

from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset

BATCH_SIZE = 4 

train_iter    = coustom_data_iter(train_data[0].values[:], train_data[1].values[:])
train_dataset = to_map_style_dataset(train_iter)

split_train_, split_valid_ = random_split(train_dataset,
                                          [int(len(train_dataset)*0.8),int(len(train_dataset)*0.2)])

train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)

valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)

The to_map_style_dataset() function

Its job is to convert an iterable-style dataset into a map-style dataset, so that elements can be accessed more conveniently by index (e.g., an integer). In PyTorch, datasets fall into two types, iterable-style and map-style (a minimal example follows this list):
● An iterable-style dataset implements the __iter__() method; its elements can be accessed by iteration, but not by index.
● A map-style dataset implements the __getitem__() and __len__() methods; specific elements can be accessed directly by index, and the size of the dataset can be queried.
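
A minimal sketch of the map-style interface (illustrative only; to_map_style_dataset builds an equivalent wrapper for us, and the class name here is hypothetical):

from torch.utils.data import Dataset

class TinyMapDataset(Dataset):
    def __init__(self, texts, labels):
        self.samples = list(zip(texts, labels))

    def __len__(self):
        return len(self.samples)        # dataset size

    def __getitem__(self, idx):
        return self.samples[idx]        # random access by integer index

ds = TinyMapDataset(["从这里怎么回家", "随便播放一首歌"], ["Travel-Query", "Music-Play"])
print(len(ds), ds[0])                   # 2 ('从这里怎么回家', 'Travel-Query')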

3. Model Construction

3.1. Defining the Positional Encoding

import math,os,torch

class PositionalEncoding(nn.Module):
    def __init__(self, embed_dim, max_len=500):
        super(PositionalEncoding, self).__init__()

        # create a zero tensor of shape [max_len, embed_dim]
        pe = torch.zeros(max_len, embed_dim) 
        # create a position-index tensor of shape [max_len, 1]
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) 

        div_term = torch.exp(torch.arange(0, embed_dim, 2).float() * (-math.log(100.0) / embed_dim))
        
        pe[:, 0::2] = torch.sin(position * div_term) # compute PE(pos, 2i)
        pe[:, 1::2] = torch.cos(position * div_term) # compute PE(pos, 2i+1)
        pe = pe.unsqueeze(0).transpose(0, 1)

        # register the positional encoding as a buffer: it takes no part in gradient descent, but is saved along with the model
        self.register_buffer('pe', pe)

    def forward(self, x):
        # add the positional encoding to the input tensor; note the shape of the encoding
        x = x + self.pe[:x.size(0)]
        return x
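
For reference, the code above implements the standard sinusoidal encoding

PE(pos, 2i)   = sin(pos / base^(2i / d_model))
PE(pos, 2i+1) = cos(pos / base^(2i / d_model))

with one detail worth noting: the original Transformer paper uses base = 10000, while the div_term line above uses 100.0. Both produce valid position signals, just with different wavelengths.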

3.2. Defining the Transformer Model


from tempfile import TemporaryDirectory
from typing   import Tuple
from torch    import nn, Tensor
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch.utils.data import dataset

class TransformerModel(nn.Module):

    def __init__(self, vocab_size, embed_dim, num_class, nhead=8, d_hid=256, nlayers=12, dropout=0.1):
        super().__init__()

        self.embedding = nn.EmbeddingBag(vocab_size,   # vocabulary size
                                         embed_dim,    # embedding dimension
                                         sparse=False)
        
        self.pos_encoder = PositionalEncoding(embed_dim)

        # define the encoder layer
        encoder_layers           = TransformerEncoderLayer(embed_dim, nhead, d_hid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.embed_dim           = embed_dim
        self.linear              = nn.Linear(embed_dim*4, num_class)
        
    def forward(self, src, offsets, src_mask=None):

        src    = self.embedding(src, offsets)
        src    = self.pos_encoder(src)
        output = self.transformer_encoder(src, src_mask)

        # NOTE: this reshape is hard-coded to the batch size of 4 used later in training
        output = output.view(4, self.embed_dim*4)
        output = self.linear(output)
 
        return output

3.3. Initializing the Model

vocab_size = len(vocab)       # vocabulary size
embed_dim  = 64               # embedding dimension
num_class  = len(label_name)  # number of classes

# create the Transformer model and move it to the device
model = TransformerModel(vocab_size, 
                         embed_dim, 
                         num_class).to(device)

3.4. Defining the Training Function

import time

def train(dataloader):
    model.train()  # switch to training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 300
    start_time   = time.time()

    for idx, (text,label,offsets) in enumerate(dataloader):
        predicted_label = model(text, offsets)
        optimizer.zero_grad()                    # zero the gradients

        loss = criterion(predicted_label, label) # compute the loss between the prediction and the ground-truth label
        loss.backward()                          # backpropagation
        optimizer.step()                         # update the parameters
        
        # accumulate accuracy and loss
        total_acc   += (predicted_label.argmax(1) == label).sum().item()
        train_loss  += loss.item()
        total_count += label.size(0)
        
        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:1d} | {:4d}/{:4d} batches '
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(epoch, idx, len(dataloader),
                                              total_acc/total_count, train_loss/total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()

3.5. Defining the Evaluation Function

def evaluate(dataloader):
    model.eval()  # switch to evaluation mode
    total_acc, train_loss, total_count = 0, 0, 0

    with torch.no_grad():
        for idx, (text,label,offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            
            loss = criterion(predicted_label, label)  # compute the loss
            # accumulate evaluation statistics
            total_acc   += (predicted_label.argmax(1) == label).sum().item()
            train_loss  += loss.item()
            total_count += label.size(0)
            
    return total_acc/total_count, train_loss/total_count

4. Training the Model

4.1. Model Training

# hyperparameters
EPOCHS     = 10 

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader)
    val_acc, val_loss = evaluate(valid_dataloader)
    
    # get the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']
    
    print('-' * 69)
    print('| epoch {:1d} | time: {:4.2f}s | '
          'valid_acc {:4.3f} valid_loss {:4.3f} | lr {:4.6f}'.format(epoch,
                                           time.time() - epoch_start_time,
                                           val_acc,val_loss,lr))

    print('-' * 69)

Code output

| epoch 1 |  300/2420 batches | train_acc 0.105 train_loss 0.63515
| epoch 1 |  600/2420 batches | train_acc 0.103 train_loss 0.62862
| epoch 1 |  900/2420 batches | train_acc 0.109 train_loss 0.61628
| epoch 1 | 1200/2420 batches | train_acc 0.134 train_loss 0.59848
| epoch 1 | 1500/2420 batches | train_acc 0.116 train_loss 0.59714
| epoch 1 | 1800/2420 batches | train_acc 0.126 train_loss 0.58824
| epoch 1 | 2100/2420 batches | train_acc 0.147 train_loss 0.59021
| epoch 1 | 2400/2420 batches | train_acc 0.147 train_loss 0.58219
---------------------------------------------------------------------
| epoch 1 | time: 81.20s | valid_acc 0.166 valid_loss 0.572 | lr 0.010000
---------------------------------------------------------------------
| epoch 2 |  300/2420 batches | train_acc 0.166 train_loss 0.57671
| epoch 2 |  600/2420 batches | train_acc 0.149 train_loss 0.57718
| epoch 2 |  900/2420 batches | train_acc 0.153 train_loss 0.57841
| epoch 2 | 1200/2420 batches | train_acc 0.168 train_loss 0.57620
| epoch 2 | 1500/2420 batches | train_acc 0.152 train_loss 0.57920
| epoch 2 | 1800/2420 batches | train_acc 0.159 train_loss 0.57392
| epoch 2 | 2100/2420 batches | train_acc 0.158 train_loss 0.57644
| epoch 2 | 2400/2420 batches | train_acc 0.187 train_loss 0.57352
---------------------------------------------------------------------
| epoch 2 | time: 80.59s | valid_acc 0.210 valid_loss 0.561 | lr 0.010000
---------------------------------------------------------------------
| epoch 3 |  300/2420 batches | train_acc 0.178 train_loss 0.56841
| epoch 3 |  600/2420 batches | train_acc 0.182 train_loss 0.56651
| epoch 3 |  900/2420 batches | train_acc 0.191 train_loss 0.55880
| epoch 3 | 1200/2420 batches | train_acc 0.212 train_loss 0.55917
| epoch 3 | 1500/2420 batches | train_acc 0.209 train_loss 0.55905
| epoch 3 | 1800/2420 batches | train_acc 0.190 train_loss 0.56497
| epoch 3 | 2100/2420 batches | train_acc 0.225 train_loss 0.55538
| epoch 3 | 2400/2420 batches | train_acc 0.195 train_loss 0.56107
---------------------------------------------------------------------
| epoch 3 | time: 80.77s | valid_acc 0.223 valid_loss 0.549 | lr 0.010000
---------------------------------------------------------------------
| epoch 4 |  300/2420 batches | train_acc 0.221 train_loss 0.55027
| epoch 4 |  600/2420 batches | train_acc 0.226 train_loss 0.54617
| epoch 4 |  900/2420 batches | train_acc 0.243 train_loss 0.54574
| epoch 4 | 1200/2420 batches | train_acc 0.223 train_loss 0.55473
| epoch 4 | 1500/2420 batches | train_acc 0.218 train_loss 0.55534
| epoch 4 | 1800/2420 batches | train_acc 0.236 train_loss 0.54059
| epoch 4 | 2100/2420 batches | train_acc 0.228 train_loss 0.54930
| epoch 4 | 2400/2420 batches | train_acc 0.226 train_loss 0.55326
---------------------------------------------------------------------
| epoch 4 | time: 81.09s | valid_acc 0.239 valid_loss 0.539 | lr 0.010000
---------------------------------------------------------------------
| epoch 5 |  300/2420 batches | train_acc 0.228 train_loss 0.54374
| epoch 5 |  600/2420 batches | train_acc 0.211 train_loss 0.54772
| epoch 5 |  900/2420 batches | train_acc 0.230 train_loss 0.54833
| epoch 5 | 1200/2420 batches | train_acc 0.226 train_loss 0.54882
| epoch 5 | 1500/2420 batches | train_acc 0.217 train_loss 0.54486
| epoch 5 | 1800/2420 batches | train_acc 0.231 train_loss 0.54067
| epoch 5 | 2100/2420 batches | train_acc 0.245 train_loss 0.53641
| epoch 5 | 2400/2420 batches | train_acc 0.238 train_loss 0.53832
---------------------------------------------------------------------
| epoch 5 | time: 80.30s | valid_acc 0.250 valid_loss 0.531 | lr 0.010000
---------------------------------------------------------------------
| epoch 6 |  300/2420 batches | train_acc 0.233 train_loss 0.53834
| epoch 6 |  600/2420 batches | train_acc 0.229 train_loss 0.54007
| epoch 6 |  900/2420 batches | train_acc 0.240 train_loss 0.53498
| epoch 6 | 1200/2420 batches | train_acc 0.265 train_loss 0.53296
| epoch 6 | 1500/2420 batches | train_acc 0.237 train_loss 0.53516
| epoch 6 | 1800/2420 batches | train_acc 0.244 train_loss 0.54253
| epoch 6 | 2100/2420 batches | train_acc 0.263 train_loss 0.53246
| epoch 6 | 2400/2420 batches | train_acc 0.272 train_loss 0.52636
---------------------------------------------------------------------
| epoch 6 | time: 80.11s | valid_acc 0.236 valid_loss 0.543 | lr 0.010000
---------------------------------------------------------------------
| epoch 7 |  300/2420 batches | train_acc 0.247 train_loss 0.53724
| epoch 7 |  600/2420 batches | train_acc 0.277 train_loss 0.52268
| epoch 7 |  900/2420 batches | train_acc 0.287 train_loss 0.52461
| epoch 7 | 1200/2420 batches | train_acc 0.245 train_loss 0.52172
| epoch 7 | 1500/2420 batches | train_acc 0.253 train_loss 0.52076
| epoch 7 | 1800/2420 batches | train_acc 0.262 train_loss 0.51814
| epoch 7 | 2100/2420 batches | train_acc 0.277 train_loss 0.51824
| epoch 7 | 2400/2420 batches | train_acc 0.300 train_loss 0.51197
---------------------------------------------------------------------
| epoch 7 | time: 80.58s | valid_acc 0.301 valid_loss 0.502 | lr 0.010000
---------------------------------------------------------------------
| epoch 8 |  300/2420 batches | train_acc 0.290 train_loss 0.51114
| epoch 8 |  600/2420 batches | train_acc 0.299 train_loss 0.50069
| epoch 8 |  900/2420 batches | train_acc 0.299 train_loss 0.49917
| epoch 8 | 1200/2420 batches | train_acc 0.320 train_loss 0.49608
| epoch 8 | 1500/2420 batches | train_acc 0.341 train_loss 0.48615
| epoch 8 | 1800/2420 batches | train_acc 0.315 train_loss 0.50020
| epoch 8 | 2100/2420 batches | train_acc 0.366 train_loss 0.47658
| epoch 8 | 2400/2420 batches | train_acc 0.343 train_loss 0.48388
---------------------------------------------------------------------
| epoch 8 | time: 80.48s | valid_acc 0.367 valid_loss 0.471 | lr 0.010000
---------------------------------------------------------------------
| epoch 9 |  300/2420 batches | train_acc 0.355 train_loss 0.47828
| epoch 9 |  600/2420 batches | train_acc 0.358 train_loss 0.47669
| epoch 9 |  900/2420 batches | train_acc 0.369 train_loss 0.46768
| epoch 9 | 1200/2420 batches | train_acc 0.368 train_loss 0.47074
| epoch 9 | 1500/2420 batches | train_acc 0.385 train_loss 0.46331
| epoch 9 | 1800/2420 batches | train_acc 0.355 train_loss 0.47316
| epoch 9 | 2100/2420 batches | train_acc 0.380 train_loss 0.46985
| epoch 9 | 2400/2420 batches | train_acc 0.384 train_loss 0.46793
---------------------------------------------------------------------
| epoch 9 | time: 80.06s | valid_acc 0.381 valid_loss 0.456 | lr 0.010000
---------------------------------------------------------------------
| epoch 10 |  300/2420 batches | train_acc 0.380 train_loss 0.45352
| epoch 10 |  600/2420 batches | train_acc 0.395 train_loss 0.44962
| epoch 10 |  900/2420 batches | train_acc 0.407 train_loss 0.44455
| epoch 10 | 1200/2420 batches | train_acc 0.397 train_loss 0.44962
| epoch 10 | 1500/2420 batches | train_acc 0.401 train_loss 0.44773
| epoch 10 | 1800/2420 batches | train_acc 0.422 train_loss 0.43027
| epoch 10 | 2100/2420 batches | train_acc 0.454 train_loss 0.42070
| epoch 10 | 2400/2420 batches | train_acc 0.446 train_loss 0.43559
---------------------------------------------------------------------
| epoch 10 | time: 80.64s | valid_acc 0.454 valid_loss 0.413 | lr 0.010000
---------------------------------------------------------------------

4.2. Model Evaluation

test_acc, test_loss = evaluate(valid_dataloader)
print('模型准确率为:{:5.4f}'.format(test_acc))

Code output

模型准确率为:0.4471

5. Tuning the Code Based on Your Own Understanding to Reach 70% Accuracy

To improve the model's accuracy, the following kinds of adjustments can be considered:

5.1. Adjusting Model Hyperparameters

● Learning rate: the learning rate may need adjusting; a rate that is too high or too low hurts both convergence speed and final performance. Try lowering or raising it in the code.

● Hidden size: increasing the hidden size or adding more layers may improve the model's expressive power.

● Batch size: try increasing or decreasing batch_size and compare the results. A sketch of such changes follows this list.
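
A minimal sketch of such changes, assuming the variables defined earlier in this article (vocab_size, embed_dim, num_class, device); the specific values are illustrative assumptions, not tuned settings:

# try a shallower but wider encoder and a different optimizer / learning rate
model = TransformerModel(vocab_size, embed_dim, num_class,
                         nhead=8, d_hid=512, nlayers=6, dropout=0.1).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# note: if BATCH_SIZE is changed from 4, the hard-coded reshape in
# TransformerModel.forward() (output.view(4, ...)) must be updated to match
BATCH_SIZE = 16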

5.2. Increasing the Number of Training Epochs
More training epochs give the model more time to learn and adjust its parameters. Train for more epochs, but keep the risk of overfitting in mind.

5.3. Regularization

● Adding dropout helps prevent overfitting and thus improves generalization.
● L2 regularization: add weight decay to the optimizer. A sketch of both ideas follows this list.
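
A minimal sketch of both ideas, reusing the TransformerModel defined above (the 0.3 and 1e-4 values are illustrative assumptions):

# stronger dropout inside the Transformer encoder layers
model = TransformerModel(vocab_size, embed_dim, num_class, dropout=0.3).to(device)

# L2 regularization via weight decay in the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, weight_decay=1e-4)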

5.4. Data Augmentation
Collect more data, or generate additional training samples with data-augmentation techniques. Greater data diversity usually improves generalization; a simple sketch follows.
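
As one very simple illustration (an assumption, not a technique from the original text), random word deletion after jieba segmentation produces noisy copies of a sentence that can be added to the training set:

import random
import jieba

def random_deletion(text, p=0.1):
    # drop each word with probability p to create an augmented copy
    words = jieba.lcut(text)
    kept  = [w for w in words if random.random() > p]
    return ''.join(kept) if kept else text

print(random_deletion('随便播放一首专辑阁楼里的佛里的歌'))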

5.5. Using a More Complex Model Architecture
More complex architectures, such as bidirectional LSTMs, GRUs, or Transformer models, can be tried; they may handle sequence data better. A bidirectional LSTM sketch follows.
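
A minimal sketch of a bidirectional LSTM classifier (an illustration, not the model used in this article); note that it expects padded [batch, seq_len] token-id inputs rather than the (text, offsets) pair used by the EmbeddingBag model above:

import torch
import torch.nn as nn

class BiLSTMClassifier(nn.Module):
    def __init__(self, vocab_size, embed_dim, hidden_dim, num_class):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm      = nn.LSTM(embed_dim, hidden_dim,
                                 batch_first=True, bidirectional=True)
        self.fc        = nn.Linear(hidden_dim * 2, num_class)  # *2 for the two directions

    def forward(self, token_ids):                      # token_ids: [batch, seq_len]
        embedded = self.embedding(token_ids)           # [batch, seq_len, embed_dim]
        _, (hidden, _) = self.lstm(embedded)           # hidden: [2, batch, hidden_dim]
        feat = torch.cat([hidden[0], hidden[1]], dim=1)
        return self.fc(feat)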

5.6. Tuning the Loss Function
If the current loss function performs poorly, consider switching to a different one or adding custom adjustments on top of it, as sketched below.
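
One small adjustment that CrossEntropyLoss supports directly is label smoothing (the 0.1 value below is an illustrative assumption):

criterion = torch.nn.CrossEntropyLoss(label_smoothing=0.1)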

5.7. Checking the Dataset
Make sure the label distribution in the dataset is reasonable and that there is no erroneous data. If the data is imbalanced, resampling or a weighted loss function may be needed, as sketched below.
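
A minimal sketch of a class-weighted loss, assuming the train_data and label_name variables from earlier in this article; the inverse-frequency weighting scheme itself is an assumption:

counts  = train_data[1].value_counts()
weights = torch.tensor([len(train_data) / counts[name] for name in label_name],
                       dtype=torch.float).to(device)
criterion = torch.nn.CrossEntropyLoss(weight=weights)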

5.8. Experimentation and Tuning
Observe through experiments how different combinations of parameters and methods affect model performance, and find the most suitable settings.

These methods can be used individually or in combination to improve accuracy. Start with more training epochs and learning-rate adjustments, then gradually try increasing model complexity and adding regularization. Tuning is an iterative process that has to be adjusted continually based on experimental results.

To reach 70% accuracy in this exercise, the number of training epochs can be increased: in the code above, change EPOCHS from 10 to 100, i.e.:

# hyperparameters
EPOCHS     = 100

With everything else unchanged, the training results are as follows:

| epoch 1 |  300/2420 batches | train_acc 0.124 train_loss 0.63172
| epoch 1 |  600/2420 batches | train_acc 0.100 train_loss 0.62975
| epoch 1 |  900/2420 batches | train_acc 0.108 train_loss 0.61340
| epoch 1 | 1200/2420 batches | train_acc 0.128 train_loss 0.60664
| epoch 1 | 1500/2420 batches | train_acc 0.102 train_loss 0.59968
| epoch 1 | 1800/2420 batches | train_acc 0.114 train_loss 0.59483
| epoch 1 | 2100/2420 batches | train_acc 0.141 train_loss 0.58566
| epoch 1 | 2400/2420 batches | train_acc 0.128 train_loss 0.58129
---------------------------------------------------------------------
| epoch 1 | time: 85.62s | valid_acc 0.141 valid_loss 0.585 | lr 0.010000
---------------------------------------------------------------------
| epoch 2 |  300/2420 batches | train_acc 0.157 train_loss 0.58087
| epoch 2 |  600/2420 batches | train_acc 0.157 train_loss 0.57831
| epoch 2 |  900/2420 batches | train_acc 0.153 train_loss 0.58050
| epoch 2 | 1200/2420 batches | train_acc 0.172 train_loss 0.57228
| epoch 2 | 1500/2420 batches | train_acc 0.187 train_loss 0.56080
| epoch 2 | 1800/2420 batches | train_acc 0.163 train_loss 0.57341
| epoch 2 | 2100/2420 batches | train_acc 0.169 train_loss 0.56785
| epoch 2 | 2400/2420 batches | train_acc 0.174 train_loss 0.56988
---------------------------------------------------------------------
| epoch 2 | time: 86.00s | valid_acc 0.194 valid_loss 0.564 | lr 0.010000
---------------------------------------------------------------------
| epoch 3 |  300/2420 batches | train_acc 0.194 train_loss 0.56447
| epoch 3 |  600/2420 batches | train_acc 0.187 train_loss 0.55973
| epoch 3 |  900/2420 batches | train_acc 0.198 train_loss 0.55564
| epoch 3 | 1200/2420 batches | train_acc 0.189 train_loss 0.55915
| epoch 3 | 1500/2420 batches | train_acc 0.199 train_loss 0.55970
| epoch 3 | 1800/2420 batches | train_acc 0.200 train_loss 0.56135
| epoch 3 | 2100/2420 batches | train_acc 0.225 train_loss 0.54863
| epoch 3 | 2400/2420 batches | train_acc 0.216 train_loss 0.54849
---------------------------------------------------------------------
| epoch 3 | time: 84.61s | valid_acc 0.243 valid_loss 0.542 | lr 0.010000
---------------------------------------------------------------------
| epoch 4 |  300/2420 batches | train_acc 0.226 train_loss 0.55006
| epoch 4 |  600/2420 batches | train_acc 0.217 train_loss 0.55003
| epoch 4 |  900/2420 batches | train_acc 0.240 train_loss 0.54791
| epoch 4 | 1200/2420 batches | train_acc 0.253 train_loss 0.54397
| epoch 4 | 1500/2420 batches | train_acc 0.233 train_loss 0.54547
| epoch 4 | 1800/2420 batches | train_acc 0.233 train_loss 0.55099
| epoch 4 | 2100/2420 batches | train_acc 0.227 train_loss 0.54291
| epoch 4 | 2400/2420 batches | train_acc 0.223 train_loss 0.54319
---------------------------------------------------------------------
| epoch 4 | time: 84.61s | valid_acc 0.261 valid_loss 0.548 | lr 0.010000
---------------------------------------------------------------------
| epoch 5 |  300/2420 batches | train_acc 0.233 train_loss 0.54866
| epoch 5 |  600/2420 batches | train_acc 0.238 train_loss 0.53594
| epoch 5 |  900/2420 batches | train_acc 0.250 train_loss 0.53580
| epoch 5 | 1200/2420 batches | train_acc 0.236 train_loss 0.53639
| epoch 5 | 1500/2420 batches | train_acc 0.242 train_loss 0.53502
| epoch 5 | 1800/2420 batches | train_acc 0.229 train_loss 0.54436
| epoch 5 | 2100/2420 batches | train_acc 0.266 train_loss 0.52806
| epoch 5 | 2400/2420 batches | train_acc 0.241 train_loss 0.53787
---------------------------------------------------------------------
| epoch 5 | time: 84.84s | valid_acc 0.240 valid_loss 0.537 | lr 0.010000
---------------------------------------------------------------------
| epoch 6 |  300/2420 batches | train_acc 0.245 train_loss 0.53467
| epoch 6 |  600/2420 batches | train_acc 0.241 train_loss 0.53455
| epoch 6 |  900/2420 batches | train_acc 0.253 train_loss 0.52606
| epoch 6 | 1200/2420 batches | train_acc 0.250 train_loss 0.53322
| epoch 6 | 1500/2420 batches | train_acc 0.276 train_loss 0.52478
| epoch 6 | 1800/2420 batches | train_acc 0.251 train_loss 0.52935
| epoch 6 | 2100/2420 batches | train_acc 0.260 train_loss 0.52503
| epoch 6 | 2400/2420 batches | train_acc 0.239 train_loss 0.53837
---------------------------------------------------------------------
| epoch 6 | time: 84.49s | valid_acc 0.260 valid_loss 0.536 | lr 0.010000
---------------------------------------------------------------------
| epoch 7 |  300/2420 batches | train_acc 0.263 train_loss 0.51957
| epoch 7 |  600/2420 batches | train_acc 0.249 train_loss 0.52756
| epoch 7 |  900/2420 batches | train_acc 0.267 train_loss 0.52570
| epoch 7 | 1200/2420 batches | train_acc 0.266 train_loss 0.52688
| epoch 7 | 1500/2420 batches | train_acc 0.268 train_loss 0.52311
| epoch 7 | 1800/2420 batches | train_acc 0.238 train_loss 0.53244
| epoch 7 | 2100/2420 batches | train_acc 0.253 train_loss 0.52637
| epoch 7 | 2400/2420 batches | train_acc 0.273 train_loss 0.52541
---------------------------------------------------------------------
| epoch 7 | time: 84.32s | valid_acc 0.269 valid_loss 0.521 | lr 0.010000
---------------------------------------------------------------------
| epoch 8 |  300/2420 batches | train_acc 0.256 train_loss 0.52177
| epoch 8 |  600/2420 batches | train_acc 0.254 train_loss 0.52789
| epoch 8 |  900/2420 batches | train_acc 0.269 train_loss 0.52848
| epoch 8 | 1200/2420 batches | train_acc 0.284 train_loss 0.52198
| epoch 8 | 1500/2420 batches | train_acc 0.270 train_loss 0.51472
| epoch 8 | 1800/2420 batches | train_acc 0.261 train_loss 0.52358
| epoch 8 | 2100/2420 batches | train_acc 0.253 train_loss 0.52217
| epoch 8 | 2400/2420 batches | train_acc 0.270 train_loss 0.51121
---------------------------------------------------------------------
| epoch 8 | time: 86.19s | valid_acc 0.271 valid_loss 0.517 | lr 0.010000
---------------------------------------------------------------------
| epoch 9 |  300/2420 batches | train_acc 0.248 train_loss 0.51928
| epoch 9 |  600/2420 batches | train_acc 0.281 train_loss 0.51930
| epoch 9 |  900/2420 batches | train_acc 0.272 train_loss 0.51636
| epoch 9 | 1200/2420 batches | train_acc 0.270 train_loss 0.51573
| epoch 9 | 1500/2420 batches | train_acc 0.277 train_loss 0.51272
| epoch 9 | 1800/2420 batches | train_acc 0.268 train_loss 0.51255
| epoch 9 | 2100/2420 batches | train_acc 0.284 train_loss 0.51215
| epoch 9 | 2400/2420 batches | train_acc 0.271 train_loss 0.50947
---------------------------------------------------------------------
| epoch 9 | time: 84.34s | valid_acc 0.296 valid_loss 0.507 | lr 0.010000
---------------------------------------------------------------------
| epoch 10 |  300/2420 batches | train_acc 0.277 train_loss 0.50457
| epoch 10 |  600/2420 batches | train_acc 0.278 train_loss 0.51906
| epoch 10 |  900/2420 batches | train_acc 0.295 train_loss 0.50676
| epoch 10 | 1200/2420 batches | train_acc 0.302 train_loss 0.49880
| epoch 10 | 1500/2420 batches | train_acc 0.304 train_loss 0.50554
| epoch 10 | 1800/2420 batches | train_acc 0.314 train_loss 0.49315
| epoch 10 | 2100/2420 batches | train_acc 0.346 train_loss 0.48392
| epoch 10 | 2400/2420 batches | train_acc 0.338 train_loss 0.47602
---------------------------------------------------------------------
| epoch 10 | time: 84.58s | valid_acc 0.342 valid_loss 0.486 | lr 0.010000
---------------------------------------------------------------------
| epoch 11 |  300/2420 batches | train_acc 0.348 train_loss 0.47524
| epoch 11 |  600/2420 batches | train_acc 0.386 train_loss 0.47113
| epoch 11 |  900/2420 batches | train_acc 0.362 train_loss 0.46883
| epoch 11 | 1200/2420 batches | train_acc 0.383 train_loss 0.45975
| epoch 11 | 1500/2420 batches | train_acc 0.381 train_loss 0.46418
| epoch 11 | 1800/2420 batches | train_acc 0.380 train_loss 0.45996
| epoch 11 | 2100/2420 batches | train_acc 0.386 train_loss 0.46528
| epoch 11 | 2400/2420 batches | train_acc 0.410 train_loss 0.45139
---------------------------------------------------------------------
| epoch 11 | time: 83.80s | valid_acc 0.410 valid_loss 0.445 | lr 0.010000
---------------------------------------------------------------------
| epoch 12 |  300/2420 batches | train_acc 0.415 train_loss 0.44190
| epoch 12 |  600/2420 batches | train_acc 0.401 train_loss 0.44386
| epoch 12 |  900/2420 batches | train_acc 0.428 train_loss 0.43816
| epoch 12 | 1200/2420 batches | train_acc 0.427 train_loss 0.43997
| epoch 12 | 1500/2420 batches | train_acc 0.426 train_loss 0.43172
| epoch 12 | 1800/2420 batches | train_acc 0.409 train_loss 0.44360
| epoch 12 | 2100/2420 batches | train_acc 0.439 train_loss 0.43052
| epoch 12 | 2400/2420 batches | train_acc 0.446 train_loss 0.43177
---------------------------------------------------------------------
| epoch 12 | time: 83.92s | valid_acc 0.458 valid_loss 0.420 | lr 0.010000
---------------------------------------------------------------------
| epoch 13 |  300/2420 batches | train_acc 0.463 train_loss 0.41759
| epoch 13 |  600/2420 batches | train_acc 0.474 train_loss 0.40258
| epoch 13 |  900/2420 batches | train_acc 0.463 train_loss 0.41299
| epoch 13 | 1200/2420 batches | train_acc 0.476 train_loss 0.41546
| epoch 13 | 1500/2420 batches | train_acc 0.464 train_loss 0.40948
| epoch 13 | 1800/2420 batches | train_acc 0.468 train_loss 0.41841
| epoch 13 | 2100/2420 batches | train_acc 0.490 train_loss 0.39884
| epoch 13 | 2400/2420 batches | train_acc 0.487 train_loss 0.40473
---------------------------------------------------------------------
| epoch 13 | time: 83.64s | valid_acc 0.500 valid_loss 0.402 | lr 0.010000
---------------------------------------------------------------------
| epoch 14 |  300/2420 batches | train_acc 0.515 train_loss 0.38339
| epoch 14 |  600/2420 batches | train_acc 0.519 train_loss 0.37501
| epoch 14 |  900/2420 batches | train_acc 0.517 train_loss 0.38280
| epoch 14 | 1200/2420 batches | train_acc 0.517 train_loss 0.38195
| epoch 14 | 1500/2420 batches | train_acc 0.525 train_loss 0.39008
| epoch 14 | 1800/2420 batches | train_acc 0.525 train_loss 0.38158
| epoch 14 | 2100/2420 batches | train_acc 0.525 train_loss 0.38559
| epoch 14 | 2400/2420 batches | train_acc 0.529 train_loss 0.38228
---------------------------------------------------------------------
| epoch 14 | time: 86.00s | valid_acc 0.566 valid_loss 0.366 | lr 0.010000
---------------------------------------------------------------------
| epoch 15 |  300/2420 batches | train_acc 0.553 train_loss 0.37311
| epoch 15 |  600/2420 batches | train_acc 0.564 train_loss 0.35000
| epoch 15 |  900/2420 batches | train_acc 0.558 train_loss 0.36826
| epoch 15 | 1200/2420 batches | train_acc 0.547 train_loss 0.36430
| epoch 15 | 1500/2420 batches | train_acc 0.564 train_loss 0.36633
| epoch 15 | 1800/2420 batches | train_acc 0.561 train_loss 0.35983
| epoch 15 | 2100/2420 batches | train_acc 0.593 train_loss 0.33570
| epoch 15 | 2400/2420 batches | train_acc 0.594 train_loss 0.33010
---------------------------------------------------------------------
| epoch 15 | time: 84.05s | valid_acc 0.589 valid_loss 0.342 | lr 0.010000
---------------------------------------------------------------------
| epoch 16 |  300/2420 batches | train_acc 0.580 train_loss 0.34923
| epoch 16 |  600/2420 batches | train_acc 0.587 train_loss 0.33068
| epoch 16 |  900/2420 batches | train_acc 0.599 train_loss 0.32958
| epoch 16 | 1200/2420 batches | train_acc 0.607 train_loss 0.32168
| epoch 16 | 1500/2420 batches | train_acc 0.588 train_loss 0.33818
| epoch 16 | 1800/2420 batches | train_acc 0.598 train_loss 0.34203
| epoch 16 | 2100/2420 batches | train_acc 0.628 train_loss 0.31660
| epoch 16 | 2400/2420 batches | train_acc 0.603 train_loss 0.32781
---------------------------------------------------------------------
| epoch 16 | time: 84.41s | valid_acc 0.624 valid_loss 0.331 | lr 0.010000
---------------------------------------------------------------------
| epoch 17 |  300/2420 batches | train_acc 0.626 train_loss 0.31390
| epoch 17 |  600/2420 batches | train_acc 0.631 train_loss 0.30935
| epoch 17 |  900/2420 batches | train_acc 0.633 train_loss 0.31277
| epoch 17 | 1200/2420 batches | train_acc 0.644 train_loss 0.30918
| epoch 17 | 1500/2420 batches | train_acc 0.651 train_loss 0.29901
| epoch 17 | 1800/2420 batches | train_acc 0.636 train_loss 0.31031
| epoch 17 | 2100/2420 batches | train_acc 0.627 train_loss 0.31124
| epoch 17 | 2400/2420 batches | train_acc 0.651 train_loss 0.30914
---------------------------------------------------------------------
| epoch 17 | time: 83.92s | valid_acc 0.629 valid_loss 0.305 | lr 0.010000
---------------------------------------------------------------------
| epoch 18 |  300/2420 batches | train_acc 0.630 train_loss 0.30568
| epoch 18 |  600/2420 batches | train_acc 0.634 train_loss 0.29731
| epoch 18 |  900/2420 batches | train_acc 0.672 train_loss 0.28409
| epoch 18 | 1200/2420 batches | train_acc 0.681 train_loss 0.28299
| epoch 18 | 1500/2420 batches | train_acc 0.642 train_loss 0.28732
| epoch 18 | 1800/2420 batches | train_acc 0.658 train_loss 0.28358
| epoch 18 | 2100/2420 batches | train_acc 0.642 train_loss 0.29563
| epoch 18 | 2400/2420 batches | train_acc 0.668 train_loss 0.29340
---------------------------------------------------------------------
| epoch 18 | time: 83.63s | valid_acc 0.648 valid_loss 0.327 | lr 0.010000
---------------------------------------------------------------------
| epoch 19 |  300/2420 batches | train_acc 0.672 train_loss 0.27791
| epoch 19 |  600/2420 batches | train_acc 0.667 train_loss 0.28195
| epoch 19 |  900/2420 batches | train_acc 0.683 train_loss 0.26861
| epoch 19 | 1200/2420 batches | train_acc 0.684 train_loss 0.28081
| epoch 19 | 1500/2420 batches | train_acc 0.700 train_loss 0.26196
| epoch 19 | 1800/2420 batches | train_acc 0.664 train_loss 0.28689
| epoch 19 | 2100/2420 batches | train_acc 0.670 train_loss 0.27827
| epoch 19 | 2400/2420 batches | train_acc 0.689 train_loss 0.26052
---------------------------------------------------------------------
| epoch 19 | time: 84.06s | valid_acc 0.664 valid_loss 0.287 | lr 0.010000
---------------------------------------------------------------------
| epoch 20 |  300/2420 batches | train_acc 0.708 train_loss 0.26133
| epoch 20 |  600/2420 batches | train_acc 0.668 train_loss 0.28050
| epoch 20 |  900/2420 batches | train_acc 0.688 train_loss 0.26073
| epoch 20 | 1200/2420 batches | train_acc 0.699 train_loss 0.26520
| epoch 20 | 1500/2420 batches | train_acc 0.709 train_loss 0.25477
| epoch 20 | 1800/2420 batches | train_acc 0.680 train_loss 0.26421
| epoch 20 | 2100/2420 batches | train_acc 0.680 train_loss 0.27121
| epoch 20 | 2400/2420 batches | train_acc 0.703 train_loss 0.25947
---------------------------------------------------------------------
| epoch 20 | time: 83.50s | valid_acc 0.682 valid_loss 0.279 | lr 0.010000
---------------------------------------------------------------------
| epoch 21 |  300/2420 batches | train_acc 0.723 train_loss 0.24385
| epoch 21 |  600/2420 batches | train_acc 0.734 train_loss 0.22769
| epoch 21 |  900/2420 batches | train_acc 0.711 train_loss 0.25551
| epoch 21 | 1200/2420 batches | train_acc 0.715 train_loss 0.25782
| epoch 21 | 1500/2420 batches | train_acc 0.693 train_loss 0.24973
| epoch 21 | 1800/2420 batches | train_acc 0.708 train_loss 0.25735
| epoch 21 | 2100/2420 batches | train_acc 0.708 train_loss 0.25476
| epoch 21 | 2400/2420 batches | train_acc 0.667 train_loss 0.27919
---------------------------------------------------------------------
| epoch 21 | time: 85.58s | valid_acc 0.681 valid_loss 0.281 | lr 0.010000
---------------------------------------------------------------------
| epoch 22 |  300/2420 batches | train_acc 0.728 train_loss 0.23146
| epoch 22 |  600/2420 batches | train_acc 0.701 train_loss 0.24143
| epoch 22 |  900/2420 batches | train_acc 0.720 train_loss 0.24390
| epoch 22 | 1200/2420 batches | train_acc 0.734 train_loss 0.23387
| epoch 22 | 1500/2420 batches | train_acc 0.710 train_loss 0.25002
| epoch 22 | 1800/2420 batches | train_acc 0.733 train_loss 0.23654
| epoch 22 | 2100/2420 batches | train_acc 0.707 train_loss 0.24708
| epoch 22 | 2400/2420 batches | train_acc 0.718 train_loss 0.24439
---------------------------------------------------------------------
| epoch 22 | time: 83.83s | valid_acc 0.686 valid_loss 0.292 | lr 0.010000
---------------------------------------------------------------------
| epoch 23 |  300/2420 batches | train_acc 0.755 train_loss 0.21283
| epoch 23 |  600/2420 batches | train_acc 0.744 train_loss 0.22126
| epoch 23 |  900/2420 batches | train_acc 0.748 train_loss 0.22315
| epoch 23 | 1200/2420 batches | train_acc 0.723 train_loss 0.24164
| epoch 23 | 1500/2420 batches | train_acc 0.760 train_loss 0.21543
| epoch 23 | 1800/2420 batches | train_acc 0.739 train_loss 0.23916
| epoch 23 | 2100/2420 batches | train_acc 0.747 train_loss 0.23062
| epoch 23 | 2400/2420 batches | train_acc 0.722 train_loss 0.24701
---------------------------------------------------------------------
| epoch 23 | time: 83.58s | valid_acc 0.702 valid_loss 0.285 | lr 0.010000
---------------------------------------------------------------------
| epoch 24 |  300/2420 batches | train_acc 0.757 train_loss 0.21892
| epoch 24 |  600/2420 batches | train_acc 0.768 train_loss 0.20316
| epoch 24 |  900/2420 batches | train_acc 0.749 train_loss 0.22762
| epoch 24 | 1200/2420 batches | train_acc 0.759 train_loss 0.21968
| epoch 24 | 1500/2420 batches | train_acc 0.743 train_loss 0.22297
| epoch 24 | 1800/2420 batches | train_acc 0.745 train_loss 0.22220
| epoch 24 | 2100/2420 batches | train_acc 0.735 train_loss 0.23062
| epoch 24 | 2400/2420 batches | train_acc 0.751 train_loss 0.22651
---------------------------------------------------------------------
| epoch 24 | time: 83.42s | valid_acc 0.711 valid_loss 0.263 | lr 0.010000
---------------------------------------------------------------------
| epoch 25 |  300/2420 batches | train_acc 0.748 train_loss 0.22368
| epoch 25 |  600/2420 batches | train_acc 0.757 train_loss 0.21353
| epoch 25 |  900/2420 batches | train_acc 0.766 train_loss 0.20579
| epoch 25 | 1200/2420 batches | train_acc 0.743 train_loss 0.22613
| epoch 25 | 1500/2420 batches | train_acc 0.782 train_loss 0.19220
| epoch 25 | 1800/2420 batches | train_acc 0.758 train_loss 0.20600
| epoch 25 | 2100/2420 batches | train_acc 0.762 train_loss 0.21032
| epoch 25 | 2400/2420 batches | train_acc 0.761 train_loss 0.21532
---------------------------------------------------------------------
| epoch 25 | time: 84.20s | valid_acc 0.705 valid_loss 0.272 | lr 0.010000
---------------------------------------------------------------------
| epoch 26 |  300/2420 batches | train_acc 0.772 train_loss 0.19795
| epoch 26 |  600/2420 batches | train_acc 0.773 train_loss 0.20005
| epoch 26 |  900/2420 batches | train_acc 0.752 train_loss 0.22276
| epoch 26 | 1200/2420 batches | train_acc 0.777 train_loss 0.19670
| epoch 26 | 1500/2420 batches | train_acc 0.776 train_loss 0.19337
| epoch 26 | 1800/2420 batches | train_acc 0.786 train_loss 0.19579
| epoch 26 | 2100/2420 batches | train_acc 0.752 train_loss 0.22032
| epoch 26 | 2400/2420 batches | train_acc 0.763 train_loss 0.21359
---------------------------------------------------------------------
| epoch 26 | time: 83.77s | valid_acc 0.712 valid_loss 0.256 | lr 0.010000
---------------------------------------------------------------------
| epoch 27 |  300/2420 batches | train_acc 0.790 train_loss 0.18600
| epoch 27 |  600/2420 batches | train_acc 0.794 train_loss 0.18248
| epoch 27 |  900/2420 batches | train_acc 0.777 train_loss 0.19377
| epoch 27 | 1200/2420 batches | train_acc 0.778 train_loss 0.19608
| epoch 27 | 1500/2420 batches | train_acc 0.779 train_loss 0.19710
| epoch 27 | 1800/2420 batches | train_acc 0.751 train_loss 0.21942
| epoch 27 | 2100/2420 batches | train_acc 0.775 train_loss 0.20445
| epoch 27 | 2400/2420 batches | train_acc 0.772 train_loss 0.20421
---------------------------------------------------------------------
| epoch 27 | time: 85.28s | valid_acc 0.729 valid_loss 0.241 | lr 0.010000
---------------------------------------------------------------------
| epoch 28 |  300/2420 batches | train_acc 0.802 train_loss 0.18074
| epoch 28 |  600/2420 batches | train_acc 0.770 train_loss 0.19999
| epoch 28 |  900/2420 batches | train_acc 0.797 train_loss 0.18595
| epoch 28 | 1200/2420 batches | train_acc 0.789 train_loss 0.19213
| epoch 28 | 1500/2420 batches | train_acc 0.788 train_loss 0.18830
| epoch 28 | 1800/2420 batches | train_acc 0.802 train_loss 0.18123
| epoch 28 | 2100/2420 batches | train_acc 0.771 train_loss 0.20442
| epoch 28 | 2400/2420 batches | train_acc 0.771 train_loss 0.19183
---------------------------------------------------------------------
| epoch 28 | time: 83.65s | valid_acc 0.721 valid_loss 0.250 | lr 0.010000
---------------------------------------------------------------------
| epoch 29 |  300/2420 batches | train_acc 0.798 train_loss 0.17485
| epoch 29 |  600/2420 batches | train_acc 0.782 train_loss 0.18292
| epoch 29 |  900/2420 batches | train_acc 0.812 train_loss 0.17578
| epoch 29 | 1200/2420 batches | train_acc 0.797 train_loss 0.18546
| epoch 29 | 1500/2420 batches | train_acc 0.774 train_loss 0.20004
| epoch 29 | 1800/2420 batches | train_acc 0.798 train_loss 0.18155
| epoch 29 | 2100/2420 batches | train_acc 0.802 train_loss 0.17889
| epoch 29 | 2400/2420 batches | train_acc 0.793 train_loss 0.18119
---------------------------------------------------------------------
| epoch 29 | time: 83.53s | valid_acc 0.739 valid_loss 0.235 | lr 0.010000
---------------------------------------------------------------------
| epoch 30 |  300/2420 batches | train_acc 0.811 train_loss 0.17348
| epoch 30 |  600/2420 batches | train_acc 0.807 train_loss 0.17723
| epoch 30 |  900/2420 batches | train_acc 0.806 train_loss 0.17811
| epoch 30 | 1200/2420 batches | train_acc 0.802 train_loss 0.18165
| epoch 30 | 1500/2420 batches | train_acc 0.792 train_loss 0.17804
| epoch 30 | 1800/2420 batches | train_acc 0.794 train_loss 0.18201
| epoch 30 | 2100/2420 batches | train_acc 0.805 train_loss 0.18108
| epoch 30 | 2400/2420 batches | train_acc 0.772 train_loss 0.19445
---------------------------------------------------------------------
| epoch 30 | time: 83.34s | valid_acc 0.736 valid_loss 0.241 | lr 0.010000
---------------------------------------------------------------------
| epoch 31 |  300/2420 batches | train_acc 0.795 train_loss 0.18230
| epoch 31 |  600/2420 batches | train_acc 0.800 train_loss 0.16432
| epoch 31 |  900/2420 batches | train_acc 0.823 train_loss 0.15572
| epoch 31 | 1200/2420 batches | train_acc 0.806 train_loss 0.17355
| epoch 31 | 1500/2420 batches | train_acc 0.788 train_loss 0.18789
| epoch 31 | 1800/2420 batches | train_acc 0.821 train_loss 0.16542
| epoch 31 | 2100/2420 batches | train_acc 0.800 train_loss 0.17999
| epoch 31 | 2400/2420 batches | train_acc 0.818 train_loss 0.16072
---------------------------------------------------------------------
| epoch 31 | time: 82.82s | valid_acc 0.723 valid_loss 0.248 | lr 0.010000
---------------------------------------------------------------------
| epoch 32 |  300/2420 batches | train_acc 0.814 train_loss 0.16909
| epoch 32 |  600/2420 batches | train_acc 0.807 train_loss 0.16509
| epoch 32 |  900/2420 batches | train_acc 0.820 train_loss 0.16708
| epoch 32 | 1200/2420 batches | train_acc 0.802 train_loss 0.17467
| epoch 32 | 1500/2420 batches | train_acc 0.802 train_loss 0.17725
| epoch 32 | 1800/2420 batches | train_acc 0.794 train_loss 0.18396
| epoch 32 | 2100/2420 batches | train_acc 0.827 train_loss 0.15054
| epoch 32 | 2400/2420 batches | train_acc 0.827 train_loss 0.15799
---------------------------------------------------------------------
| epoch 32 | time: 86.00s | valid_acc 0.750 valid_loss 0.239 | lr 0.010000
---------------------------------------------------------------------
| epoch 33 |  300/2420 batches | train_acc 0.826 train_loss 0.15211
| epoch 33 |  600/2420 batches | train_acc 0.813 train_loss 0.16635
| epoch 33 |  900/2420 batches | train_acc 0.814 train_loss 0.16284
| epoch 33 | 1200/2420 batches | train_acc 0.812 train_loss 0.16308
| epoch 33 | 1500/2420 batches | train_acc 0.812 train_loss 0.16584
| epoch 33 | 1800/2420 batches | train_acc 0.804 train_loss 0.16944
| epoch 33 | 2100/2420 batches | train_acc 0.815 train_loss 0.16779
| epoch 33 | 2400/2420 batches | train_acc 0.815 train_loss 0.17174
---------------------------------------------------------------------
| epoch 33 | time: 83.19s | valid_acc 0.745 valid_loss 0.238 | lr 0.010000
---------------------------------------------------------------------
| epoch 34 |  300/2420 batches | train_acc 0.835 train_loss 0.15056
| epoch 34 |  600/2420 batches | train_acc 0.842 train_loss 0.14200
| epoch 34 |  900/2420 batches | train_acc 0.818 train_loss 0.15810
| epoch 34 | 1200/2420 batches | train_acc 0.808 train_loss 0.15527
| epoch 34 | 1500/2420 batches | train_acc 0.802 train_loss 0.17560
| epoch 34 | 1800/2420 batches | train_acc 0.821 train_loss 0.16032
| epoch 34 | 2100/2420 batches | train_acc 0.812 train_loss 0.16979
| epoch 34 | 2400/2420 batches | train_acc 0.818 train_loss 0.15668
---------------------------------------------------------------------
| epoch 34 | time: 85.28s | valid_acc 0.742 valid_loss 0.261 | lr 0.010000
---------------------------------------------------------------------
| epoch 35 |  300/2420 batches | train_acc 0.826 train_loss 0.14948
| epoch 35 |  600/2420 batches | train_acc 0.831 train_loss 0.14942
| epoch 35 |  900/2420 batches | train_acc 0.833 train_loss 0.14918
| epoch 35 | 1200/2420 batches | train_acc 0.822 train_loss 0.15994
| epoch 35 | 1500/2420 batches | train_acc 0.823 train_loss 0.14684
| epoch 35 | 1800/2420 batches | train_acc 0.831 train_loss 0.14712
| epoch 35 | 2100/2420 batches | train_acc 0.830 train_loss 0.15662
| epoch 35 | 2400/2420 batches | train_acc 0.823 train_loss 0.15639
---------------------------------------------------------------------
| epoch 35 | time: 84.20s | valid_acc 0.731 valid_loss 0.250 | lr 0.010000
---------------------------------------------------------------------
| epoch 36 |  300/2420 batches | train_acc 0.822 train_loss 0.15163
| epoch 36 |  600/2420 batches | train_acc 0.825 train_loss 0.15594
| epoch 36 |  900/2420 batches | train_acc 0.821 train_loss 0.14903
| epoch 36 | 1200/2420 batches | train_acc 0.843 train_loss 0.13939
| epoch 36 | 1500/2420 batches | train_acc 0.846 train_loss 0.13995
| epoch 36 | 1800/2420 batches | train_acc 0.828 train_loss 0.16141
| epoch 36 | 2100/2420 batches | train_acc 0.822 train_loss 0.15839
| epoch 36 | 2400/2420 batches | train_acc 0.825 train_loss 0.15181
---------------------------------------------------------------------
| epoch 36 | time: 83.35s | valid_acc 0.727 valid_loss 0.256 | lr 0.010000
---------------------------------------------------------------------
| epoch 37 |  300/2420 batches | train_acc 0.838 train_loss 0.13922
| epoch 37 |  600/2420 batches | train_acc 0.848 train_loss 0.13031
| epoch 37 |  900/2420 batches | train_acc 0.848 train_loss 0.14068
| epoch 37 | 1200/2420 batches | train_acc 0.823 train_loss 0.15291
| epoch 37 | 1500/2420 batches | train_acc 0.829 train_loss 0.15638
| epoch 37 | 1800/2420 batches | train_acc 0.812 train_loss 0.15583
| epoch 37 | 2100/2420 batches | train_acc 0.823 train_loss 0.14788
| epoch 37 | 2400/2420 batches | train_acc 0.815 train_loss 0.15764
---------------------------------------------------------------------
| epoch 37 | time: 83.44s | valid_acc 0.757 valid_loss 0.236 | lr 0.010000
---------------------------------------------------------------------
| epoch 38 |  300/2420 batches | train_acc 0.842 train_loss 0.13803
| epoch 38 |  600/2420 batches | train_acc 0.852 train_loss 0.13690
| epoch 38 |  900/2420 batches | train_acc 0.824 train_loss 0.14710
| epoch 38 | 1200/2420 batches | train_acc 0.839 train_loss 0.14726
| epoch 38 | 1500/2420 batches | train_acc 0.836 train_loss 0.14197
| epoch 38 | 1800/2420 batches | train_acc 0.847 train_loss 0.13536
| epoch 38 | 2100/2420 batches | train_acc 0.821 train_loss 0.14906
| epoch 38 | 2400/2420 batches | train_acc 0.861 train_loss 0.13178
---------------------------------------------------------------------
| epoch 38 | time: 82.70s | valid_acc 0.732 valid_loss 0.269 | lr 0.010000
---------------------------------------------------------------------
| epoch 39 |  300/2420 batches | train_acc 0.860 train_loss 0.12524
| epoch 39 |  600/2420 batches | train_acc 0.870 train_loss 0.11515
| epoch 39 |  900/2420 batches | train_acc 0.835 train_loss 0.14201
| epoch 39 | 1200/2420 batches | train_acc 0.851 train_loss 0.12672
| epoch 39 | 1500/2420 batches | train_acc 0.848 train_loss 0.13800
| epoch 39 | 1800/2420 batches | train_acc 0.840 train_loss 0.15097
| epoch 39 | 2100/2420 batches | train_acc 0.840 train_loss 0.13632
| epoch 39 | 2400/2420 batches | train_acc 0.823 train_loss 0.15372
---------------------------------------------------------------------
| epoch 39 | time: 83.45s | valid_acc 0.753 valid_loss 0.233 | lr 0.010000
---------------------------------------------------------------------
| epoch 40 |  300/2420 batches | train_acc 0.879 train_loss 0.11595
| epoch 40 |  600/2420 batches | train_acc 0.853 train_loss 0.12571
| epoch 40 |  900/2420 batches | train_acc 0.832 train_loss 0.13827
| epoch 40 | 1200/2420 batches | train_acc 0.825 train_loss 0.14629
| epoch 40 | 1500/2420 batches | train_acc 0.846 train_loss 0.14264
| epoch 40 | 1800/2420 batches | train_acc 0.871 train_loss 0.12055
| epoch 40 | 2100/2420 batches | train_acc 0.836 train_loss 0.13732
| epoch 40 | 2400/2420 batches | train_acc 0.837 train_loss 0.14249
---------------------------------------------------------------------
| epoch 40 | time: 84.73s | valid_acc 0.713 valid_loss 0.308 | lr 0.010000
---------------------------------------------------------------------
| epoch 41 |  300/2420 batches | train_acc 0.833 train_loss 0.14336
| epoch 41 |  600/2420 batches | train_acc 0.857 train_loss 0.12880
| epoch 41 |  900/2420 batches | train_acc 0.860 train_loss 0.11698
| epoch 41 | 1200/2420 batches | train_acc 0.847 train_loss 0.13717
| epoch 41 | 1500/2420 batches | train_acc 0.838 train_loss 0.13810
| epoch 41 | 1800/2420 batches | train_acc 0.856 train_loss 0.13185
| epoch 41 | 2100/2420 batches | train_acc 0.848 train_loss 0.12859
| epoch 41 | 2400/2420 batches | train_acc 0.847 train_loss 0.13721
---------------------------------------------------------------------
| epoch 41 | time: 83.15s | valid_acc 0.756 valid_loss 0.244 | lr 0.010000
---------------------------------------------------------------------
| epoch 42 |  300/2420 batches | train_acc 0.866 train_loss 0.12032
| epoch 42 |  600/2420 batches | train_acc 0.868 train_loss 0.11196
| epoch 42 |  900/2420 batches | train_acc 0.838 train_loss 0.13773
| epoch 42 | 1200/2420 batches | train_acc 0.854 train_loss 0.12956
| epoch 42 | 1500/2420 batches | train_acc 0.846 train_loss 0.13400
| epoch 42 | 1800/2420 batches | train_acc 0.868 train_loss 0.11811
| epoch 42 | 2100/2420 batches | train_acc 0.838 train_loss 0.13654
| epoch 42 | 2400/2420 batches | train_acc 0.857 train_loss 0.12734
---------------------------------------------------------------------
| epoch 42 | time: 83.08s | valid_acc 0.756 valid_loss 0.236 | lr 0.010000
---------------------------------------------------------------------
| epoch 43 |  300/2420 batches | train_acc 0.882 train_loss 0.10326
| epoch 43 |  600/2420 batches | train_acc 0.877 train_loss 0.11144
| epoch 43 |  900/2420 batches | train_acc 0.856 train_loss 0.12526
| epoch 43 | 1200/2420 batches | train_acc 0.868 train_loss 0.11544
| epoch 43 | 1500/2420 batches | train_acc 0.853 train_loss 0.13788
| epoch 43 | 1800/2420 batches | train_acc 0.852 train_loss 0.13450
| epoch 43 | 2100/2420 batches | train_acc 0.868 train_loss 0.11786
| epoch 43 | 2400/2420 batches | train_acc 0.854 train_loss 0.13193
---------------------------------------------------------------------
| epoch 43 | time: 83.08s | valid_acc 0.765 valid_loss 0.223 | lr 0.010000
---------------------------------------------------------------------
| epoch 44 |  300/2420 batches | train_acc 0.874 train_loss 0.10205
| epoch 44 |  600/2420 batches | train_acc 0.869 train_loss 0.12368
| epoch 44 |  900/2420 batches | train_acc 0.874 train_loss 0.10740
| epoch 44 | 1200/2420 batches | train_acc 0.869 train_loss 0.11505
| epoch 44 | 1500/2420 batches | train_acc 0.868 train_loss 0.11295
| epoch 44 | 1800/2420 batches | train_acc 0.855 train_loss 0.13161
| epoch 44 | 2100/2420 batches | train_acc 0.874 train_loss 0.11385
| epoch 44 | 2400/2420 batches | train_acc 0.819 train_loss 0.15840
---------------------------------------------------------------------
| epoch 44 | time: 82.94s | valid_acc 0.759 valid_loss 0.231 | lr 0.010000
---------------------------------------------------------------------
| epoch 45 |  300/2420 batches | train_acc 0.862 train_loss 0.11530
| epoch 45 |  600/2420 batches | train_acc 0.899 train_loss 0.09283
| epoch 45 |  900/2420 batches | train_acc 0.872 train_loss 0.11314
| epoch 45 | 1200/2420 batches | train_acc 0.871 train_loss 0.11460
| epoch 45 | 1500/2420 batches | train_acc 0.874 train_loss 0.11236
| epoch 45 | 1800/2420 batches | train_acc 0.848 train_loss 0.13209
| epoch 45 | 2100/2420 batches | train_acc 0.829 train_loss 0.13464
| epoch 45 | 2400/2420 batches | train_acc 0.859 train_loss 0.11837
---------------------------------------------------------------------
| epoch 45 | time: 83.08s | valid_acc 0.749 valid_loss 0.252 | lr 0.010000
---------------------------------------------------------------------
| epoch 46 |  300/2420 batches | train_acc 0.890 train_loss 0.09126
| epoch 46 |  600/2420 batches | train_acc 0.888 train_loss 0.09891
| epoch 46 |  900/2420 batches | train_acc 0.873 train_loss 0.10826
| epoch 46 | 1200/2420 batches | train_acc 0.862 train_loss 0.11587
| epoch 46 | 1500/2420 batches | train_acc 0.863 train_loss 0.11633
| epoch 46 | 1800/2420 batches | train_acc 0.861 train_loss 0.12253
| epoch 46 | 2100/2420 batches | train_acc 0.885 train_loss 0.10478
| epoch 46 | 2400/2420 batches | train_acc 0.859 train_loss 0.11999
---------------------------------------------------------------------
| epoch 46 | time: 83.13s | valid_acc 0.764 valid_loss 0.254 | lr 0.010000
---------------------------------------------------------------------
| epoch 47 |  300/2420 batches | train_acc 0.880 train_loss 0.10184
| epoch 47 |  600/2420 batches | train_acc 0.875 train_loss 0.10861
| epoch 47 |  900/2420 batches | train_acc 0.873 train_loss 0.10799
| epoch 47 | 1200/2420 batches | train_acc 0.871 train_loss 0.11262
| epoch 47 | 1500/2420 batches | train_acc 0.860 train_loss 0.11797
| epoch 47 | 1800/2420 batches | train_acc 0.877 train_loss 0.10575
| epoch 47 | 2100/2420 batches | train_acc 0.881 train_loss 0.10026
| epoch 47 | 2400/2420 batches | train_acc 0.864 train_loss 0.11927
---------------------------------------------------------------------
| epoch 47 | time: 85.22s | valid_acc 0.779 valid_loss 0.243 | lr 0.010000
---------------------------------------------------------------------
| epoch 48 |  300/2420 batches | train_acc 0.901 train_loss 0.07928
| epoch 48 |  600/2420 batches | train_acc 0.876 train_loss 0.09973
| epoch 48 |  900/2420 batches | train_acc 0.877 train_loss 0.11181
| epoch 48 | 1200/2420 batches | train_acc 0.882 train_loss 0.10271
| epoch 48 | 1500/2420 batches | train_acc 0.885 train_loss 0.09556
| epoch 48 | 1800/2420 batches | train_acc 0.867 train_loss 0.11364
| epoch 48 | 2100/2420 batches | train_acc 0.862 train_loss 0.11825
| epoch 48 | 2400/2420 batches | train_acc 0.887 train_loss 0.10267
---------------------------------------------------------------------
| epoch 48 | time: 85.01s | valid_acc 0.764 valid_loss 0.252 | lr 0.010000
---------------------------------------------------------------------
| epoch 49 |  300/2420 batches | train_acc 0.895 train_loss 0.09962
| epoch 49 |  600/2420 batches | train_acc 0.877 train_loss 0.10977
| epoch 49 |  900/2420 batches | train_acc 0.878 train_loss 0.10843
| epoch 49 | 1200/2420 batches | train_acc 0.881 train_loss 0.10609
| epoch 49 | 1500/2420 batches | train_acc 0.894 train_loss 0.09679
| epoch 49 | 1800/2420 batches | train_acc 0.888 train_loss 0.10495
| epoch 49 | 2100/2420 batches | train_acc 0.883 train_loss 0.09637
| epoch 49 | 2400/2420 batches | train_acc 0.877 train_loss 0.10460
---------------------------------------------------------------------
| epoch 49 | time: 82.73s | valid_acc 0.760 valid_loss 0.257 | lr 0.010000
---------------------------------------------------------------------
| epoch 50 |  300/2420 batches | train_acc 0.890 train_loss 0.09739
| epoch 50 |  600/2420 batches | train_acc 0.900 train_loss 0.08256
| epoch 50 |  900/2420 batches | train_acc 0.885 train_loss 0.10728
| epoch 50 | 1200/2420 batches | train_acc 0.867 train_loss 0.10743
| epoch 50 | 1500/2420 batches | train_acc 0.882 train_loss 0.10503
| epoch 50 | 1800/2420 batches | train_acc 0.882 train_loss 0.10211
| epoch 50 | 2100/2420 batches | train_acc 0.876 train_loss 0.09971
| epoch 50 | 2400/2420 batches | train_acc 0.877 train_loss 0.10484
---------------------------------------------------------------------
| epoch 50 | time: 82.73s | valid_acc 0.761 valid_loss 0.237 | lr 0.010000
---------------------------------------------------------------------
| epoch 51 |  300/2420 batches | train_acc 0.900 train_loss 0.08516
| epoch 51 |  600/2420 batches | train_acc 0.873 train_loss 0.10698
| epoch 51 |  900/2420 batches | train_acc 0.890 train_loss 0.10267
| epoch 51 | 1200/2420 batches | train_acc 0.906 train_loss 0.07914
| epoch 51 | 1500/2420 batches | train_acc 0.889 train_loss 0.09404
| epoch 51 | 1800/2420 batches | train_acc 0.879 train_loss 0.10481
| epoch 51 | 2100/2420 batches | train_acc 0.880 train_loss 0.10114
| epoch 51 | 2400/2420 batches | train_acc 0.885 train_loss 0.09714
---------------------------------------------------------------------
| epoch 51 | time: 81.16s | valid_acc 0.731 valid_loss 0.283 | lr 0.010000
---------------------------------------------------------------------
| epoch 52 |  300/2420 batches | train_acc 0.911 train_loss 0.07339
| epoch 52 |  600/2420 batches | train_acc 0.894 train_loss 0.09092
| epoch 52 |  900/2420 batches | train_acc 0.907 train_loss 0.08407
| epoch 52 | 1200/2420 batches | train_acc 0.877 train_loss 0.10039
| epoch 52 | 1500/2420 batches | train_acc 0.885 train_loss 0.10036
| epoch 52 | 1800/2420 batches | train_acc 0.867 train_loss 0.11025
| epoch 52 | 2100/2420 batches | train_acc 0.880 train_loss 0.10413
| epoch 52 | 2400/2420 batches | train_acc 0.897 train_loss 0.08787
---------------------------------------------------------------------
| epoch 52 | time: 81.06s | valid_acc 0.777 valid_loss 0.256 | lr 0.010000
---------------------------------------------------------------------
| epoch 53 |  300/2420 batches | train_acc 0.898 train_loss 0.08878
| epoch 53 |  600/2420 batches | train_acc 0.916 train_loss 0.07625
| epoch 53 |  900/2420 batches | train_acc 0.891 train_loss 0.09333
| epoch 53 | 1200/2420 batches | train_acc 0.897 train_loss 0.09014
| epoch 53 | 1500/2420 batches | train_acc 0.892 train_loss 0.09336
| epoch 53 | 1800/2420 batches | train_acc 0.894 train_loss 0.09973
| epoch 53 | 2100/2420 batches | train_acc 0.895 train_loss 0.09534
| epoch 53 | 2400/2420 batches | train_acc 0.894 train_loss 0.08308
---------------------------------------------------------------------
| epoch 53 | time: 82.91s | valid_acc 0.794 valid_loss 0.225 | lr 0.010000
---------------------------------------------------------------------
| epoch 54 |  300/2420 batches | train_acc 0.918 train_loss 0.07868
| epoch 54 |  600/2420 batches | train_acc 0.904 train_loss 0.08181
| epoch 54 |  900/2420 batches | train_acc 0.890 train_loss 0.09652
| epoch 54 | 1200/2420 batches | train_acc 0.901 train_loss 0.08894
| epoch 54 | 1500/2420 batches | train_acc 0.907 train_loss 0.08254
| epoch 54 | 1800/2420 batches | train_acc 0.912 train_loss 0.08605
| epoch 54 | 2100/2420 batches | train_acc 0.887 train_loss 0.09603
| epoch 54 | 2400/2420 batches | train_acc 0.903 train_loss 0.08372
---------------------------------------------------------------------
| epoch 54 | time: 80.81s | valid_acc 0.770 valid_loss 0.249 | lr 0.010000
---------------------------------------------------------------------
| epoch 55 |  300/2420 batches | train_acc 0.916 train_loss 0.07836
| epoch 55 |  600/2420 batches | train_acc 0.914 train_loss 0.07156
| epoch 55 |  900/2420 batches | train_acc 0.910 train_loss 0.07900
| epoch 55 | 1200/2420 batches | train_acc 0.908 train_loss 0.08146
| epoch 55 | 1500/2420 batches | train_acc 0.896 train_loss 0.09399
| epoch 55 | 1800/2420 batches | train_acc 0.900 train_loss 0.08169
| epoch 55 | 2100/2420 batches | train_acc 0.894 train_loss 0.09631
| epoch 55 | 2400/2420 batches | train_acc 0.923 train_loss 0.06935
---------------------------------------------------------------------
| epoch 55 | time: 81.83s | valid_acc 0.777 valid_loss 0.264 | lr 0.010000
---------------------------------------------------------------------
| epoch 56 |  300/2420 batches | train_acc 0.925 train_loss 0.07211
| epoch 56 |  600/2420 batches | train_acc 0.914 train_loss 0.07324
| epoch 56 |  900/2420 batches | train_acc 0.905 train_loss 0.08824
| epoch 56 | 1200/2420 batches | train_acc 0.897 train_loss 0.08899
| epoch 56 | 1500/2420 batches | train_acc 0.921 train_loss 0.07203
| epoch 56 | 1800/2420 batches | train_acc 0.901 train_loss 0.07464
| epoch 56 | 2100/2420 batches | train_acc 0.892 train_loss 0.09230
| epoch 56 | 2400/2420 batches | train_acc 0.908 train_loss 0.08379
---------------------------------------------------------------------
| epoch 56 | time: 80.92s | valid_acc 0.793 valid_loss 0.230 | lr 0.010000
---------------------------------------------------------------------
| epoch 57 |  300/2420 batches | train_acc 0.926 train_loss 0.06351
| epoch 57 |  600/2420 batches | train_acc 0.920 train_loss 0.07490
| epoch 57 |  900/2420 batches | train_acc 0.907 train_loss 0.08814
| epoch 57 | 1200/2420 batches | train_acc 0.905 train_loss 0.08854
| epoch 57 | 1500/2420 batches | train_acc 0.902 train_loss 0.08498
| epoch 57 | 1800/2420 batches | train_acc 0.897 train_loss 0.08369
| epoch 57 | 2100/2420 batches | train_acc 0.890 train_loss 0.09669
| epoch 57 | 2400/2420 batches | train_acc 0.898 train_loss 0.08944
---------------------------------------------------------------------
| epoch 57 | time: 80.95s | valid_acc 0.776 valid_loss 0.240 | lr 0.010000
---------------------------------------------------------------------
| epoch 58 |  300/2420 batches | train_acc 0.921 train_loss 0.06808
| epoch 58 |  600/2420 batches | train_acc 0.905 train_loss 0.07949
| epoch 58 |  900/2420 batches | train_acc 0.908 train_loss 0.07559
| epoch 58 | 1200/2420 batches | train_acc 0.894 train_loss 0.09134
| epoch 58 | 1500/2420 batches | train_acc 0.914 train_loss 0.07321
| epoch 58 | 1800/2420 batches | train_acc 0.903 train_loss 0.08555
| epoch 58 | 2100/2420 batches | train_acc 0.884 train_loss 0.10364
| epoch 58 | 2400/2420 batches | train_acc 0.897 train_loss 0.09274
---------------------------------------------------------------------
| epoch 58 | time: 81.52s | valid_acc 0.773 valid_loss 0.262 | lr 0.010000
---------------------------------------------------------------------
| epoch 59 |  300/2420 batches | train_acc 0.924 train_loss 0.06483
| epoch 59 |  600/2420 batches | train_acc 0.922 train_loss 0.06247
| epoch 59 |  900/2420 batches | train_acc 0.898 train_loss 0.09298
| epoch 59 | 1200/2420 batches | train_acc 0.912 train_loss 0.07381
| epoch 59 | 1500/2420 batches | train_acc 0.907 train_loss 0.08552
| epoch 59 | 1800/2420 batches | train_acc 0.912 train_loss 0.07757
| epoch 59 | 2100/2420 batches | train_acc 0.931 train_loss 0.06169
| epoch 59 | 2400/2420 batches | train_acc 0.911 train_loss 0.07805
---------------------------------------------------------------------
| epoch 59 | time: 81.19s | valid_acc 0.785 valid_loss 0.268 | lr 0.010000
---------------------------------------------------------------------
| epoch 60 |  300/2420 batches | train_acc 0.916 train_loss 0.06315
| epoch 60 |  600/2420 batches | train_acc 0.915 train_loss 0.07857
| epoch 60 |  900/2420 batches | train_acc 0.909 train_loss 0.07548
| epoch 60 | 1200/2420 batches | train_acc 0.928 train_loss 0.06380
| epoch 60 | 1500/2420 batches | train_acc 0.920 train_loss 0.06902
| epoch 60 | 1800/2420 batches | train_acc 0.913 train_loss 0.07780
| epoch 60 | 2100/2420 batches | train_acc 0.918 train_loss 0.07845
| epoch 60 | 2400/2420 batches | train_acc 0.917 train_loss 0.07457
---------------------------------------------------------------------
| epoch 60 | time: 82.98s | valid_acc 0.769 valid_loss 0.276 | lr 0.010000
---------------------------------------------------------------------
| epoch 61 |  300/2420 batches | train_acc 0.922 train_loss 0.07288
| epoch 61 |  600/2420 batches | train_acc 0.926 train_loss 0.06420
| epoch 61 |  900/2420 batches | train_acc 0.916 train_loss 0.07647
| epoch 61 | 1200/2420 batches | train_acc 0.913 train_loss 0.07901
| epoch 61 | 1500/2420 batches | train_acc 0.910 train_loss 0.07707
| epoch 61 | 1800/2420 batches | train_acc 0.917 train_loss 0.06734
| epoch 61 | 2100/2420 batches | train_acc 0.893 train_loss 0.08693
| epoch 61 | 2400/2420 batches | train_acc 0.930 train_loss 0.06399
---------------------------------------------------------------------
| epoch 61 | time: 81.05s | valid_acc 0.783 valid_loss 0.263 | lr 0.010000
---------------------------------------------------------------------
| epoch 62 |  300/2420 batches | train_acc 0.921 train_loss 0.06561
| epoch 62 |  600/2420 batches | train_acc 0.926 train_loss 0.06935
| epoch 62 |  900/2420 batches | train_acc 0.919 train_loss 0.07428
| epoch 62 | 1200/2420 batches | train_acc 0.921 train_loss 0.06815
| epoch 62 | 1500/2420 batches | train_acc 0.913 train_loss 0.07114
| epoch 62 | 1800/2420 batches | train_acc 0.917 train_loss 0.06813
| epoch 62 | 2100/2420 batches | train_acc 0.911 train_loss 0.07596
| epoch 62 | 2400/2420 batches | train_acc 0.921 train_loss 0.07096
---------------------------------------------------------------------
| epoch 62 | time: 81.41s | valid_acc 0.782 valid_loss 0.276 | lr 0.010000
---------------------------------------------------------------------
| epoch 63 |  300/2420 batches | train_acc 0.941 train_loss 0.05660
| epoch 63 |  600/2420 batches | train_acc 0.937 train_loss 0.05008
| epoch 63 |  900/2420 batches | train_acc 0.924 train_loss 0.06738
| epoch 63 | 1200/2420 batches | train_acc 0.930 train_loss 0.06001
| epoch 63 | 1500/2420 batches | train_acc 0.922 train_loss 0.07476
| epoch 63 | 1800/2420 batches | train_acc 0.910 train_loss 0.08664
| epoch 63 | 2100/2420 batches | train_acc 0.902 train_loss 0.08759
| epoch 63 | 2400/2420 batches | train_acc 0.930 train_loss 0.06486
---------------------------------------------------------------------
| epoch 63 | time: 80.89s | valid_acc 0.771 valid_loss 0.234 | lr 0.010000
---------------------------------------------------------------------
| epoch 64 |  300/2420 batches | train_acc 0.913 train_loss 0.07576
| epoch 64 |  600/2420 batches | train_acc 0.946 train_loss 0.04881
| epoch 64 |  900/2420 batches | train_acc 0.936 train_loss 0.05455
| epoch 64 | 1200/2420 batches | train_acc 0.909 train_loss 0.08166
| epoch 64 | 1500/2420 batches | train_acc 0.932 train_loss 0.05924
| epoch 64 | 1800/2420 batches | train_acc 0.919 train_loss 0.06563
| epoch 64 | 2100/2420 batches | train_acc 0.915 train_loss 0.07134
| epoch 64 | 2400/2420 batches | train_acc 0.930 train_loss 0.06113
---------------------------------------------------------------------
| epoch 64 | time: 81.84s | valid_acc 0.770 valid_loss 0.246 | lr 0.010000
---------------------------------------------------------------------
| epoch 65 |  300/2420 batches | train_acc 0.934 train_loss 0.06279
| epoch 65 |  600/2420 batches | train_acc 0.923 train_loss 0.07141
| epoch 65 |  900/2420 batches | train_acc 0.947 train_loss 0.04880
| epoch 65 | 1200/2420 batches | train_acc 0.943 train_loss 0.05144
| epoch 65 | 1500/2420 batches | train_acc 0.925 train_loss 0.06744
| epoch 65 | 1800/2420 batches | train_acc 0.900 train_loss 0.09185
| epoch 65 | 2100/2420 batches | train_acc 0.911 train_loss 0.08608
| epoch 65 | 2400/2420 batches | train_acc 0.933 train_loss 0.06248
---------------------------------------------------------------------
| epoch 65 | time: 81.28s | valid_acc 0.786 valid_loss 0.253 | lr 0.010000
---------------------------------------------------------------------
| epoch 66 |  300/2420 batches | train_acc 0.939 train_loss 0.05374
| epoch 66 |  600/2420 batches | train_acc 0.942 train_loss 0.05049
| epoch 66 |  900/2420 batches | train_acc 0.922 train_loss 0.07498
| epoch 66 | 1200/2420 batches | train_acc 0.912 train_loss 0.08230
| epoch 66 | 1500/2420 batches | train_acc 0.923 train_loss 0.06609
| epoch 66 | 1800/2420 batches | train_acc 0.926 train_loss 0.05900
| epoch 66 | 2100/2420 batches | train_acc 0.918 train_loss 0.07689
| epoch 66 | 2400/2420 batches | train_acc 0.938 train_loss 0.05863
---------------------------------------------------------------------
| epoch 66 | time: 80.62s | valid_acc 0.779 valid_loss 0.269 | lr 0.010000
---------------------------------------------------------------------
| epoch 67 |  300/2420 batches | train_acc 0.933 train_loss 0.06135
| epoch 67 |  600/2420 batches | train_acc 0.930 train_loss 0.06048
| epoch 67 |  900/2420 batches | train_acc 0.927 train_loss 0.06220
| epoch 67 | 1200/2420 batches | train_acc 0.932 train_loss 0.06628
| epoch 67 | 1500/2420 batches | train_acc 0.925 train_loss 0.06801
| epoch 67 | 1800/2420 batches | train_acc 0.922 train_loss 0.07198
| epoch 67 | 2100/2420 batches | train_acc 0.936 train_loss 0.05591
| epoch 67 | 2400/2420 batches | train_acc 0.933 train_loss 0.05686
---------------------------------------------------------------------
| epoch 67 | time: 82.98s | valid_acc 0.803 valid_loss 0.242 | lr 0.010000
---------------------------------------------------------------------
| epoch 68 |  300/2420 batches | train_acc 0.949 train_loss 0.04409
| epoch 68 |  600/2420 batches | train_acc 0.950 train_loss 0.04891
| epoch 68 |  900/2420 batches | train_acc 0.927 train_loss 0.06206
| epoch 68 | 1200/2420 batches | train_acc 0.941 train_loss 0.04996
| epoch 68 | 1500/2420 batches | train_acc 0.939 train_loss 0.05460
| epoch 68 | 1800/2420 batches | train_acc 0.949 train_loss 0.04824
| epoch 68 | 2100/2420 batches | train_acc 0.939 train_loss 0.05672
| epoch 68 | 2400/2420 batches | train_acc 0.930 train_loss 0.06677
---------------------------------------------------------------------
| epoch 68 | time: 81.30s | valid_acc 0.791 valid_loss 0.253 | lr 0.010000
---------------------------------------------------------------------
| epoch 69 |  300/2420 batches | train_acc 0.963 train_loss 0.04278
| epoch 69 |  600/2420 batches | train_acc 0.939 train_loss 0.05643
| epoch 69 |  900/2420 batches | train_acc 0.926 train_loss 0.06480
| epoch 69 | 1200/2420 batches | train_acc 0.918 train_loss 0.06928
| epoch 69 | 1500/2420 batches | train_acc 0.922 train_loss 0.06915
| epoch 69 | 1800/2420 batches | train_acc 0.932 train_loss 0.06473
| epoch 69 | 2100/2420 batches | train_acc 0.922 train_loss 0.06379
| epoch 69 | 2400/2420 batches | train_acc 0.913 train_loss 0.07706
---------------------------------------------------------------------
| epoch 69 | time: 82.36s | valid_acc 0.788 valid_loss 0.252 | lr 0.010000
---------------------------------------------------------------------
| epoch 70 |  300/2420 batches | train_acc 0.949 train_loss 0.04331
| epoch 70 |  600/2420 batches | train_acc 0.935 train_loss 0.05568
| epoch 70 |  900/2420 batches | train_acc 0.918 train_loss 0.07939
| epoch 70 | 1200/2420 batches | train_acc 0.926 train_loss 0.06634
| epoch 70 | 1500/2420 batches | train_acc 0.943 train_loss 0.05687
| epoch 70 | 1800/2420 batches | train_acc 0.934 train_loss 0.05636
| epoch 70 | 2100/2420 batches | train_acc 0.933 train_loss 0.06485
| epoch 70 | 2400/2420 batches | train_acc 0.921 train_loss 0.07313
---------------------------------------------------------------------
| epoch 70 | time: 82.19s | valid_acc 0.755 valid_loss 0.322 | lr 0.010000
---------------------------------------------------------------------
| epoch 71 |  300/2420 batches | train_acc 0.944 train_loss 0.05253
| epoch 71 |  600/2420 batches | train_acc 0.928 train_loss 0.05949
| epoch 71 |  900/2420 batches | train_acc 0.932 train_loss 0.06402
| epoch 71 | 1200/2420 batches | train_acc 0.954 train_loss 0.04713
| epoch 71 | 1500/2420 batches | train_acc 0.916 train_loss 0.06499
| epoch 71 | 1800/2420 batches | train_acc 0.936 train_loss 0.05509
| epoch 71 | 2100/2420 batches | train_acc 0.941 train_loss 0.05373
| epoch 71 | 2400/2420 batches | train_acc 0.927 train_loss 0.06966
---------------------------------------------------------------------
| epoch 71 | time: 81.61s | valid_acc 0.789 valid_loss 0.258 | lr 0.010000
---------------------------------------------------------------------
| epoch 72 |  300/2420 batches | train_acc 0.957 train_loss 0.04377
| epoch 72 |  600/2420 batches | train_acc 0.954 train_loss 0.04152
| epoch 72 |  900/2420 batches | train_acc 0.945 train_loss 0.04506
| epoch 72 | 1200/2420 batches | train_acc 0.932 train_loss 0.06295
| epoch 72 | 1500/2420 batches | train_acc 0.944 train_loss 0.04862
| epoch 72 | 1800/2420 batches | train_acc 0.930 train_loss 0.05745
| epoch 72 | 2100/2420 batches | train_acc 0.946 train_loss 0.04683
| epoch 72 | 2400/2420 batches | train_acc 0.954 train_loss 0.04157
---------------------------------------------------------------------
| epoch 72 | time: 80.98s | valid_acc 0.798 valid_loss 0.262 | lr 0.010000
---------------------------------------------------------------------
| epoch 73 |  300/2420 batches | train_acc 0.953 train_loss 0.04062
| epoch 73 |  600/2420 batches | train_acc 0.950 train_loss 0.04890
| epoch 73 |  900/2420 batches | train_acc 0.915 train_loss 0.07666
| epoch 73 | 1200/2420 batches | train_acc 0.944 train_loss 0.05104
| epoch 73 | 1500/2420 batches | train_acc 0.924 train_loss 0.07236
| epoch 73 | 1800/2420 batches | train_acc 0.930 train_loss 0.05682
| epoch 73 | 2100/2420 batches | train_acc 0.941 train_loss 0.05178
| epoch 73 | 2400/2420 batches | train_acc 0.942 train_loss 0.05401
---------------------------------------------------------------------
| epoch 73 | time: 83.86s | valid_acc 0.790 valid_loss 0.266 | lr 0.010000
---------------------------------------------------------------------
| epoch 74 |  300/2420 batches | train_acc 0.944 train_loss 0.04933
| epoch 74 |  600/2420 batches | train_acc 0.957 train_loss 0.03281
| epoch 74 |  900/2420 batches | train_acc 0.946 train_loss 0.04826
| epoch 74 | 1200/2420 batches | train_acc 0.951 train_loss 0.04535
| epoch 74 | 1500/2420 batches | train_acc 0.954 train_loss 0.04255
| epoch 74 | 1800/2420 batches | train_acc 0.946 train_loss 0.05082
| epoch 74 | 2100/2420 batches | train_acc 0.927 train_loss 0.06743
| epoch 74 | 2400/2420 batches | train_acc 0.914 train_loss 0.07799
---------------------------------------------------------------------
| epoch 74 | time: 81.50s | valid_acc 0.794 valid_loss 0.242 | lr 0.010000
---------------------------------------------------------------------
| epoch 75 |  300/2420 batches | train_acc 0.932 train_loss 0.07306
| epoch 75 |  600/2420 batches | train_acc 0.924 train_loss 0.06987
| epoch 75 |  900/2420 batches | train_acc 0.942 train_loss 0.05354
| epoch 75 | 1200/2420 batches | train_acc 0.947 train_loss 0.05166
| epoch 75 | 1500/2420 batches | train_acc 0.932 train_loss 0.06066
| epoch 75 | 1800/2420 batches | train_acc 0.922 train_loss 0.06874
| epoch 75 | 2100/2420 batches | train_acc 0.935 train_loss 0.06380
| epoch 75 | 2400/2420 batches | train_acc 0.939 train_loss 0.04793
---------------------------------------------------------------------
| epoch 75 | time: 82.05s | valid_acc 0.743 valid_loss 0.312 | lr 0.010000
---------------------------------------------------------------------
| epoch 76 |  300/2420 batches | train_acc 0.951 train_loss 0.04294
| epoch 76 |  600/2420 batches | train_acc 0.948 train_loss 0.04888
| epoch 76 |  900/2420 batches | train_acc 0.949 train_loss 0.05162
| epoch 76 | 1200/2420 batches | train_acc 0.946 train_loss 0.04870
| epoch 76 | 1500/2420 batches | train_acc 0.953 train_loss 0.04120
| epoch 76 | 1800/2420 batches | train_acc 0.948 train_loss 0.04565
| epoch 76 | 2100/2420 batches | train_acc 0.925 train_loss 0.06437
| epoch 76 | 2400/2420 batches | train_acc 0.953 train_loss 0.04467
---------------------------------------------------------------------
| epoch 76 | time: 80.37s | valid_acc 0.799 valid_loss 0.270 | lr 0.010000
---------------------------------------------------------------------
| epoch 77 |  300/2420 batches | train_acc 0.954 train_loss 0.04750
| epoch 77 |  600/2420 batches | train_acc 0.960 train_loss 0.04160
| epoch 77 |  900/2420 batches | train_acc 0.943 train_loss 0.05258
| epoch 77 | 1200/2420 batches | train_acc 0.938 train_loss 0.06039
| epoch 77 | 1500/2420 batches | train_acc 0.937 train_loss 0.05521
| epoch 77 | 1800/2420 batches | train_acc 0.968 train_loss 0.03012
| epoch 77 | 2100/2420 batches | train_acc 0.954 train_loss 0.04110
| epoch 77 | 2400/2420 batches | train_acc 0.916 train_loss 0.07806
---------------------------------------------------------------------
| epoch 77 | time: 81.02s | valid_acc 0.795 valid_loss 0.248 | lr 0.010000
---------------------------------------------------------------------
| epoch 78 |  300/2420 batches | train_acc 0.959 train_loss 0.04072
| epoch 78 |  600/2420 batches | train_acc 0.938 train_loss 0.05479
| epoch 78 |  900/2420 batches | train_acc 0.938 train_loss 0.05694
| epoch 78 | 1200/2420 batches | train_acc 0.931 train_loss 0.06226
| epoch 78 | 1500/2420 batches | train_acc 0.938 train_loss 0.05981
| epoch 78 | 1800/2420 batches | train_acc 0.943 train_loss 0.04819
| epoch 78 | 2100/2420 batches | train_acc 0.929 train_loss 0.06151
| epoch 78 | 2400/2420 batches | train_acc 0.955 train_loss 0.04671
---------------------------------------------------------------------
| epoch 78 | time: 80.69s | valid_acc 0.776 valid_loss 0.264 | lr 0.010000
---------------------------------------------------------------------
| epoch 79 |  300/2420 batches | train_acc 0.954 train_loss 0.04263
| epoch 79 |  600/2420 batches | train_acc 0.941 train_loss 0.05204
| epoch 79 |  900/2420 batches | train_acc 0.955 train_loss 0.04191
| epoch 79 | 1200/2420 batches | train_acc 0.961 train_loss 0.03837
| epoch 79 | 1500/2420 batches | train_acc 0.968 train_loss 0.03201
| epoch 79 | 1800/2420 batches | train_acc 0.957 train_loss 0.04062
| epoch 79 | 2100/2420 batches | train_acc 0.956 train_loss 0.04040
| epoch 79 | 2400/2420 batches | train_acc 0.963 train_loss 0.03299
---------------------------------------------------------------------
| epoch 79 | time: 80.97s | valid_acc 0.796 valid_loss 0.280 | lr 0.010000
---------------------------------------------------------------------
| epoch 80 |  300/2420 batches | train_acc 0.962 train_loss 0.03409
| epoch 80 |  600/2420 batches | train_acc 0.955 train_loss 0.04004
| epoch 80 |  900/2420 batches | train_acc 0.963 train_loss 0.03365
| epoch 80 | 1200/2420 batches | train_acc 0.955 train_loss 0.04101
| epoch 80 | 1500/2420 batches | train_acc 0.953 train_loss 0.04053
| epoch 80 | 1800/2420 batches | train_acc 0.958 train_loss 0.04114
| epoch 80 | 2100/2420 batches | train_acc 0.953 train_loss 0.04739
| epoch 80 | 2400/2420 batches | train_acc 0.952 train_loss 0.04531
---------------------------------------------------------------------
| epoch 80 | time: 82.80s | valid_acc 0.796 valid_loss 0.259 | lr 0.010000
---------------------------------------------------------------------
| epoch 81 |  300/2420 batches | train_acc 0.952 train_loss 0.03755
| epoch 81 |  600/2420 batches | train_acc 0.946 train_loss 0.05063
| epoch 81 |  900/2420 batches | train_acc 0.943 train_loss 0.04992
| epoch 81 | 1200/2420 batches | train_acc 0.963 train_loss 0.03472
| epoch 81 | 1500/2420 batches | train_acc 0.932 train_loss 0.06841
| epoch 81 | 1800/2420 batches | train_acc 0.941 train_loss 0.05272
| epoch 81 | 2100/2420 batches | train_acc 0.931 train_loss 0.06627
| epoch 81 | 2400/2420 batches | train_acc 0.938 train_loss 0.05471
---------------------------------------------------------------------
| epoch 81 | time: 81.31s | valid_acc 0.803 valid_loss 0.263 | lr 0.010000
---------------------------------------------------------------------
| epoch 82 |  300/2420 batches | train_acc 0.970 train_loss 0.03158
| epoch 82 |  600/2420 batches | train_acc 0.956 train_loss 0.03843
| epoch 82 |  900/2420 batches | train_acc 0.961 train_loss 0.03785
| epoch 82 | 1200/2420 batches | train_acc 0.950 train_loss 0.04103
| epoch 82 | 1500/2420 batches | train_acc 0.943 train_loss 0.05644
| epoch 82 | 1800/2420 batches | train_acc 0.955 train_loss 0.03869
| epoch 82 | 2100/2420 batches | train_acc 0.946 train_loss 0.04829
| epoch 82 | 2400/2420 batches | train_acc 0.958 train_loss 0.03234
---------------------------------------------------------------------
| epoch 82 | time: 80.29s | valid_acc 0.806 valid_loss 0.255 | lr 0.010000
---------------------------------------------------------------------
| epoch 83 |  300/2420 batches | train_acc 0.962 train_loss 0.03743
| epoch 83 |  600/2420 batches | train_acc 0.953 train_loss 0.04388
| epoch 83 |  900/2420 batches | train_acc 0.963 train_loss 0.03126
| epoch 83 | 1200/2420 batches | train_acc 0.968 train_loss 0.03120
| epoch 83 | 1500/2420 batches | train_acc 0.953 train_loss 0.04372
| epoch 83 | 1800/2420 batches | train_acc 0.958 train_loss 0.03934
| epoch 83 | 2100/2420 batches | train_acc 0.927 train_loss 0.06562
| epoch 83 | 2400/2420 batches | train_acc 0.941 train_loss 0.05471
---------------------------------------------------------------------
| epoch 83 | time: 80.74s | valid_acc 0.804 valid_loss 0.264 | lr 0.010000
---------------------------------------------------------------------
| epoch 84 |  300/2420 batches | train_acc 0.960 train_loss 0.03130
| epoch 84 |  600/2420 batches | train_acc 0.958 train_loss 0.04253
| epoch 84 |  900/2420 batches | train_acc 0.968 train_loss 0.03181
| epoch 84 | 1200/2420 batches | train_acc 0.965 train_loss 0.03171
| epoch 84 | 1500/2420 batches | train_acc 0.952 train_loss 0.04207
| epoch 84 | 1800/2420 batches | train_acc 0.949 train_loss 0.04716
| epoch 84 | 2100/2420 batches | train_acc 0.953 train_loss 0.03981
| epoch 84 | 2400/2420 batches | train_acc 0.949 train_loss 0.04419
---------------------------------------------------------------------
| epoch 84 | time: 81.69s | valid_acc 0.788 valid_loss 0.282 | lr 0.010000
---------------------------------------------------------------------
| epoch 85 |  300/2420 batches | train_acc 0.946 train_loss 0.04681
| epoch 85 |  600/2420 batches | train_acc 0.948 train_loss 0.04537
| epoch 85 |  900/2420 batches | train_acc 0.952 train_loss 0.04129
| epoch 85 | 1200/2420 batches | train_acc 0.954 train_loss 0.04309
| epoch 85 | 1500/2420 batches | train_acc 0.949 train_loss 0.04646
| epoch 85 | 1800/2420 batches | train_acc 0.954 train_loss 0.04112
| epoch 85 | 2100/2420 batches | train_acc 0.960 train_loss 0.04051
| epoch 85 | 2400/2420 batches | train_acc 0.961 train_loss 0.03649
---------------------------------------------------------------------
| epoch 85 | time: 80.73s | valid_acc 0.807 valid_loss 0.269 | lr 0.010000
---------------------------------------------------------------------
| epoch 86 |  300/2420 batches | train_acc 0.963 train_loss 0.03688
| epoch 86 |  600/2420 batches | train_acc 0.976 train_loss 0.02525
| epoch 86 |  900/2420 batches | train_acc 0.954 train_loss 0.04125
| epoch 86 | 1200/2420 batches | train_acc 0.945 train_loss 0.05591
| epoch 86 | 1500/2420 batches | train_acc 0.953 train_loss 0.04531
| epoch 86 | 1800/2420 batches | train_acc 0.949 train_loss 0.04184
| epoch 86 | 2100/2420 batches | train_acc 0.972 train_loss 0.02967
| epoch 86 | 2400/2420 batches | train_acc 0.948 train_loss 0.04695
---------------------------------------------------------------------
| epoch 86 | time: 80.82s | valid_acc 0.800 valid_loss 0.257 | lr 0.010000
---------------------------------------------------------------------
| epoch 87 |  300/2420 batches | train_acc 0.971 train_loss 0.02505
| epoch 87 |  600/2420 batches | train_acc 0.951 train_loss 0.04329
| epoch 87 |  900/2420 batches | train_acc 0.954 train_loss 0.03493
| epoch 87 | 1200/2420 batches | train_acc 0.958 train_loss 0.03560
| epoch 87 | 1500/2420 batches | train_acc 0.958 train_loss 0.04199
| epoch 87 | 1800/2420 batches | train_acc 0.935 train_loss 0.06008
| epoch 87 | 2100/2420 batches | train_acc 0.938 train_loss 0.05810
| epoch 87 | 2400/2420 batches | train_acc 0.957 train_loss 0.04352
---------------------------------------------------------------------
| epoch 87 | time: 82.28s | valid_acc 0.792 valid_loss 0.261 | lr 0.010000
---------------------------------------------------------------------
| epoch 88 |  300/2420 batches | train_acc 0.960 train_loss 0.03488
| epoch 88 |  600/2420 batches | train_acc 0.968 train_loss 0.02972
| epoch 88 |  900/2420 batches | train_acc 0.941 train_loss 0.05666
| epoch 88 | 1200/2420 batches | train_acc 0.950 train_loss 0.03783
| epoch 88 | 1500/2420 batches | train_acc 0.968 train_loss 0.02428
| epoch 88 | 1800/2420 batches | train_acc 0.938 train_loss 0.05949
| epoch 88 | 2100/2420 batches | train_acc 0.958 train_loss 0.04124
| epoch 88 | 2400/2420 batches | train_acc 0.966 train_loss 0.02956
---------------------------------------------------------------------
| epoch 88 | time: 80.76s | valid_acc 0.803 valid_loss 0.269 | lr 0.010000
---------------------------------------------------------------------
| epoch 89 |  300/2420 batches | train_acc 0.968 train_loss 0.03499
| epoch 89 |  600/2420 batches | train_acc 0.961 train_loss 0.03806
| epoch 89 |  900/2420 batches | train_acc 0.960 train_loss 0.03646
| epoch 89 | 1200/2420 batches | train_acc 0.946 train_loss 0.04350
| epoch 89 | 1500/2420 batches | train_acc 0.961 train_loss 0.03692
| epoch 89 | 1800/2420 batches | train_acc 0.956 train_loss 0.04297
| epoch 89 | 2100/2420 batches | train_acc 0.959 train_loss 0.03494
| epoch 89 | 2400/2420 batches | train_acc 0.951 train_loss 0.03608
---------------------------------------------------------------------
| epoch 89 | time: 80.86s | valid_acc 0.799 valid_loss 0.287 | lr 0.010000
---------------------------------------------------------------------
| epoch 90 |  300/2420 batches | train_acc 0.963 train_loss 0.03076
| epoch 90 |  600/2420 batches | train_acc 0.968 train_loss 0.02549
| epoch 90 |  900/2420 batches | train_acc 0.968 train_loss 0.02899
| epoch 90 | 1200/2420 batches | train_acc 0.948 train_loss 0.04387
| epoch 90 | 1500/2420 batches | train_acc 0.973 train_loss 0.02587
| epoch 90 | 1800/2420 batches | train_acc 0.956 train_loss 0.04675
| epoch 90 | 2100/2420 batches | train_acc 0.958 train_loss 0.03955
| epoch 90 | 2400/2420 batches | train_acc 0.948 train_loss 0.04309
---------------------------------------------------------------------
| epoch 90 | time: 80.85s | valid_acc 0.798 valid_loss 0.275 | lr 0.010000
---------------------------------------------------------------------
| epoch 91 |  300/2420 batches | train_acc 0.951 train_loss 0.04534
| epoch 91 |  600/2420 batches | train_acc 0.978 train_loss 0.02108
| epoch 91 |  900/2420 batches | train_acc 0.961 train_loss 0.04049
| epoch 91 | 1200/2420 batches | train_acc 0.962 train_loss 0.03711
| epoch 91 | 1500/2420 batches | train_acc 0.973 train_loss 0.02557
| epoch 91 | 1800/2420 batches | train_acc 0.947 train_loss 0.04786
| epoch 91 | 2100/2420 batches | train_acc 0.955 train_loss 0.04236
| epoch 91 | 2400/2420 batches | train_acc 0.972 train_loss 0.02953
---------------------------------------------------------------------
| epoch 91 | time: 80.94s | valid_acc 0.791 valid_loss 0.291 | lr 0.010000
---------------------------------------------------------------------
| epoch 92 |  300/2420 batches | train_acc 0.968 train_loss 0.03492
| epoch 92 |  600/2420 batches | train_acc 0.964 train_loss 0.03184
| epoch 92 |  900/2420 batches | train_acc 0.955 train_loss 0.04372
| epoch 92 | 1200/2420 batches | train_acc 0.958 train_loss 0.03827
| epoch 92 | 1500/2420 batches | train_acc 0.959 train_loss 0.03938
| epoch 92 | 1800/2420 batches | train_acc 0.960 train_loss 0.03123
| epoch 92 | 2100/2420 batches | train_acc 0.973 train_loss 0.02855
| epoch 92 | 2400/2420 batches | train_acc 0.973 train_loss 0.02922
---------------------------------------------------------------------
| epoch 92 | time: 80.17s | valid_acc 0.814 valid_loss 0.273 | lr 0.010000
---------------------------------------------------------------------
| epoch 93 |  300/2420 batches | train_acc 0.969 train_loss 0.02448
| epoch 93 |  600/2420 batches | train_acc 0.978 train_loss 0.02336
| epoch 93 |  900/2420 batches | train_acc 0.958 train_loss 0.04480
| epoch 93 | 1200/2420 batches | train_acc 0.978 train_loss 0.02401
| epoch 93 | 1500/2420 batches | train_acc 0.973 train_loss 0.02453
| epoch 93 | 1800/2420 batches | train_acc 0.978 train_loss 0.02186
| epoch 93 | 2100/2420 batches | train_acc 0.971 train_loss 0.02963
| epoch 93 | 2400/2420 batches | train_acc 0.964 train_loss 0.03090
---------------------------------------------------------------------
| epoch 93 | time: 83.03s | valid_acc 0.806 valid_loss 0.268 | lr 0.010000
---------------------------------------------------------------------
| epoch 94 |  300/2420 batches | train_acc 0.960 train_loss 0.03757
| epoch 94 |  600/2420 batches | train_acc 0.973 train_loss 0.02292
| epoch 94 |  900/2420 batches | train_acc 0.954 train_loss 0.03829
| epoch 94 | 1200/2420 batches | train_acc 0.954 train_loss 0.04090
| epoch 94 | 1500/2420 batches | train_acc 0.963 train_loss 0.03849
| epoch 94 | 1800/2420 batches | train_acc 0.954 train_loss 0.04564
| epoch 94 | 2100/2420 batches | train_acc 0.968 train_loss 0.03245
| epoch 94 | 2400/2420 batches | train_acc 0.973 train_loss 0.02682
---------------------------------------------------------------------
| epoch 94 | time: 80.56s | valid_acc 0.805 valid_loss 0.275 | lr 0.010000
---------------------------------------------------------------------
| epoch 95 |  300/2420 batches | train_acc 0.970 train_loss 0.03042
| epoch 95 |  600/2420 batches | train_acc 0.965 train_loss 0.03148
| epoch 95 |  900/2420 batches | train_acc 0.953 train_loss 0.04832
| epoch 95 | 1200/2420 batches | train_acc 0.969 train_loss 0.03207
| epoch 95 | 1500/2420 batches | train_acc 0.963 train_loss 0.03617
| epoch 95 | 1800/2420 batches | train_acc 0.969 train_loss 0.02531
| epoch 95 | 2100/2420 batches | train_acc 0.955 train_loss 0.04204
| epoch 95 | 2400/2420 batches | train_acc 0.935 train_loss 0.05621
---------------------------------------------------------------------
| epoch 95 | time: 80.97s | valid_acc 0.795 valid_loss 0.264 | lr 0.010000
---------------------------------------------------------------------
| epoch 96 |  300/2420 batches | train_acc 0.967 train_loss 0.03227
| epoch 96 |  600/2420 batches | train_acc 0.975 train_loss 0.02206
| epoch 96 |  900/2420 batches | train_acc 0.977 train_loss 0.02024
| epoch 96 | 1200/2420 batches | train_acc 0.963 train_loss 0.03481
| epoch 96 | 1500/2420 batches | train_acc 0.968 train_loss 0.03259
| epoch 96 | 1800/2420 batches | train_acc 0.961 train_loss 0.03325
| epoch 96 | 2100/2420 batches | train_acc 0.960 train_loss 0.03615
| epoch 96 | 2400/2420 batches | train_acc 0.967 train_loss 0.03298
---------------------------------------------------------------------
| epoch 96 | time: 81.23s | valid_acc 0.813 valid_loss 0.275 | lr 0.010000
---------------------------------------------------------------------
| epoch 97 |  300/2420 batches | train_acc 0.978 train_loss 0.02161
| epoch 97 |  600/2420 batches | train_acc 0.966 train_loss 0.02856
| epoch 97 |  900/2420 batches | train_acc 0.939 train_loss 0.05448
| epoch 97 | 1200/2420 batches | train_acc 0.950 train_loss 0.04763
| epoch 97 | 1500/2420 batches | train_acc 0.955 train_loss 0.04292
| epoch 97 | 1800/2420 batches | train_acc 0.970 train_loss 0.02975
| epoch 97 | 2100/2420 batches | train_acc 0.963 train_loss 0.03453
| epoch 97 | 2400/2420 batches | train_acc 0.965 train_loss 0.02972
---------------------------------------------------------------------
| epoch 97 | time: 80.24s | valid_acc 0.795 valid_loss 0.283 | lr 0.010000
---------------------------------------------------------------------
| epoch 98 |  300/2420 batches | train_acc 0.949 train_loss 0.04678
| epoch 98 |  600/2420 batches | train_acc 0.968 train_loss 0.03154
| epoch 98 |  900/2420 batches | train_acc 0.979 train_loss 0.02056
| epoch 98 | 1200/2420 batches | train_acc 0.978 train_loss 0.02339
| epoch 98 | 1500/2420 batches | train_acc 0.950 train_loss 0.04358
| epoch 98 | 1800/2420 batches | train_acc 0.964 train_loss 0.03267
| epoch 98 | 2100/2420 batches | train_acc 0.958 train_loss 0.03132
| epoch 98 | 2400/2420 batches | train_acc 0.950 train_loss 0.04559
---------------------------------------------------------------------
| epoch 98 | time: 82.11s | valid_acc 0.798 valid_loss 0.244 | lr 0.010000
---------------------------------------------------------------------
| epoch 99 |  300/2420 batches | train_acc 0.959 train_loss 0.03348
| epoch 99 |  600/2420 batches | train_acc 0.972 train_loss 0.02780
| epoch 99 |  900/2420 batches | train_acc 0.966 train_loss 0.03377
| epoch 99 | 1200/2420 batches | train_acc 0.972 train_loss 0.02690
| epoch 99 | 1500/2420 batches | train_acc 0.974 train_loss 0.02399
| epoch 99 | 1800/2420 batches | train_acc 0.971 train_loss 0.02574
| epoch 99 | 2100/2420 batches | train_acc 0.987 train_loss 0.01601
| epoch 99 | 2400/2420 batches | train_acc 0.960 train_loss 0.03983
---------------------------------------------------------------------
| epoch 99 | time: 80.51s | valid_acc 0.812 valid_loss 0.268 | lr 0.010000
---------------------------------------------------------------------
| epoch 100 |  300/2420 batches | train_acc 0.968 train_loss 0.02928
| epoch 100 |  600/2420 batches | train_acc 0.974 train_loss 0.02237
| epoch 100 |  900/2420 batches | train_acc 0.969 train_loss 0.02943
| epoch 100 | 1200/2420 batches | train_acc 0.958 train_loss 0.04141
| epoch 100 | 1500/2420 batches | train_acc 0.964 train_loss 0.03669
| epoch 100 | 1800/2420 batches | train_acc 0.964 train_loss 0.03270
| epoch 100 | 2100/2420 batches | train_acc 0.963 train_loss 0.03624
| epoch 100 | 2400/2420 batches | train_acc 0.949 train_loss 0.04894
---------------------------------------------------------------------
| epoch 100 | time: 81.81s | valid_acc 0.802 valid_loss 0.261 | lr 0.010000
---------------------------------------------------------------------
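Looking over the full log, validation accuracy peaks at roughly 0.814 around epoch 92 while training accuracy keeps climbing past 0.96, so the later epochs mostly overfit. A simple tuning step is to checkpoint the epoch with the best validation accuracy rather than keeping only the final weights. The snippet below is only a minimal sketch: it assumes the training loop uses the train(dataloader) and evaluate(dataloader) helpers and the train_dataloader / valid_dataloader objects referenced elsewhere in this post; EPOCHS and the checkpoint file name are illustrative, not from the original code.

# Minimal best-checkpoint sketch (assumes the train/evaluate helpers and the
# dataloaders defined earlier; EPOCHS and 'best_model.pth' are illustrative).
EPOCHS = 100
best_valid_acc = 0.0

for epoch in range(1, EPOCHS + 1):
    train(train_dataloader)                          # one pass over the training set
    valid_acc, valid_loss = evaluate(valid_dataloader)

    if valid_acc > best_valid_acc:                   # keep only the best epoch
        best_valid_acc = valid_acc
        torch.save(model.state_dict(), 'best_model.pth')

print('best valid_acc: {:5.4f}'.format(best_valid_acc))

With the best epoch saved, the final evaluation can be run on those weights instead of whatever the last epoch happened to produce.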

Run the model evaluation code:

test_acc, test_loss = evaluate(valid_dataloader)
print('模型准确率为:{:5.4f}'.format(test_acc))

The output is:

模型准确率为:0.8000
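
For a quick sanity check on a single sentence, a small prediction helper can reuse the text_pipeline and label_name defined earlier. This is only a sketch, under the assumption that the trained model object is named model and that its forward signature matches the training batches produced by collate_batch, i.e. model(text, offsets):

def predict(text):
    # Minimal single-sentence inference sketch: assumes `model` is the trained
    # model whose forward takes (text, offsets), matching collate_batch above.
    model.eval()
    with torch.no_grad():
        processed = torch.tensor(text_pipeline(text), dtype=torch.int64).to(device)
        offsets   = torch.tensor([0], dtype=torch.int64).to(device)
        output    = model(processed, offsets)
        return label_name[output.argmax(1).item()]

print(predict('帮我定一个明天早上七点的闹钟'))  # returns one of the labels in label_name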