PyTorch Learning Diary (1)

Training code (train.py)


"""模型的训练"""
from dataset import get_dataloader
from models import MnistModel
from torch import optim
import conf
from tqdm import tqdm
import numpy as np
import torch
import os
from test import eval
import torch.nn as nn
import torch.nn.functional as F


# 1. Instantiate the model, optimizer, and loss function
model = MnistModel().to(conf.device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)  # plain SGD would also work
loss_func = nn.CrossEntropyLoss()
# Resume from a checkpoint if one exists (map_location keeps this working across CPU/GPU)
if os.path.exists("./models/model.pkl"):
    model.load_state_dict(torch.load("./models/model.pkl", map_location=conf.device))
    optimizer.load_state_dict(torch.load("./models/optimizer.pkl", map_location=conf.device))

# 2. Training loop
def train(epoch):
    train_dataloader = get_dataloader(train=True)
    bar = tqdm(enumerate(train_dataloader), total=len(train_dataloader))
    total_loss = []
    model.train()
    for idx, (input, target) in bar:
        input = input.to(conf.device)
        target = target.to(conf.device)
        # Zero the gradients
        optimizer.zero_grad()
        # Forward pass: compute the predictions
        output = model(input)
        # Compute the loss
        loss = loss_func(output, target)
        # Backward pass
        loss.backward()
        total_loss.append(loss.item())
        # Update the parameters
        optimizer.step()
        # Report progress and save a checkpoint every 10 batches
        if idx % 10 == 0:
            bar.set_description("epoch:{} idx:{}, loss:{:.6f}".format(epoch, idx, np.mean(total_loss)))
            torch.save(model.state_dict(), "./models/model.pkl")
            torch.save(optimizer.state_dict(), "./models/optimizer.pkl")


if __name__ == '__main__':
    for i in range(10):
        train(i)
        eval()
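
train.py (and test.py further down) import a conf module that never appears in this post. A minimal sketch of what it presumably contains, assuming only the one attribute the scripts actually use (conf.device) — everything here is my reconstruction, not the author's file:

"""conf.py -- hypothetical configuration module (not shown in the original post)"""
import torch

# Pick the GPU when one is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")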



Model code (models.py)

"""定义模型"""

import torch.nn as nn
import torch.nn.functional as F


# Fully connected version
class MnistModel(nn.Module):

    def __init__(self):
        super(MnistModel, self).__init__()
        self.fc1 = nn.Linear(1*28*28, 100)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(100, 10)

    def forward(self, image):  # [batch_size, 1, 28, 28]
        image_viewed = image.view(-1, 1*28*28)  # [batch_size, 1*28*28]
        fc1_out = self.fc1(image_viewed)        # [batch_size, 100]
        fc1_out = self.relu(fc1_out)            # [batch_size, 100]  ReLU = max(0, x); it adds the nonlinearity the network needs
        out = self.fc2(fc1_out)                 # [batch_size, 10]
        return out
        # return F.log_softmax(out, dim=-1)  # if you return log-probabilities here, train with F.nll_loss instead of nn.CrossEntropyLoss


# Convolutional version. Defining a second class with the same name shadows the
# fully connected version above; this CNN is the one the scripts actually use.
class MnistModel(nn.Module):
    def __init__(self):
        super(MnistModel, self).__init__()
        # [batch_size, 1, 28, 28] --> [batch_size, 16, 14, 14]
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=16,
                               kernel_size=5,
                               stride=2, padding=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU()
        # [batch_size, 16, 14, 14] --> [batch_size, 32, 7, 7]
        self.conv2 = nn.Conv2d(in_channels=16,
                               out_channels=32,
                               kernel_size=5,
                               stride=2, padding=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.fc = nn.Linear(32*7*7, 10)

    def forward(self, image):
        out = self.conv1(image)
        # print("self.conv1", out.shape)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        # print("self.conv2", out.shape)
        out = self.bn2(out)
        out = self.relu(out)
        out = out.view(-1, 32*7*7)
        # print("out.view", out.shape)
        out = self.fc(out)  # [batch_size, 10]
        # print("self.fc", out.shape)
        return out


if __name__ == '__main__':
    # Cross-entropy loss via the API, method 1
    import torch

    out = torch.tensor([
            [-1.0278, -3.1653, -0.8736, 0.8210, -1.4080, 2.9766, -1.0509, -1.5239, -1.0118, -1.5916],
            [-3.6254, -1.8749, -1.0033, -2.1918, -1.0601, -1.0115, -2.4043, 2.0531, 0.8111, 1.8591],
            [-3.2762, -1.0700, 3.1338, 0.3999, -1.3655, -0.7205, 0.0276, -0.0656, -0.2156, -0.4373],
            [-1.8011, -2.6836, 1.4357, 5.0335, -5.0225, 1.3052, -3.9401, 0.6612, 1.3935, -1.9710]
    ])  # [4, 10]
    label = torch.tensor([5, 3, 2, 3])
    loss_func = nn.CrossEntropyLoss()
    print(loss_func(out, label))

    # Cross-entropy loss via the API, method 2
    import torch.nn.functional as F
    # new_out = torch.softmax(out, dim=1).sum(dim=1)
    # print('softmax: ', new_out)
    new_out = F.log_softmax(out, dim=1)
    print(new_out)
    print(F.nll_loss(new_out, label))
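
    # Added check: nn.CrossEntropyLoss is exactly F.log_softmax followed by
    # F.nll_loss, so the two results above should match to floating-point precision.
    print(torch.allclose(loss_func(out, label), F.nll_loss(new_out, label)))  # True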

    # Cross-entropy computed by hand
    # predicted probabilities: [1.9750e-02, 1.7727e-03, 6.8996e-01, 9.3748e-03, 1.5107e-02, 3.8306e-03, 2.3847e-01, 7.4515e-04, 1.1290e-02, 9.7059e-03]
    # true (one-hot) probabilities: [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.]
    new_out = torch.softmax(out, dim=1)
    print('softmax: ', new_out)
    new_out = torch.log(new_out)
    print('log: ', new_out)
    # print('log_softmax: ', F.log_softmax(out, dim=1))
    encode_result = F.one_hot(label, 10).float()
    print('one_hot: ', encode_result)
    print(- (new_out * encode_result).sum() / len(out))
    # print(F.nll_loss(new_out, label))
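
    # Added note: the torch.log(torch.softmax(x)) route used above can underflow
    # to -inf for very negative logits, whereas F.log_softmax uses the log-sum-exp
    # trick and stays finite. A tiny illustration:
    extreme = torch.tensor([[1000.0, 0.0]])
    print(torch.log(torch.softmax(extreme, dim=1)))  # tensor([[0., -inf]])
    print(F.log_softmax(extreme, dim=1))             # tensor([[0., -1000.]])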

    # Softmax computed by hand
    # print(torch.exp(torch.tensor(0, dtype=torch.float32)))
    exp_out_fenzi = torch.exp(out)              # numerator: exp of every logit
    print(exp_out_fenzi)
    exp_out_fenmu = torch.exp(out).sum(dim=1)   # denominator: row-wise sum of the exps
    print(exp_out_fenmu)
    exp_out_fenmu = exp_out_fenmu.view(-1, 1)   # reshape to a column so broadcasting divides row-wise
    print(exp_out_fenmu)
    new_out = exp_out_fenzi / exp_out_fenmu
    print(new_out)


    # A small broadcasting demo: dividing a [4, 4] tensor by a [4, 1] column
    # divides each row by its own scalar, exactly what the softmax above relies on.
    t1 = torch.tensor([
        [2, 2, 3, 4],
        [1, 6, 3, 4],
        [1, 2, 6, 4],
        [1, 2, 3, 8],
    ], dtype=torch.float32)
    t2 = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
    t2 = t2.view(4, 1)
    print(t2)
    print(t1 / t2)
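
The commented-out print calls in the CNN's forward suggest checking the shape annotations by hand; a quick way to do that (a sketch, run from the project directory) is to push a dummy batch through the model:

import torch
from models import MnistModel

model = MnistModel()
dummy = torch.randn(8, 1, 28, 28)  # a fake batch of 8 grayscale 28x28 images
print(model(dummy).shape)          # expected: torch.Size([8, 10])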

Dataset code (dataset.py)

"""准备数据集"""
import cv2
import torchvision
import numpy as np
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision import transforms
import matplotlib.pyplot as plt


def imshow(inp):
    inp = inp.numpy().transpose((1, 2, 0))
    plt.imshow(inp)
    plt.pause(0.001)


if __name__ == '__main__':

    # mnist1 = MNIST(root="./data", train=True, download=True, transform=transforms.ToTensor())
    # mnist2 = MNIST(root="./data", train=False, download=False, transform=None)
    # # DataLoader wraps a dataset: indexing such as mnist1[0] goes through __getitem__,
    # # and batch_size sets how many samples come out per iteration.
    # dataloader = DataLoader(mnist1, batch_size=32, shuffle=True)
    # # drop_last: with a dataset of 100 samples and batch_size=30, drop_last=False
    # # yields batches of 30, 30, 30, 10; drop_last=True keeps only the three full batches.
    # # num_workers: 0 loads in the main process (serial), > 0 loads in worker
    # # subprocesses (parallel), and a negative value raises an error.
    # print(mnist1[0])    # __getitem__ returns one (image, label) pair
    # print(len(mnist1))  # 60,000 training images
    # print(len(mnist2))  # 10,000 test images
    # mnist3 = mnist1 + mnist2  # datasets can be concatenated
    # print(len(mnist3))
    #
    # for images, labels in dataloader:
    #     print(images.shape)  # torch.Size([32, 1, 28, 28]): batch, channels, height, width
    #                          # (3 channels for RGB with values 0-255, 1 for grayscale)
    #     print(labels.shape)  # torch.Size([32]): one label per image in the batch
    #     print(labels)        # the 32 individual labels
    #     out = torchvision.utils.make_grid(images)  # tile the batch into one grid image
    #     imshow(out)          # display it
    #     break
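
    # Added demo of the drop_last behaviour described above (uses nothing beyond PyTorch):
    import torch
    from torch.utils.data import TensorDataset
    ds = TensorDataset(torch.arange(100))
    print([len(b[0]) for b in DataLoader(ds, batch_size=30, drop_last=False)])  # [30, 30, 30, 10]
    print([len(b[0]) for b in DataLoader(ds, batch_size=30, drop_last=True)])   # [30, 30, 30]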

    # 80 * 35  (w * h)
    im = np.fromfile(r"D:\PyProject\不定长文本识别\samples\train\0I9KC_af6ce971cc2f412d412e8a68b17b0fb9.jpg", dtype=np.uint8)  # raw string avoids escape problems; uint8 = unsigned 8-bit, channel values 0-255
    print(im.shape)   # a flat 1-D byte buffer, NOT h*w*c yet
    im = cv2.imdecode(im, -1)  # decode the JPEG bytes with OpenCV
    print(im.shape)   # (35, 80, 3) = H, W, C -- but PyTorch wants C, H, W, hence the axis swap below

    print(transforms.ToTensor()(im).shape)  # (3, 35, 80); ToTensor does the HWC -> CHW swap (and scales uint8 values to [0, 1])

    # h w c  -->  c h w
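
Both train.py and test.py import get_dataloader from this file, but the post never shows it. A minimal sketch of what it presumably looks like; the Normalize constants are the standard MNIST mean/std and the batch size is a guess, both assumptions on my part:

def get_dataloader(train=True):
    """Hypothetical reconstruction -- not shown in the original post."""
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),  # standard MNIST mean/std (assumed)
    ])
    dataset = MNIST(root="./data", train=train, download=True, transform=transform)
    return DataLoader(dataset, batch_size=128, shuffle=train)  # batch size assumed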

Evaluation code (test.py)

"""模型的评估"""

from dataset import get_dataloader
from models import MnistModel
import torch.nn.functional as F
import conf
import numpy as np
import torch
import os
import torch.nn as nn
import matplotlib.pyplot as plt
import torchvision


def eval():  # evaluation mode
    # 1. Instantiate the model and loss function
    model = MnistModel().to(conf.device)  # MnistModel subclasses nn.Module
    loss_func = nn.CrossEntropyLoss()
    if os.path.exists("./models/model.pkl"):
        model.load_state_dict(torch.load("./models/model.pkl", map_location=conf.device))

    test_dataloader = get_dataloader(train=False)
    total_loss = []
    total_acc = []
    model.eval()
    with torch.no_grad():  # no gradients needed here; gradients only exist to update parameters during training
        for input, target in test_dataloader:  # 2. Loop over the test set
            # out = torchvision.utils.make_grid(input)
            # imshow(out)

            input = input.to(conf.device)
            target = target.to(conf.device)
            # Compute the predictions
            output = model(input)  # [batch_size, 10], e.g. one row like [0, 0.9, ...]
            # Compute the loss
            # loss = F.nll_loss(output, target)
            loss = loss_func(output, target)
            total_loss.append(loss.item())  # collect per-batch losses; averaged at the end

            # Compute the accuracy. How?
            pred = output.max(dim=1)[1]   # max returns (values, indices); [1] takes the indices, i.e. the predicted classes, e.g. [5, 7, 2, 8]
            total_acc.append(pred.eq(target).float().mean().item())

            # print(pred)
            # break

    print("test loss:{},test acc:{}".format(np.mean(total_loss), np.mean(total_acc)))


def imshow(inp):
    inp = inp.numpy().transpose((1, 2, 0))
    plt.imshow(inp)
    plt.pause(0.001)


if __name__ == '__main__':
    eval()
    # A small demo of how the accuracy above is computed:
    t1 = torch.tensor([
        [0, 0.98, 0, 0, 0, 0, 0, 0, 0, 0.02],
        [0, 0, 0.98, 0, 0, 0, 0, 0, 0, 0.02]
    ])
    print(t1.shape)
    # max(dim=1) reduces the 2x10 matrix along the 10-direction and
    # returns two tensors: the max values and their indices
    pred_v, pred_i = t1.max(dim=1)
    print(pred_v, pred_i)
    print(pred_i.eq(torch.tensor([1, 1])).sum())  # count of correct predictions
