3.7. Concise Implementation of Softmax Regression

PyTorch version

Model construction and training

import torch
from torch import nn
from d2l import torch as d2l

import matplotlib.pyplot as plt


print(torch.__version__)
print(torch.cuda.is_available())



batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
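# Sanity check (added for illustration, not in the original): each batch yielded by the
# loader is an (images, labels) pair; images have shape [256, 1, 28, 28], labels [256].
for X, y in train_iter:
    print(X.shape, X.dtype, y.shape, y.dtype)
    break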


# Define the model: flatten each 28x28 input image into a 784-dim vector,
# then map it to 10 class scores with a single linear layer
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
print(net)

# for param in net.named_parameters():
#     print(param)

# Initialize model parameters

def init_weights(m):
    # Initialize each linear layer: weights ~ N(0, 0.01), biases = 0
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, mean=0.0, std=0.01)
        nn.init.constant_(m.bias, val=0.0)

net.apply(init_weights)

# Load model weights from a previously saved checkpoint
# (assumed to come from an earlier 100-epoch run; skip this on a first run)
state_dict = torch.load('./model_Softmax100.pth')
net.load_state_dict(state_dict['model'])

# for param in net.named_parameters():
#     print(type(param))


# Loss function: cross-entropy; reduction='none' returns per-example losses,
# which the d2l training loop then reduces itself
loss = nn.CrossEntropyLoss(reduction='none')
# Optimization algorithm: minibatch SGD with learning rate 0.1
trainer = torch.optim.SGD(net.parameters(), lr=0.1)

# Training
num_epochs = 200
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)

# Save model parameters
torch.save({'model': net.state_dict()}, './model_Softmax200.pth')
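The checkpoint above stores only the model weights under the 'model' key. When training is interrupted and resumed (as done above with model_Softmax100.pth), it can also help to checkpoint the optimizer state. A minimal sketch; the extra 'optimizer' key and the file name model_Softmax200_full.pth are illustrative additions, not part of the original workflow:

# Hypothetical variant: save both model and optimizer state so training can resume exactly
checkpoint = {
    'model': net.state_dict(),
    'optimizer': trainer.state_dict(),
}
torch.save(checkpoint, './model_Softmax200_full.pth')

# Later, to resume training:
checkpoint = torch.load('./model_Softmax200_full.pth')
net.load_state_dict(checkpoint['model'])
trainer.load_state_dict(checkpoint['optimizer'])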

Model prediction

import torch
from torch import nn
from d2l import torch as d2l

import matplotlib.pyplot as plt


print(torch.__version__)
print(torch.cuda.is_available())


def predict_ch3(net, test_iter, n=6):
    """Predict labels (defined in Chapter 3)."""
    # Take one batch of test images and labels
    for X, y in test_iter:
        break
    pre = net(X).argmax(axis=1)
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(pre)
    print(pre, pre.shape)

    # Count how many predictions in this batch are correct
    num = int((pre == y).sum())
    print("num", num)

    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]

    # Convert the PyTorch model to TorchScript so it can be loaded from LibTorch
    traced_script_module = torch.jit.trace(net, X)
    traced_script_module.save("./pytorch_Libtorch_Softmax200.pt")

    print(trues, "\n", preds, "\n", titles)
    d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])



batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)


# Define the model (same architecture as used for training)
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
print(net)

# Load the trained model weights
state_dict = torch.load('./model_Softmax200.pth')
net.load_state_dict(state_dict['model'])

# Predict
predict_ch3(net, test_iter)
plt.show()
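Before moving on to LibTorch, it can help to sanity-check the exported TorchScript file in Python. A minimal sketch, assuming the traced module was saved to ./pytorch_Libtorch_Softmax200.pt as above:

# Reload the traced module and compare it with the eager-mode network
traced = torch.jit.load("./pytorch_Libtorch_Softmax200.pt")
with torch.no_grad():
    for X, y in test_iter:
        break
    diff = (traced(X) - net(X)).abs().max()
    print("max abs difference between traced and eager outputs:", diff.item())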

Prediction results

(Figure: the first six Fashion-MNIST test images, each titled with the true label above the predicted label.)

LibTorch version

The utils.h file

#pragma once
#include<iostream>
#include<vector>
#include<string>
#include<torch/script.h>
#include<torch/torch.h>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;

// Get the label names for the Fashion-MNIST dataset
vector<string> get_fashion_mnist_labels(vector<int> label);
// Display images with their labels
void show_images(torch::data::Example<> image, int num, vector<string> total_labels);
// Convert a torch::Tensor to a cv::Mat
cv::Mat tensor2Mat(torch::Tensor& i_tensor);

The corresponding .cpp file

#include "utils.h"



cv::Mat tensor2Mat(torch::Tensor& i_tensor)
{
    int height = i_tensor.size(0), width = i_tensor.size(1);

    // Make sure the tensor is float, on the CPU, and laid out contiguously before wrapping it
    auto out_tensor = i_tensor.to(torch::kF32).to(torch::kCPU).contiguous();
    // data_ptr() returns the address of the tensor's first element;
    // clone() so the returned Mat owns its data instead of aliasing the tensor
    cv::Mat o_Mat(cv::Size(width, height), CV_32F, out_tensor.data_ptr());
    return o_Mat.clone();
}




// batch.data has shape [8, 1, 28, 28]
void show_images(torch::data::Example<> image, int num, vector<string> total_labels) {
    auto image_data = image.data;
    cout << image_data.sizes() << endl;

    for (int i = 0; i < num; i++) {
        // Convert one image from [C,H,W] to [H,W,C] before wrapping it in a cv::Mat
        auto image_data1 = image.data[i].permute({ 1,2,0 });

        cout << "****************************" << endl;
        cv::Mat o_Mat = tensor2Mat(image_data1);
        cout << o_Mat.size() << endl;

        // Enlarge the 28x28 image so it is easier to see
        cv::resize(o_Mat, o_Mat, Size(512, 512));
        // The image is single-channel grayscale; convert to 3-channel BGR for display
        cv::cvtColor(o_Mat, o_Mat, COLOR_GRAY2BGR);
        cout << o_Mat.size() << endl;

        // Show the image in a window titled with its true/predicted labels
        string name = total_labels[i];
        cv::imshow(name, o_Mat);
        cv::waitKey(0);
        cv::destroyWindow(name);
    }
}

vector<string> get_fashion_mnist_labels(vector<int> label) {

    vector<string> text_labels = { "t-shirt", "trouser", "pullover", "dress", "coat", "sandal", "shirt", "sneaker", "bag", "ankle boot" };

    // Map each numeric label to its class name
    vector<string> text_labels_res;
    for (auto index : label) {
        text_labels_res.push_back(text_labels[index]);
    }

    return text_labels_res;
}

main.cpp

#include<torch/script.h>
#include<torch/torch.h>
#include <iostream>
#include <Eigen/Dense>
#include <vector>
#include "utils.h"
using namespace std;


int main() {

    cout << "安装的 LibTorch 版本 : "
        << TORCH_VERSION_MAJOR << "."
        << TORCH_VERSION_MINOR << "."
        << TORCH_VERSION_PATCH << endl;
    cout << "对应的 PyTorch 版本: " << TORCH_VERSION << endl;


    // Load the traced TorchScript model exported from PyTorch
    torch::jit::script::Module model;
    model = torch::jit::load("pytorch_Libtorch_Softmax200.pt");
    model.eval();
    // Disable gradient tracking during inference
    torch::NoGradGuard no_grad;


    int batch_size = 8;

    // Use a data loader to load the Fashion-MNIST test set and set the batch size.
    // The Stack transform collates a batch of samples into a single tensor along a new first dimension.
    auto dataset = torch::data::datasets::MNIST("E:\\pytorch_libtorch\\data\\FashionMNIST\\raw\\", torch::data::datasets::MNIST::Mode::kTest)
        .map(torch::data::transforms::Stack<>());


    auto data_loader = torch::data::make_data_loader(
        std::move(dataset),
        torch::data::DataLoaderOptions().batch_size(batch_size).workers(0));
    

    
    for (torch::data::Example<>& batch : *data_loader)
    {
        auto img = batch;

        cout << "batch.data shape: " << batch.data.sizes() << "\t" << batch.data.dtype() << "\t" << batch.data[0].sizes() << endl;
        cout << typeid(batch).name() << endl;
        cout << typeid(batch.data[0]).name() << endl;
        cout << "****************" << endl;

        // Run the trained network on the test data
        auto outputs = model.forward({ batch.data });
        cout << typeid(outputs).name() << endl;

        // Ground-truth labels, 0 ~ 9
        auto labels = batch.target;
        cout << "true:" << labels << labels.sizes() << labels.dtype() << endl;
        cout << "****************" << endl;

        // Predicted labels, 0 ~ 9: take the argmax over the 10 class scores
        auto outputTensor = outputs.toTensor();
        auto output_max = outputTensor.argmax(1);
        cout << "output_max" << output_max << output_max.sizes() << output_max.dtype() << endl;

        cout << "****************" << endl;
        cout << labels[0].item<int>() << " " << output_max[0].item<int>() << endl;
        
       
        int s = labels.sizes()[0];
        cout << "s: " << s << endl;

        // Store the numeric label indices
        vector<int> truelabel;
        vector<int> prelabel;

        for (int i = 0; i < s; i++) {
            prelabel.push_back(output_max[i].item<int>());
            truelabel.push_back(labels[i].item<int>());
        }

        // Count how many predictions in this batch are correct
        int n = 0;
        for (int i = 0; i < s; i++) {
            if (labels[i].item<int>() == output_max[i].item<int>()) {
                n++;
            }
        }

        cout << "num: " << n << endl;

        // Map numeric labels to class names
        vector<string> trues = get_fashion_mnist_labels(truelabel);
        vector<string> pre = get_fashion_mnist_labels(prelabel);

        cout << "truelabel: " << truelabel.size() << endl;
        cout << "prelabel: " << prelabel.size() << endl;

    
        // Combine the true and predicted class names into window titles
        vector<string> total_labels;
        std::transform(trues.begin(), trues.end(), pre.begin(),
            std::back_inserter(total_labels),
            [](const auto& aa, const auto& bb)
            {
                return string("true: " + aa + "    " + "pre: " + bb);
            });

        cout << total_labels.size() << endl;

        // Display the images with their titles
        show_images(img, batch_size, total_labels);
        break;
    }
   
	system("pause");
	return 0;
}

Model inference results

(Figures: Fashion-MNIST test images displayed one at a time by show_images, each window titled with the true and predicted class names.)
