Common LibTorch Code

https://zhuanlan.zhihu.com/p/369928669

https://www.cnblogs.com/yanghailin/p/12901586.html

// Load a traced model and run inference (segmentation-style post-processing).
torch::DeviceType device = torch::kCUDA;

torch::jit::script::Module module;
try {
  // Deserialize the ScriptModule from a file using torch::jit::load().
  module = torch::jit::load("traced_model.pt");
} catch (const c10::Error &e) {
  std::cerr << "error loading the model\n";
}
module.to(device);

// Convert the OpenCV image from BGR to RGB (cv::COLOR_BGR2RGB is the
// OpenCV 4 replacement for the legacy CV_BGR2RGB constant).
cv::cvtColor(image, input, cv::COLOR_BGR2RGB);

// tensor_image is assumed to have been built from the image, e.g. with
// torch::from_blob as in the template further below.
tensor_image = tensor_image.to(torch::kCUDA);

// The model returns a tuple; its first element is the prediction tensor.
torch::Tensor result = module.forward({ tensor_image }).toTuple()->elements()[0].toTensor();

// Per-pixel argmax over the class dimension, then back to the CPU as 8-bit.
torch::Tensor pred = result.argmax(1);
pred = pred.squeeze();
pred = pred.to(torch::kU8);
pred = pred.to(torch::kCPU);
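
If pred is a single-channel segmentation mask (shape H x W after the squeeze), it can be wrapped in a cv::Mat for visualization. A minimal sketch, assuming the tensor stays alive while the Mat is used and that pred_mask.png is just an illustrative output path:

// Wrap the CPU kU8 prediction tensor in an OpenCV Mat (no copy is made here).
pred = pred.contiguous();
int height = static_cast<int>(pred.size(0));
int width  = static_cast<int>(pred.size(1));
cv::Mat mask(height, width, CV_8UC1, pred.data_ptr<uint8_t>());
cv::imwrite("pred_mask.png", mask * 20);  // scale the class ids so they are visible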

When LibTorch is used inside a Qt project, Qt's slots macro collides with identifiers in the torch headers, so undefine it before the includes and restore it afterwards:
#include "QtGuiApplication2.h"
#undef slots
#include <torch/script.h>
#include<torch/torch.h>
#define slots Q_SLOTS
#include<iostream>
using namespace std;
QtGuiApplication2::QtGuiApplication2(QWidget *parent)
	: QMainWindow(parent)
{
	ui.setupUi(this);
	std::cout << torch::cuda::cudnn_is_available() << endl;//输出为1,成功
	std::cout << torch::cuda::is_available()<<endl;//输出为1,成功
	torch::jit::script::Module module = torch::jit::load("F:/pytorch-gpu.pt",torch::kCUDA);
	module.to(at::kCUDA);
	torch::NoGradGuard on_grad;
	std::vector<torch::jit::IValue> inputs;
	inputs.push_back(torch::ones({ 1, 3,512, 512 }, torch::kCUDA));
	at::Tensor output = module.forward(inputs).toTensor();
	std::cout << output << std::endl;
}

Generating the model in PyTorch

from torchvision.models import resnet34
import torch.nn.functional as F
import torch.nn as nn
import torch
import cv2

# Read an image and convert it to a normalized [1, 3, 224, 224] float tensor
# (cv2 loads BGR; ImageNet mean/std normalization is omitted here for brevity)
image = cv2.imread("flower.jpg")
image = cv2.resize(image, (224, 224))
input_tensor = torch.tensor(image).permute(2, 0, 1).unsqueeze(0).float() / 255.0

# Define resnet34 and load the ImageNet-pretrained weights
model = resnet34(pretrained=True)
model.eval()
# Check the model's prediction for this image
output = model(input_tensor)
output = F.softmax(output, 1)
print("Predicted class {} with confidence {}".format(torch.argmax(output), output.max()))

# Export the traced .pt model, following the official tutorial
model = model.to(torch.device("cpu"))
model.eval()
var = torch.ones((1, 3, 224, 224))
traced_script_module = torch.jit.trace(model, var)
traced_script_module.save("resnet34.pt")

Common LibTorch code template

#include <opencv2/opencv.hpp>
#include <torch/torch.h>
#include <torch/script.h>
using namespace std;

int main()
{
	//std::cout << torch::cuda::cudnn_is_available() << endl;  // prints 1 if cuDNN is available
	//std::cout << torch::cuda::is_available() << endl;        // prints 1 if CUDA is available
	// Use CUDA device 0
	auto device = torch::Device(torch::kCUDA, 0);
	// Read the image
	auto image = cv::imread("E:\\WorkPlace\\PycharmProjects\\ModelTrainingSystem\\flower.png");
	// Resize to the input size expected by the model
	cv::resize(image, image, cv::Size(224, 224));
	// Convert to a [1, 3, 224, 224] float tensor, matching the Python preprocessing
	auto input_tensor = torch::from_blob(image.data, { image.rows, image.cols, 3 }, torch::kByte)
		.permute({ 2, 0, 1 }).unsqueeze(0).to(torch::kFloat32) / 255.0;
	// Load the traced model
	auto model = torch::jit::load("E:\\WorkPlace\\PycharmProjects\\ModelTrainingSystem\\resnet34.pt");
	model.eval();
	// Move the model and the input to the GPU defined above
	model.to(device);
	input_tensor = input_tensor.to(device);
	// Forward pass
	auto output = model.forward({ input_tensor }).toTensor();
	output = torch::softmax(output, 1);
	std::cout << "Predicted class " << torch::argmax(output).item<int64_t>()
		<< " with confidence " << output.max().item<float>() << std::endl;
	return 0;
}

Common tensor operations

auto t = torch::zeros({3,4});
t = torch::ones({3,4});
t = torch::tensor({33,22,11});
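
A few more operations that come up constantly. This is a minimal sketch; the tensor names are illustrative only:

auto a = torch::rand({3, 4});
auto b = a.to(torch::kCUDA);            // move to the GPU (requires a CUDA build)
auto c = a.reshape({4, 3});             // change shape
auto d = a.index({0});                  // first row
auto e = a.slice(/*dim=*/1, /*start=*/0, /*end=*/2);  // first two columns
auto f = torch::cat({a, a}, /*dim=*/0); // concatenate along dim 0
float v = a[0][0].item<float>();        // read a scalar back into C++
std::cout << a.sizes() << " " << a.dtype() << std::endl;  // inspect shape and dtype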

https://zhuanlan.zhihu.com/p/369930315

Building basic modules
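
A minimal sketch of defining a custom module with the C++ frontend; SimpleNet and the layer sizes are hypothetical illustrations, not taken from the linked article:

#include <torch/torch.h>

// A tiny two-layer MLP defined with the LibTorch C++ frontend.
struct SimpleNetImpl : torch::nn::Module {
  SimpleNetImpl(int64_t in_dim, int64_t hidden, int64_t out_dim) {
    fc1 = register_module("fc1", torch::nn::Linear(in_dim, hidden));
    fc2 = register_module("fc2", torch::nn::Linear(hidden, out_dim));
  }
  torch::Tensor forward(torch::Tensor x) {
    x = torch::relu(fc1->forward(x));
    return fc2->forward(x);
  }
  torch::nn::Linear fc1{nullptr}, fc2{nullptr};
};
TORCH_MODULE(SimpleNet);  // generates the SimpleNet holder from SimpleNetImpl

// Usage:
//   SimpleNet net(784, 128, 10);
//   auto y = net->forward(torch::rand({32, 784}));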

Using the data loading module
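
A minimal sketch of the built-in dataset and data-loader machinery, using the bundled MNIST dataset; the ./mnist path is an assumption:

#include <torch/torch.h>
#include <iostream>

int main() {
  // Load MNIST (expects the raw files under ./mnist), normalize each image,
  // and stack individual samples into batched tensors.
  auto dataset = torch::data::datasets::MNIST("./mnist")
                     .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
                     .map(torch::data::transforms::Stack<>());

  // Build a data loader that shuffles and yields batches of 64.
  auto loader = torch::data::make_data_loader<torch::data::samplers::RandomSampler>(
      std::move(dataset), /*batch_size=*/64);

  for (auto& batch : *loader) {
    std::cout << batch.data.sizes() << " " << batch.target.sizes() << std::endl;
    break;  // just show the first batch
  }
}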

Building, training, and running inference with a classification model
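
A minimal sketch of a classification training loop, reusing the hypothetical SimpleNet and MNIST loader from the two sketches above; the hyperparameters are arbitrary:

// Train the hypothetical SimpleNet on the MNIST loader from above.
torch::Device device(torch::cuda::is_available() ? torch::kCUDA : torch::kCPU);
SimpleNet net(784, 128, 10);
net->to(device);
torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.01);

for (int epoch = 0; epoch < 5; ++epoch) {
  net->train();
  for (auto& batch : *loader) {
    auto data   = batch.data.reshape({batch.data.size(0), 784}).to(device);
    auto target = batch.target.to(device);

    optimizer.zero_grad();
    auto output = net->forward(data);
    auto loss = torch::nll_loss(torch::log_softmax(output, /*dim=*/1), target);
    loss.backward();
    optimizer.step();
  }
  std::cout << "epoch " << epoch << " finished" << std::endl;
}

// Prediction: take the argmax over the class dimension.
net->eval();
torch::NoGradGuard no_grad;
auto pred = net->forward(torch::rand({1, 784}).to(device)).argmax(1);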

Building, training, and running inference with a segmentation model

Building, training, and running inference with an object detection model
