1. Visual Studio 安装使用onnxruntime
Visual Studio 安装onnxruntime,工具->Nuget包管理->点管理台后输入如第二张图 PM> xxx, 等下载好后就可以在本项目的目录中看到package文件夹中的onnxruntime了
2. 模型推理代码
首先配置好项目的依赖 属性中配置include和lib,如下二图。再找到onnxruntime.dll copy到项目根目录下。(注不清楚这一步做不做有没有关系,因为后来我发现vs中自己导入了onnxruntime.dll,onnxruntime_providers_shared.dll这两个文件)
代码如下:
c++接口 导出成dll后还依赖源码
//Print the number of input/output nodes of the model and the shape of
//every input/output tensor.
//@param session   initialized ONNX Runtime session to inspect
//@param allocator currently unused; kept so existing call sites keep working
void printModelInfo(Ort::Session &session, Ort::AllocatorWithDefaultOptions &allocator)
{
    //number of model input/output nodes
    size_t num_input_nodes = session.GetInputCount();
    size_t num_output_nodes = session.GetOutputCount();
    cout << "Number of input node is:" << num_input_nodes << endl;
    cout << "Number of output node is:" << num_output_nodes << endl;
    //dump every input shape; size_t indices avoid the signed/unsigned
    //comparison warning the original `auto i = 0` (int) produced
    for (size_t i = 0; i < num_input_nodes; i++)
    {
        std::vector<int64_t> input_dims = session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
        cout << endl << "input " << i << " dim is: ";
        for (size_t j = 0; j < input_dims.size(); j++)
            cout << input_dims[j] << " ";
    }
    //dump every output shape
    for (size_t i = 0; i < num_output_nodes; i++)
    {
        std::vector<int64_t> output_dims = session.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
        cout << endl << "output " << i << " dim is: ";
        for (size_t j = 0; j < output_dims.size(); j++)
            cout << output_dims[j] << " ";
    }
    cout << endl;
    //input_dims_2[0] = input_dims_1[0] = output_dims[0] = 1;//batch size = 1
}
//End-to-end ONNX Runtime C++-API demo: load the denoising model, feed one
//image as a uint8 NHWC tensor, run the session, and display input/output.
//No return value; shows two OpenCV windows and blocks on waitKey.
void testImgenDenosing() {
    //refer: https://blog.csdn.net/qq_42995327/article/details/122622222
    // initialize environment...one environment per process
    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");
    // initialize session options if needed
    Ort::SessionOptions session_options;
    session_options.SetInterOpNumThreads(1);
    Ort::AllocatorWithDefaultOptions allocator;
    //ONNX Runtime takes a wide-char path on Windows, a narrow one elsewhere
#ifdef _WIN32
    const wchar_t* model_path = L"D:\\Documents\\projects\\Image-Denoising-using-Deep-Learning\\model.onnx";
#else
    const char* model_path = "D:\\Documents\\projects\\Image-Denoising-using-Deep-Learning\\model.onnx";
#endif
    //alternative model kept for reference:
    //  best_REDNet_blindnoise_256x256.onnx in the same directory
    Ort::Session session(env, model_path, session_options);
    Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
    // print number of model input nodes
    size_t num_input_nodes = session.GetInputCount();
    cout << num_input_nodes << endl;
    printModelInfo(session, allocator);
    //prints input 0's ONNXTensorElementDataType enum value, e.g.
    //1 = FLOAT, 2 = UINT8, 3 = INT8, ..., 11 = DOUBLE
    //(full list in onnxruntime_c_api.h, enum ONNXTensorElementDataType)
    cout << session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetElementType() << endl;
    //model input dimensions: fixed 256x256x3, NHWC
    static constexpr const int width = 256;
    static constexpr const int height = 256;
    static constexpr const int channel = 3;
    std::array<int64_t, 4> input_shape_{ 1,width, height,channel };
    Mat img_1 = cv::imread("C:\\Users\\cls\\Downloads\\240px-NIND_BruegelLibraryS1_ISOH1.png");
    //channel swap BGR -> RGB
    cv::cvtColor(img_1, img_1, COLOR_BGR2RGB);
    Mat Input_1;
    resize(img_1, Input_1, Size(width, height));
    //channel split (alternative channel-reorder approach, kept for reference;
    //this line was bare text in the original and broke compilation)
    //vector<Mat> channels;
    //split(Input_1, channels);
    //Mat imageBlueChannel;
    //Mat imageGreenChannel;
    //Mat imageRedChannel;
    //imageBlueChannel = channels.at(0);
    //imageGreenChannel = channels.at(1);
    //imageRedChannel = channels.at(2);
    //channels.at(0) = imageRedChannel;
    //channels.at(2) = imageBlueChannel;
    //merge(channels, Input_1);
    std::vector<const char*> input_node_names = { "input_layer" };
    std::vector<const char*> output_node_names = { "add_3" };
    //NOTE(review): 256*256*3 ~= 196 KB lives on the stack here; fine for a
    //demo, but consider heap storage if this ever runs on a small-stack thread
    std::array<UINT8, width * height *channel> input_image_1{};
    UINT8* input_1 = input_image_1.data();
    //fill the NHWC tensor.
    //NOTE(review): cvtColor above already converted the image to RGB, and this
    //loop swaps channels again (c==0 reads byte index 2), so the tensor ends
    //up BGR — confirm which channel order the model actually expects
    for (int i = 0; i < Input_1.rows; i++) {
        for (int j = 0; j < Input_1.cols; j++) {
            for (int c = 0; c < 3; c++)
            {
                //NHWC layout
                if (c == 0)
                    input_1[i*Input_1.cols * 3 + j * 3 + c] = Input_1.ptr<uchar>(i)[j * 3 + 2];
                if (c == 1)
                    input_1[i*Input_1.cols * 3 + j * 3 + c] = Input_1.ptr<uchar>(i)[j * 3 + 1];
                if (c == 2)
                    input_1[i*Input_1.cols * 3 + j * 3 + c] = Input_1.ptr<uchar>(i)[j * 3 + 0];
                //NCHW variant kept for reference (normalized to [0,1]):
                // input_1[c*rows*cols + i*cols + j] = Input_1.ptr<uchar>(i)[j*3 + (2-c)] / 255.0;
            }
        }
    }
    std::vector<Ort::Value> input_tensors;
    input_tensors.push_back(Ort::Value::CreateTensor<UINT8>(
        memory_info, input_1, input_image_1.size(), input_shape_.data(), input_shape_.size()));
    auto output_tensors = session.Run(Ort::RunOptions{ nullptr },
        input_node_names.data(),   //input node names
        input_tensors.data(),      //input tensors
        input_tensors.size(),      //1
        output_node_names.data(),  //output node names
        output_node_names.size()); //1
    //the model emits float pixels; convert back to a uint8 BGR Mat for display
    float* output = output_tensors[0].GetTensorMutableData<float>();
    Mat output_1 = Mat::zeros(Size(256, 256), CV_8UC3);
    for (int i = 0; i < output_1.rows; i++) {
        for (int j = 0; j < output_1.cols; j++) {
            for (int c = 0; c < 3; c++)
            {
                //NHWC layout; RGB -> BGR while writing into the cv::Mat
                if (c == 0)
                {
                    output_1.ptr<UINT8>(i)[j * 3 + 2] = UINT8(output[i*output_1.cols * 3 + j * 3 + c]);
                }
                else if (c == 1)
                {
                    output_1.ptr<UINT8>(i)[j * 3 + 1] = UINT8(output[i*output_1.cols * 3 + j * 3 + c]);
                }
                else if (c == 2)
                {
                    output_1.ptr<UINT8>(i)[j * 3 + 0] = UINT8(output[i*output_1.cols * 3 + j * 3 + c]);
                }
            }
        }
    }
    cout << "run to here" << endl;
    cv::imshow("input", Input_1);
    cv::imshow("output", output_1);
    cv::waitKey(0);
}
去噪效果感觉不行,主要是跑网络模型啦:
c接口 将版本设成10再导成dll可不依赖源码
//--- ONNX Runtime C-API inference snippet -----------------------------------
//NOTE(review): not self-contained — `model_path` and `srcimg` must be defined
//by the surrounding code; every OrtStatus* return value is ignored here, and
//no Release*()/free() cleanup is performed (model_input leaks).
const char* inputNames[] = { "input_layer" }; //input node names
const char* outputNames[] = { "add_3" }; //output node names
int inputNodeNum = 1;//number of input nodes
int outputNodeNum = 1; //number of output nodes
int input_w = 256;//model input width
int input_h = 256;//model input height
//total element count: H*W*C
size_t model_input_ele_count = input_h * input_w * 3;
//input data buffer (never freed in this snippet — see NOTE above)
UINT8* model_input = (UINT8*)malloc(sizeof(UINT8) * model_input_ele_count);
//NOTE(review): this shape is NHWC {1, H, W, 3}, not {N,C,H,W} as the
//original comment claimed — but the fill loop below writes planar CHW data;
//confirm which layout the model expects
int64_t input_shape[4];
input_shape[0] = 1;
input_shape[1] = input_h;
input_shape[2] = input_w;
input_shape[3] = 3;
size_t input_shape_len = 4;//dim = 4
size_t model_input_len = model_input_ele_count * sizeof(UINT8);
//initialize the OrtApi function table
const OrtApi* g_ort = nullptr;
//g_ort = OrtGetApiBase()->GetApi(ORT_API_VERSION); //must pass 10 here instead
g_ort = OrtGetApiBase()->GetApi(10);
//create the OrtEnv
OrtEnv* env;
g_ort->CreateEnv(ORT_LOGGING_LEVEL_WARNING, "test", &env);
//create the session options
OrtSessionOptions* session_options;
g_ort->CreateSessionOptions(&session_options);
//create the session (model_path comes from surrounding code)
OrtSession* session;
g_ort->CreateSession(env, model_path, session_options, &session);
//preprocessing: resize to the model's input size
cv::resize(srcimg, srcimg, Size(input_w, input_h));
//BGR2RGB
//NOTE(review): these indices write a planar CHW layout (channel plane stride
//input_w*input_h), which conflicts with the NHWC input_shape declared above
for (int i = 0; i < input_h; i++) {
for (int j = 0; j < input_w; j++) {
model_input[i * input_w + j + 0] = srcimg.at<cv::Vec3b>(i, j)[2];//R
model_input[i * input_w + j + 1 * input_w * input_h] = srcimg.at<cv::Vec3b>(i, j)[1];//G
model_input[i * input_w + j + 2 * input_w * input_h] = srcimg.at<cv::Vec3b>(i, j)[0];//B
}
}
//create the input tensor as a view over the existing buffer (no copy)
OrtValue* input_tensor = NULL;
OrtMemoryInfo* memory_info = NULL;
g_ort->CreateCpuMemoryInfo(OrtArenaAllocator, OrtMemTypeDefault, &memory_info);
g_ort->CreateTensorWithDataAsOrtValue(memory_info, model_input, model_input_len, input_shape, input_shape_len, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8, &input_tensor);
assert(input_tensor != NULL);
//inference
OrtValue* output_tensor = NULL;
g_ort->Run(session, NULL, inputNames, &input_tensor, inputNodeNum, outputNames, outputNodeNum, &output_tensor);
assert(output_tensor != NULL);
//post-processing
std::vector<float> result;
//get the output tensor's type/shape info
struct OrtTensorTypeAndShapeInfo* shape_info;
g_ort->GetTensorTypeAndShape(output_tensor, &shape_info);
//get the output's dimension count
size_t dim_count;
g_ort->GetDimensionsCount(shape_info, &dim_count);
//std::cout<< dim_count << std::endl;
//get the output's shape
//NOTE(review): dims[2] can only hold 2 dimensions; an image model's output is
//typically 4-D, so dims should be sized from dim_count — verify
int64_t dims[2];
g_ort->GetDimensions(shape_info, dims, sizeof(dims) / sizeof(dims[0]));
//std::cout<< dims[0] << "," << dims[1] << std::endl;
//fetch the raw output data (pointer owned by output_tensor)
float* f;
auto onnx_status = g_ort->GetTensorMutableData(output_tensor, (void**)&f);
//min-max normalize the float output into [0, 255] for display
float max_val = *max_element(f, f + input_h * input_w * 3);
float min_val = *min_element(f, f + input_h * input_w * 3);
float scale = max_val - min_val;
//NOTE(review): cv::Size takes (width, height) — arguments here are
//(input_h, input_w); harmless only because both are 256
Mat output = Mat::zeros(Size(input_h, input_w), CV_8UC3);
//RGB planes -> BGR Mat (reads the same planar CHW layout as the fill loop)
for (int i = 0; i < input_h; i++) {
for (int j = 0; j < input_w; j++) {
output.at<cv::Vec3b>(i, j)[2] = UINT8((f[i * input_h + j + 0] - min_val) / scale * 255);
output.at<cv::Vec3b>(i, j)[1] = UINT8((f[i * input_h + j + 1 * input_h * input_w] - min_val) / scale * 255);
output.at<cv::Vec3b>(i, j)[0] = UINT8((f[i * input_h + j + 2 * input_h * input_w] - min_val) / scale * 255);
}
}
cv::imshow("res", output);
cv::waitKey(0);
cv::destroyAllWindows();
refer: https://blog.csdn.net/qq_42995327/article/details/122622222