Deploying YOLOv11 (Detection) in C# with ONNX Runtime
Link: https://pan.baidu.com/s/1RM4wTkaPn4efwOfS7HpgXw?pwd=1234
Extraction code: 1234
1. Install YOLO and export the ONNX model
pip install ultralytics
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load an official model
# Export the model
model.export(format="onnx")
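The exported yolo11n.onnx takes a 1x3x640x640 image tensor named images and produces a 1x84x8400 output: 4 box values plus 80 class scores for each of 8,400 candidate boxes. The C# code below relies on those shapes (box_num = 8400, 4 + class_num = 84). If you want to confirm them yourself, here is a small sketch using the ONNX Runtime metadata API (Microsoft.ML.OnnxRuntime is installed in the next step; the helper name and example output names are assumptions):

// Sanity-check sketch: print the exported model's input/output names and shapes.
void PrintModelInfo(string onnxPath)
{
    using (var session = new InferenceSession(onnxPath))
    {
        foreach (var kv in session.InputMetadata)
            Console.WriteLine($"input  {kv.Key}: {string.Join("x", kv.Value.Dimensions)}");  // e.g. images: 1x3x640x640
        foreach (var kv in session.OutputMetadata)
            Console.WriteLine($"output {kv.Key}: {string.Join("x", kv.Value.Dimensions)}"); // e.g. output0: 1x84x8400
    }
}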
2. Install the required packages: OpenCvSharp and Microsoft.ML.OnnxRuntime (on NuGet this is typically the OpenCvSharp4 package plus a native runtime package such as OpenCvSharp4.runtime.win, together with Microsoft.ML.OnnxRuntime)
3. Design the form (the code below assumes two PictureBox controls, pictureBox1 and pictureBox2, a TextBox textBox1 for the timing output, and a button button2 that runs inference)
4. Define the fields, the Transpose helper function, and the DetectionResult helper class
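The snippets in the remaining sections assume roughly the following using directives (adjust to your own project); note that Transpose uses pointers, so "Allow unsafe code" must be enabled in the project's build settings:

using System;
using System.Collections.Generic;
using System.Drawing;
using System.IO;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using OpenCvSharp;
using OpenCvSharp.Dnn; // CvDnn.NMSBoxes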
// Transpose a row-major rows x cols matrix into a cols x rows matrix.
// Used later to turn the model output [4 + class_num, box_num] into [box_num, 4 + class_num].
public unsafe float[] Transpose(float[] tensorData, int rows, int cols)
{
    float[] transposedTensorData = new float[tensorData.Length];
    fixed (float* pTensorData = tensorData)
    {
        fixed (float* pTransposedData = transposedTensorData)
        {
            for (int i = 0; i < rows; i++)
            {
                for (int j = 0; j < cols; j++)
                {
                    int index = i * cols + j;
                    int transposedIndex = j * rows + i;
                    pTransposedData[transposedIndex] = pTensorData[index];
                }
            }
        }
    }
    return transposedTensorData;
}
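For intuition, a tiny worked example of what Transpose does (the numbers are made up purely for illustration):

// A 2x3 row-major matrix:
// [ 1 2 3 ]
// [ 4 5 6 ]
float[] t = Transpose(new float[] { 1, 2, 3, 4, 5, 6 }, rows: 2, cols: 3);
// t is { 1, 4, 2, 5, 3, 6 }, i.e. the 3x2 transposed matrix laid out row-major.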
string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tif;*.tiff;*.png";
string image_path = "";
string model_path;
string classer_path;
public string[] class_names;
public int class_num;
DateTime dt1 = DateTime.Now;
DateTime dt2 = DateTime.Now;
int input_height;
int input_width;
float ratio_height;
float ratio_width;
InferenceSession onnx_session;
int box_num;
float conf_threshold;
float nms_threshold;
public class DetectionResult
{
public DetectionResult(int ClassId, string Class, Rect Rect, float Confidence)
{
this.ClassId = ClassId;
this.Confidence = Confidence;
this.Rect = Rect;
this.Class = Class;
}
public string Class { get; set; }
public int ClassId { get; set; }
public float Confidence { get; set; }
public Rect Rect { get; set; }
}
5. Load the model, the test image, and label.txt
private void Form1_Load(object sender, EventArgs e)
{
    model_path = @"D:\workplace\repos\WindowsFormsApp12\WindowsFormsApp12\model\yolo11n.onnx";
    // Create session options, used to control logging while the model is loaded
    SessionOptions options = new SessionOptions();
    options.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_INFO;
    options.AppendExecutionProvider_CPU(0); // run on the CPU
    // Create the inference session from the ONNX model file
    onnx_session = new InferenceSession(model_path, options); // model_path is the path to the ONNX model
    input_height = 640;
    input_width = 640;
    box_num = 8400;
    conf_threshold = 0.25f;
    nms_threshold = 0.5f;
    // label.txt contains one class name per line (the 80 COCO classes for yolo11n)
    classer_path = @"D:\workplace\repos\WindowsFormsApp12\WindowsFormsApp12\model\label.txt";
    class_names = File.ReadAllLines(classer_path, Encoding.UTF8);
    class_num = class_names.Length;
    image_path = "D:/cat.png";
    pictureBox1.Image = new Bitmap(image_path);
}
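The article does not show the handler that picks a different image at runtime; below is a minimal sketch, assuming a button named button1 and reusing the fileFilter field defined earlier:

// Hypothetical image-selection handler (button1 is an assumed control name)
private void button1_Click(object sender, EventArgs e)
{
    OpenFileDialog ofd = new OpenFileDialog();
    ofd.Filter = fileFilter;
    if (ofd.ShowDialog() != DialogResult.OK)
    {
        return;
    }
    image_path = ofd.FileName;
    pictureBox1.Image = new Bitmap(image_path);
    pictureBox2.Image = null;
    textBox1.Text = "";
}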
6. Inference
private void button2_Click(object sender, EventArgs e)
{
if (image_path == "")
{
return;
}
button2.Enabled = false;
pictureBox2.Image = null;
textBox1.Text = "";
Application.DoEvents();
    Mat image = new Mat(image_path);
    // Preprocessing: shrink the image to fit 640x640 while keeping its aspect ratio
    int height = image.Rows;
    int width = image.Cols;
    Mat temp_image = image.Clone();
    if (height > input_height || width > input_width)
    {
        float scale = Math.Min((float)input_height / height, (float)input_width / width);
        OpenCvSharp.Size new_size = new OpenCvSharp.Size((int)(width * scale), (int)(height * scale));
        Cv2.Resize(image, temp_image, new_size);
    }
    // Ratios for mapping detections back to the original image coordinates
    ratio_height = (float)height / temp_image.Rows;
    ratio_width = (float)width / temp_image.Cols;
    // Pad the right and bottom with black pixels to reach exactly 640x640
    Mat input_img = new Mat();
    Cv2.CopyMakeBorder(temp_image, input_img, 0, input_height - temp_image.Rows, 0, input_width - temp_image.Cols, BorderTypes.Constant, 0);
    //Cv2.ImShow("input_img", input_img);
    // Build the input tensor (NCHW layout, pixel values normalized to 0-1)
    Tensor<float> input_tensor = new DenseTensor<float>(new[] { 1, 3, 640, 640 });
    // OpenCV stores pixels as BGR, while the exported YOLO model expects RGB input,
    // so the channel order is reversed when filling the tensor
    for (int y = 0; y < input_img.Height; y++)
    {
        for (int x = 0; x < input_img.Width; x++)
        {
            Vec3b pixel = input_img.At<Vec3b>(y, x);
            input_tensor[0, 0, y, x] = pixel[2] / 255f; // R
            input_tensor[0, 1, y, x] = pixel[1] / 255f; // G
            input_tensor[0, 2, y, x] = pixel[0] / 255f; // B
        }
    }
List<NamedOnnxValue> input_container = new List<NamedOnnxValue>
{
NamedOnnxValue.CreateFromTensor("images", input_tensor)
};
    // Run inference and time it
    dt1 = DateTime.Now;
    var ort_outputs = onnx_session.Run(input_container).ToArray();
    dt2 = DateTime.Now;
    // The raw output is [1, 4 + class_num, box_num]; transpose it to [box_num, 4 + class_num]
    // so that each candidate box's 4 coordinates and class scores are contiguous
    float[] data = Transpose(ort_outputs[0].AsTensor<float>().ToArray(), 4 + class_num, box_num);
float[] confidenceInfo = new float[class_num];
float[] rectData = new float[4];
List<DetectionResult> detResults = new List<DetectionResult>();
for (int i = 0; i < box_num; i++)
{
Array.Copy(data, i * (class_num + 4), rectData, 0, 4);
Array.Copy(data, i * (class_num + 4) + 4, confidenceInfo, 0, class_num);
        float score = confidenceInfo.Max(); // highest class score of this box
        int maxIndex = Array.IndexOf(confidenceInfo, score); // index of that class
        // Boxes are predicted as center-x, center-y, width, height in the 640x640 input space;
        // scale them back to the original image coordinates
        int _centerX = (int)(rectData[0] * ratio_width);
        int _centerY = (int)(rectData[1] * ratio_height);
        int _width = (int)(rectData[2] * ratio_width);
        int _height = (int)(rectData[3] * ratio_height);
detResults.Add(new DetectionResult(
maxIndex,
class_names[maxIndex],
new Rect(_centerX - _width / 2, _centerY - _height / 2, _width, _height),
score));
}
    // NMS: drop boxes below the confidence threshold and suppress overlapping duplicates
CvDnn.NMSBoxes(detResults.Select(x => x.Rect), detResults.Select(x => x.Confidence), conf_threshold, nms_threshold, out int[] indices);
detResults = detResults.Where((x, index) => indices.Contains(index)).ToList();
    // Draw the surviving detections on a copy of the original image
Mat result_image = image.Clone();
foreach (DetectionResult r in detResults)
{
Cv2.PutText(result_image, $"{r.Class}:{r.Confidence:P0}", new OpenCvSharp.Point(r.Rect.TopLeft.X, r.Rect.TopLeft.Y - 10), HersheyFonts.HersheySimplex, 1, Scalar.Red, 2);
Cv2.Rectangle(result_image, r.Rect, Scalar.Red, thickness: 2);
}
pictureBox2.Image = new Bitmap(result_image.ToMemoryStream());
    textBox1.Text = "Inference time: " + (dt2 - dt1).TotalMilliseconds + "ms";
button2.Enabled = true;
}