二、模型转化(PyTorch 转 ONNX)
import torch
import models.crnn as crnn

# Build the CRNN (imgH=32, 1 input channel, 37 classes = CTC blank + 10
# digits + 26 letters, 256 hidden units) and load the pretrained weights.
model = crnn.CRNN(32, 1, 37, 256)
# map_location="cpu" so the export also works on machines without the GPU
# the checkpoint was saved from.
model.load_state_dict(torch.load("crnn.pth", map_location="cpu"))
# Switch to inference mode before exporting: otherwise BatchNorm/Dropout
# training-time behaviour is baked into the exported ONNX graph.
model.eval()

# CRNN expects a single grayscale image in (N, C, H, W) = (1, 1, 32, 100).
dummy_input = torch.randn(1, 1, 32, 100)
torch.onnx.export(model, dummy_input, "crnn.onnx")
import cv2
import numpy as np

# Sanity-check the exported model: load it with OpenCV's DNN module and
# push a random tensor of the expected input shape through it.
net = cv2.dnn.readNet("crnn.onnx")
blob = np.random.standard_normal((1, 1, 32, 100)).astype(np.float32)
net.setInput(blob)
result = net.forward()
print(result.shape)
三、OpenCV 部署代码
#include<iostream>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main()
{
dnn::Net net = dnn::readNet("crnn.onnx");
Mat in = imread("test.jpg",0);
in.convertTo(in,CV_32F,1.0/255);
in -= 0.5;
in /= 0.5;
resize(in,in,Size(100, 32)); // fixed !
Mat blob = dnn::blobFromImage(in);
net.setInput(blob);
Mat res = net.forward(); // 26 x 1 x 37
string alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"; // 10 nums + 26 letters + 1 = 37
for (size_t r = 0; r < res.size[0]; r++) {
Mat slice = Mat(1,res.size[2],CV_32F,res.ptr<float>(r)); // 37 one-hot encoded pins
Point p; double m;
minMaxLoc(slice, 0, &m, 0, &p);
char c = p.x>0 ? alphabet[p.x-1] : '-'; // '-' == no detection
cout << r << "\t" << c << "\t" << p << "\t" << m << endl;
}
return 0;
}