opencv常用函数
获取像素值
image.at<uchar>(j,i)= value;
image.at<cv::Vec3b>(j,i)[channel]= value;
getPerspectiveTransform和perspectiveTransform
getPerspectiveTransform和perspectiveTransform函数配合使用,目的是获取经过变换矩阵变换后的图片
Mat getPerspectiveTransform(const Point2f src[], const Point2f dst[])
输入是原图和目标图上各四个对应点的xy坐标,输出是3x3透视变换矩阵
void perspectiveTransform(InputArray src, OutputArray dst, InputArray m)
输入是原图坐标和getPerspectiveTransform函数的输出,输出是对应的变换图的坐标点
int main( )
{
Mat img=imread("boy.png");
int img_height = img.rows;
int img_width = img.cols;
vector<Point2f> corners(4);
corners[0] = Point2f(0,0);
corners[1] = Point2f(img_width-1,0);
corners[2] = Point2f(0,img_height-1);
corners[3] = Point2f(img_width-1,img_height-1);
vector<Point2f> corners_trans(4);
corners_trans[0] = Point2f(150,250);
corners_trans[1] = Point2f(771,0);
corners_trans[2] = Point2f(0,img_height-1);
corners_trans[3] = Point2f(650,img_height-1);
Mat transform = getPerspectiveTransform(corners,corners_trans);
cout<<transform<<endl;
vector<Point2f> ponits, points_trans;
for(int i=0;i<img_height;i++){
for(int j=0;j<img_width;j++){
ponits.push_back(Point2f(j,i));
}
}
perspectiveTransform( ponits, points_trans, transform);
Mat img_trans = Mat::zeros(img_height,img_width,CV_8UC3);
int count = 0;
for(int i=0;i<img_height;i++){
uchar* p = img.ptr<uchar>(i);
for(int j=0;j<img_width;j++){
int y = points_trans[count].y;
int x = points_trans[count].x;
uchar* t = img_trans.ptr<uchar>(y);
t[x*3] = p[j*3];
t[x*3+1] = p[j*3+1];
t[x*3+2] = p[j*3+2];
count++;
}
}
imwrite("boy_trans.png",img_trans);
return 0;
}
yolo预处理
// Letterbox-style resize for YOLO: scale the longer side to input_size,
// keep the aspect ratio, and pad the remainder with neutral gray.
// BUG FIX: the bare scalar 127 only filled the first (blue) channel,
// leaving the padding dark blue — use an explicit 3-channel Scalar.
cv::Mat cropped(input_size, input_size, CV_8UC3, cv::Scalar(127, 127, 127));
int resize_w, resize_h;
if (srcResize.cols > srcResize.rows)
{
    // Landscape: width becomes input_size, height scales proportionally.
    resize_w = input_size;
    resize_h = float(input_size) / srcResize.cols * srcResize.rows;
}
else
{
    // Portrait (or square): height becomes input_size.
    resize_h = input_size;
    resize_w = float(input_size) / srcResize.rows * srcResize.cols;
}
Mat myresize;
resize(srcResize, myresize, cv::Size(resize_w, resize_h));
// Paste the resized image into the top-left corner of the padded canvas.
myresize.copyTo(cropped(cv::Rect(0, 0, myresize.cols, myresize.rows)));
// For each detection, compute the axis-aligned bounding rectangle of its
// four (possibly rotated) corner points and draw it on `cropped`.
// NOTE(review): the loop's closing brace is not part of this snippet.
for(int i=0;i<temp_result.size();i++)
{
// x and y coordinates of the four corners of box i.
int colectx[4]={temp_result[i].boxPoint[0].x,temp_result[i].boxPoint[1].x,temp_result[i].boxPoint[2].x,temp_result[i].boxPoint[3].x};
int colecty[4]={temp_result[i].boxPoint[0].y,temp_result[i].boxPoint[1].y,temp_result[i].boxPoint[2].y,temp_result[i].boxPoint[3].y};
// Axis-aligned extents across the four corners.
int left = int(*std::min_element(colectx, colectx + 4));
int right = int(*std::max_element(colectx, colectx + 4));
int top = int(*std::min_element(colecty, colecty + 4));
int bottom = int(*std::max_element(colecty, colecty + 4));
cv::Rect rect(left, top, right - left, bottom - top);
// Draw a 2-pixel-thick rectangle in `color` (declared elsewhere).
cv::rectangle(cropped, rect, color, 2);
均值和方差
tofloat
// Convert an 8-bit interleaved (HWC) image into a planar (CHW) float
// tensor, applying per-channel normalization:
//   out = (pixel * normVals[ch] - meanVals[ch]) / stdmVals[ch]
// NOTE(review): reads src.data directly, so src is assumed to be
// continuous 8-bit data — confirm at the call site.
std::vector<float> substractMeanNormalize(cv::Mat &src, const float *meanVals, const float *normVals,const float *stdmVals) {
    const size_t numChannels = src.channels();
    const size_t imageSize = static_cast<size_t>(src.cols) * src.rows;
    std::vector<float> inputTensorValues(imageSize * numChannels);
    // Fill one output plane per channel; source pixels stay interleaved.
    for (size_t ch = 0; ch < numChannels; ++ch) {
        float *plane = inputTensorValues.data() + ch * imageSize;
        for (size_t pid = 0; pid < imageSize; ++pid) {
            const float raw = src.data[pid * numChannels + ch];
            plane[pid] = (raw * normVals[ch] - meanVals[ch]) / stdmVals[ch];
        }
    }
    return inputTensorValues;
}
tomat
// Per-channel normalization using convertTo on split channels:
// each channel becomes (src - mean) / std in CV_32F, then re-merged.
std::vector<float> mean_value{0.406, 0.456, 0.485};
std::vector<float> std_value{0.225, 0.224, 0.229};
cv::Mat src, dst;
std::vector<cv::Mat> bgrChannels(3);
cv::split(src, bgrChannels);
for (size_t i = 0; i < bgrChannels.size(); i++)
{
    // convertTo computes alpha*src + beta in one pass:
    // alpha = 1/std, beta = -mean/std.
    bgrChannels[i].convertTo(bgrChannels[i], CV_32FC1, 1.0 / std_value[i], (0.0 - mean_value[i]) / std_value[i]);
}
// BUG FIX: "cv::meger" is not an OpenCV function — the correct name is cv::merge.
cv::merge(bgrChannels, dst);
element
int main()
{
// std::vector<float> mean_value{0.406, 0.456, 0.485};
// std::vector<float> std_value{0.225, 0.224, 0.229};
std::vector<float> mean_value{0.406*255, 0.456*255, 0.485*255};
std::vector<float> std_value{0.225*255, 0.224*255, 0.229*255};
// std::vector<float> std_value{1, 1, 1};
cv::Mat src, dst;
src=imread("/disk3/c++/cmake-build-debug/00.jpg");
imwrite("1.jpg",src);
cv::Mat_<cv::Vec3b>::iterator it = src.begin<cv::Vec3b>();
// obtain end position
cv::Mat_<cv::Vec3b>::iterator itend = src.end<cv::Vec3b>();
// loop over all pixels
for ( ; it!= itend; ++it)
{
for(int j=0;j<3;j++)
{
// process each pixel ---------------------
(*it)[j] = ((*it)[j] - mean_value[j])/std_value[j];
}
}
imwrite("3.jpg",src);
return 0;
}
// Reduce the color resolution of `image` in place: every channel value is
// snapped to the center of its `div`-wide bucket, shrinking the palette.
void colorReduce2(cv::Mat &image, int div=64)
{
    int rowCount = image.rows;
    int elemsPerRow = image.cols * image.channels();
    // A continuous Mat has no row padding, so it can be walked as one
    // long row — the outer loop then executes exactly once.
    if (image.isContinuous())
    {
        elemsPerRow *= rowCount;
        rowCount = 1;
    }
    for (int r = 0; r < rowCount; ++r)
    {
        uchar* row = image.ptr<uchar>(r);
        for (uchar* px = row; px != row + elemsPerRow; ++px)
        {
            // Snap to the middle of the containing bucket.
            *px = *px / div * div + div / 2;
        }
    }
}
RotatedRect
// Build a rotated rectangle from model output: (cx, cy) center,
// (w, h) size, and an angle, then extract its four corner points.
// NOTE(review): temp_rect, output and angle_id are declared elsewhere.
temp_rect=RotatedRect(Point2f(output[0],output[1]),Size2f(output[2],output[3]),angle_id);
Point2f vertices[4];
// points() fills the array with the rectangle's 4 vertices.
temp_rect.points(vertices);