Main Idea
The pipeline has three main steps:
1. Open the phone camera and capture live image data.
2. Pass each live frame to the C++ side through JNI for processing.
3. On the C++ side, track the 2D object with two complementary methods so that they back each other up:
   (1) Extract the object's feature points with ORB.
   (2) Track the ORB feature points with LK optical flow.
   (3) Verify with BRIEF descriptors: if the check shows that tracking has been lost, or that some feature points have dropped out, re-run the ORB feature extraction. (A minimal sketch of this detect/track/re-detect loop follows the list.)
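Before going into each step, here is a minimal C++ sketch of the overall control flow. It uses only OpenCV; the point threshold (10) and the simple status filter are stand-ins for this project's own addNewPoints/acceptTrackedPoint logic shown later, not the actual implementation:

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

// Per-frame control flow: ORB detection when there are too few points,
// LK optical flow otherwise; losing points triggers re-detection on the next frame.
void processFrame(const Mat &frame) {
    static Mat prevGray;                       // previous grayscale frame
    static std::vector<Point2f> pts;           // points currently being tracked

    Mat gray;
    cvtColor(frame, gray, COLOR_BGR2GRAY);

    if (pts.size() < 10) {                     // (re-)detect with ORB
        std::vector<KeyPoint> kps;
        Ptr<ORB> orb = ORB::create();
        orb->detect(gray, kps);
        KeyPoint::convert(kps, pts);
    } else {                                   // track with LK optical flow
        std::vector<Point2f> next;
        std::vector<uchar> status;
        std::vector<float> err;
        calcOpticalFlowPyrLK(prevGray, gray, pts, next, status, err);
        std::vector<Point2f> kept;
        for (size_t i = 0; i < next.size(); i++)
            if (status[i]) kept.push_back(next[i]);   // keep successfully tracked points
        pts = kept;                            // if this shrinks too much, the next frame re-detects
    }
    gray.copyTo(prevGray);
}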
Capturing Live Frames from the Phone Camera
Initializing the camera
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_camera);
    // Request runtime permissions
    if (Build.VERSION.SDK_INT >= buildVersion) {
        ActivityCompat.requestPermissions(this,
                new String[]{Manifest.permission.CAMERA, Manifest.permission.WRITE_EXTERNAL_STORAGE},
                requestPermissionId);
    }
    // Window flags: full screen, keep the screen on
    getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN);
    getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
    // Initialize the OpenCV camera view
    mCameraView = findViewById(R.id.cv_camera);
    mCameraView.setVisibility(SurfaceView.VISIBLE);
    mCameraView.setCvCameraViewListener(this);
    // Start the preview (back camera) with an FPS meter
    mCameraView.setCameraIndex(0);
    mCameraView.enableView();
    mCameraView.enableFpsMeter();
    // Front/back camera switch
    RadioButton backOption = findViewById(R.id.backCameraOption);
    backOption.setChecked(true);
    backOption.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            cameraId = (cameraId + 1) % 2;
            cameraSwitch(cameraId);
        }
    });
    // Toggle the native processing on/off
    Switch btHistogram = findViewById(R.id.sw_color1);
    btHistogram.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            process = (process + 1) % 2;
        }
    });
    // Switch btGray = findViewById(R.id.sw_color1);
    // btGray.setOnClickListener(new View.OnClickListener() {
    //     @Override
    //     public void onClick(View v) {
    //         colorToGray = (colorToGray + 1) % 2;
    //     }
    // });
}
Processing each captured frame
private Mat process(Mat frame) {
    // Load the template image from resources and bring it to a fixed size
    Bitmap image1 = BitmapFactory.decodeResource(this.getResources(), R.drawable.moss);
    Mat mat1 = new Mat(image1.getHeight() / 4, image1.getWidth() / 4, CvType.CV_8UC4);
    Utils.bitmapToMat(image1, mat1);
    resize(mat1, mat1, new Size(200, 200));
    if (process == 1) {
        // Hand the live frame, the template and the output Mat to the native tracker
        new NDKInterface().trace11(frame.getNativeObjAddr(), mat1.getNativeObjAddr(), mGray.getNativeObjAddr());
        return mGray;
    }
    return frame;
}
Result of the camera preview
Passing the live frames to the C++ side for processing
Here the parameter srcAdd is the native address of the live frame's Mat, and dstAdd is the native address of the Mat that will hold the processing result; on the C++ side these jlong values are cast back to cv::Mat references, as sketched after the class below.
public class NDKInterface {
    static {
        System.loadLibrary("native-lib");
    }
    // Each Mat crosses the JNI boundary as its native object address (Mat.getNativeObjAddr())
    public native void getEdge(Object bitmap);
    public native int colorToGray(long srcAdd, long dstAdd);
    public native int histogram(long srcAdd, long dstAdd);
    public native int OEBMatch(long srcAdd, long temAdd, long dstAdd);
    public native void tem(long srcAdd, long dstAdd);
    public native void opticalflow(long srcAdd, long dstAdd);
    public native void trace(long srcAdd, long temAdd, long dstAdd);
    public native void trace11(long nativeObjAddr, long nativeObjAddr1, long nativeObjAddr2);
    public native void mtf(long nativeObjAddr, long nativeObjAddr1, long nativeObjAddr2);
}
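On the native side each jlong argument is cast straight back to a cv::Mat reference, exactly as the opticalflow and trace implementations below do. As a minimal sketch, here is what that unwrapping looks like for colorToGray; only the two casts follow this project's pattern, while the RGBA-to-gray conversion and the return value are illustrative assumptions:

#include <jni.h>
#include <opencv2/opencv.hpp>
using namespace cv;

extern "C"
JNIEXPORT jint JNICALL
Java_com_example_bymyself_NDKInterface_colorToGray(JNIEnv *env, jobject thiz,
                                                   jlong srcAdd, jlong dstAdd) {
    Mat &src = *(Mat *) srcAdd;            // live frame owned by the Java side
    Mat &dst = *(Mat *) dstAdd;            // result Mat owned by the Java side
    cvtColor(src, dst, COLOR_RGBA2GRAY);   // illustrative processing step
    return 0;                              // return value is illustrative
}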
ORB feature extraction and matching (matching the feature template against the live frame)
vector<Point2f> match(Mat &img_scene, Mat &dst, Mat &img_object) { // feature extraction and matching between the two images
    // Detect ORB keypoints and compute descriptors for the template and the scene
    Ptr<ORB> detector = ORB::create();
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    Mat descriptors_object, descriptors_scene;
    detector->detectAndCompute(img_object, Mat(), keypoints_object, descriptors_object);
    detector->detectAndCompute(img_scene, Mat(), keypoints_scene, descriptors_scene);
    // ORB descriptors are binary, so match them with Hamming distance
    BFMatcher matcher(NORM_HAMMING);
    std::vector<DMatch> matches;
    matcher.match(descriptors_object, descriptors_scene, matches);
    // Find the smallest and largest descriptor distances
    double max_dist = 0;
    double min_dist = 100;
    for (int i = 0; i < descriptors_object.rows; i++) {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    // Keep only matches that are close to the best one
    std::vector<DMatch> good_matches;
    for (int i = 0; i < descriptors_object.rows; i++) {
        if (matches[i].distance <= 3 * min_dist) {
            good_matches.push_back(matches[i]);
        }
    }
    Mat img_matches;
    drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
                good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    // Collect the matched point pairs
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for (size_t i = 0; i < good_matches.size(); i++) {
        obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
    }
    // Estimate the homography from template to scene and project the template corners
    Mat H = findHomography(obj, scene, RANSAC);
    std::vector<cv::Point2f> obj_corners(4);
    obj_corners[0] = Point2f(0, 0);
    obj_corners[1] = Point2f(img_object.cols, 0);
    obj_corners[2] = Point2f(img_object.cols, img_object.rows);
    obj_corners[3] = Point2f(0, img_object.rows);
    // scene_cornerssss is a global std::vector<Point2f>(4); the tracking code below reuses it
    perspectiveTransform(obj_corners, scene_cornerssss, H);
    // Draw the projected template outline on the scene
    line(img_scene, scene_cornerssss[0], scene_cornerssss[1], Scalar(0, 255, 0), 4);
    line(img_scene, scene_cornerssss[1], scene_cornerssss[2], Scalar(0, 255, 0), 4);
    line(img_scene, scene_cornerssss[2], scene_cornerssss[3], Scalar(0, 255, 0), 4);
    line(img_scene, scene_cornerssss[3], scene_cornerssss[0], Scalar(0, 255, 0), 4);
    return scene;
}
LK Optical Flow
// Tracking with the optical-flow method
JNIEXPORT void JNICALL
Java_com_example_bymyself_NDKInterface_opticalflow(JNIEnv *env, jobject thiz, jlong src_add,
                                                   jlong dst_add) {
    Mat &frame = *(Mat *) src_add;   // input frame from the phone camera
    Mat &output = *(Mat *) dst_add;  // output frame with the tracking result drawn on it
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    frame.copyTo(output);
    // Add new feature points when there are too few to track
    if (addNewPoints())
    {
        goodFeaturesToTrack(gray, features, maxCount, qLevel, minDist); // detect trackable corners into features
        points[0].insert(points[0].end(), features.begin(), features.end()); // points[0] holds the previous positions; append the new points
        initial.insert(initial.end(), features.begin(), features.end());     // do the same for the initial positions
    }
    if (gray_prev.empty()) // no previous frame yet
    {
        gray.copyTo(gray_prev); // use the current frame as the previous one
    }
    // L-K optical-flow motion estimation
    calcOpticalFlowPyrLK(gray_prev, gray, points[0], points[1], status, err);
    // Drop badly tracked points
    int k = 0;
    for (size_t i = 0; i < points[1].size(); i++)
    {
        if (acceptTrackedPoint(i))
        {
            initial[k] = initial[i];
            points[1][k++] = points[1][i];
        }
    }
    points[1].resize(k);
    initial.resize(k);
    // Draw the feature points and their motion trails
    for (size_t i = 0; i < points[1].size(); i++)
    {
        line(output, initial[i], points[1][i], Scalar(0, 0, 255));
        circle(output, points[1][i], 3, Scalar(0, 255, 0), -1);
    }
    // Use the current result as the reference for the next frame
    swap(points[1], points[0]);
    swap(gray_prev, gray);
}
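The helpers addNewPoints() and acceptTrackedPoint() are not shown in this post. Definitions consistent with how they are used above (and with the classic OpenCV LK tracking example this code follows) would look roughly like this; they use the same global points/status as the function above, and the thresholds 10 and 2 are assumptions:

#include <cmath>

// Ask for fresh corners when too few tracked points remain (sketch; threshold assumed)
bool addNewPoints() {
    return points[0].size() <= 10;
}

// Keep a tracked point only if LK reported success and the point actually moved (sketch)
bool acceptTrackedPoint(int i) {
    return status[i] != 0 &&
           (std::abs(points[0][i].x - points[1][i].x) +
            std::abs(points[0][i].y - points[1][i].y) > 2);
}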
Combining the two methods and checking the tracking result
JNIEXPORT void JNICALL
Java_com_example_bymyself_NDKInterface_trace(JNIEnv *env, jobject thiz, jlong src_add, jlong tem_add,
                                             jlong dst_add) {
    Mat &frame = *(Mat *) src_add;       // live frame
    Mat &output = *(Mat *) dst_add;      // output frame with the tracking result drawn on it
    Mat &img_object = *(Mat *) tem_add;  // template image
    if (ready == true) {
        // Re-detect: run the ORB match against the template
        feats = match(frame, output, img_object);
        ready = false;
    } else {
        // Track: follow the ORB points and the template corners with LK optical flow
        cvtColor(frame, gra, COLOR_BGR2GRAY);
        frame.copyTo(output);
        if (addNewPoint()) {
            pq[0].insert(pq[0].end(), feats.begin(), feats.end()); // pq[0] holds the previous positions; append the matched points
            init.insert(init.end(), feats.begin(), feats.end());   // do the same for the initial positions
        }
        if (g_prev.empty()) // no previous frame yet
        {
            gra.copyTo(g_prev); // use the current frame as the previous one
        }
        calcOpticalFlowPyrLK(g_prev, gra, pq[0], pq[1], stat, err);               // L-K optical flow: track the interior feature points
        calcOpticalFlowPyrLK(g_prev, gra, scene_cornerssss, site[0], stat1, err); // track the four template corner points
        if (flag)
        {
            good[0] = scene_cornerssss; // remember the corners of the last good ORB match
            flag = false;
            LOGD("4444444444444444444444444444444 is finished");
        }
        // Drop badly tracked interior points
        int k = 0;
        for (size_t i = 0; i < pq[1].size(); i++) {
            if (acceptTrackedPointss(i)) {
                init[k] = init[i];
                pq[1][k++] = pq[1][i];
            }
        }
        pq[1].resize(k);
        init.resize(k);
        // Draw the feature points and their motion trails
        for (size_t i = 0; i < pq[1].size(); i++) {
            line(output, init[i], pq[1][i], Scalar(0, 0, 255));
            circle(output, pq[1][i], 3, Scalar(0, 255, 0), -1);
        }
        // If any of the four corner points failed to track, the box is unreliable
        for (int i = 0; i < 4; i++)
        {
            if (stat1[i] == 0)
                flags = false;
        }
        if (!getN(frame, img_object, good[0], site[0]) && flags) { // good[0] is the best match result, site[0] the tracked corners
            // Tracking is still good: draw the tracked box and keep the result as the next reference
            line(output, site[0][0], site[0][1], Scalar(0, 255, 0), 4);
            line(output, site[0][1], site[0][2], Scalar(0, 255, 0), 4);
            line(output, site[0][2], site[0][3], Scalar(0, 255, 0), 4);
            line(output, site[0][3], site[0][0], Scalar(0, 255, 0), 4);
            feats = pq[1];
            scene_cornerssss = site[0];
            swap(pq[1], pq[0]);
            swap(g_prev, gra);
            LOGD("55555555555555555555555555555555555555555 is finished");
        } else
        {
            // Quality check failed: draw the last box but do not update the reference
            line(output, site[0][0], site[0][1], Scalar(0, 255, 0), 4);
            line(output, site[0][1], site[0][2], Scalar(0, 255, 0), 4);
            line(output, site[0][2], site[0][3], Scalar(0, 255, 0), 4);
            line(output, site[0][3], site[0][0], Scalar(0, 255, 0), 4);
            LOGD("6666666666666666666666666666666666666666666666 is finished");
        }
    }
}
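The helpers addNewPoint(), acceptTrackedPointss() and getN() are likewise not shown in this post. getN() appears to implement the quality check described in the overview (the BRIEF-based verification step): it decides whether the box tracked by optical flow still agrees with the last good ORB match, and requests a new ORB detection when it does not. A rough sketch under that assumption, using simple corner drift instead of a descriptor check; every name and threshold here is an assumption, not this project's actual code:

#include <cmath>

// Sketch of a possible getN(): compare the LK-tracked corners with the corners of the
// last good ORB match and trigger a fresh ORB match when they have drifted too far.
// A real implementation could instead use frame and templ for a BRIEF descriptor check.
bool getN(const Mat &frame, const Mat &templ,
          const std::vector<Point2f> &bestCorners,     // good[0]: corners from the last ORB match
          const std::vector<Point2f> &trackedCorners)  // site[0]: corners tracked by optical flow
{
    double drift = 0;
    for (int i = 0; i < 4; i++) {
        double dx = bestCorners[i].x - trackedCorners[i].x;
        double dy = bestCorners[i].y - trackedCorners[i].y;
        drift += std::sqrt(dx * dx + dy * dy);
    }
    if (drift > 100.0) {   // threshold is illustrative
        ready = true;      // ask the next frame to run a new ORB match
        return true;       // true -> do not trust the tracked box this frame
    }
    return false;
}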