转:差分法求运动轮廓

该博客介绍了一种利用OpenCV库进行运动轮廓检测的方法,通过差分法结合帧差来跟踪运动物体。主要涉及图像处理步骤包括颜色转换、绝对差分、阈值处理、轮廓查找等操作。代码中展示了从摄像头捕获图像,然后处理并显示运动成分的流程。
摘要由CSDN通过智能技术生成
#include "stdafx.h"
#include "cv.h"      // OpenCV 1.x core definitions
#include "highgui.h" // OpenCV highGUI definitions
#include "cvcam.h"
#include <iostream>  // FIX: header names were lost in the original paste ("#include #include")
#include <stdio.h>   // C standard input/output definitions
using namespace std;

// Various tracking parameters (in seconds).
// NOTE(review): MHI_* constants and the mhi/orient/mask/segmask images are
// declared but never used in the visible code — presumably left over from the
// OpenCV "motempl" sample this was derived from.
const double MHI_DURATION  = 1;
const double MAX_TIME_DELTA = 0.5;
const double MIN_TIME_DELTA = 0.05;

// Number of cyclic frame buffers used for motion detection
// (should, probably, depend on FPS).
const int N = 4;

// ring image buffer
IplImage **buf = 0;
int last = 0;

// temporary images
IplImage *mhi     = 0; // MHI
IplImage *orient  = 0; // orientation
IplImage *mask    = 0; // valid orientation mask
IplImage *segmask = 0; // motion segmentation map
CvMemStorage* storage = 0; // temporary storage for contour retrieval

IplImage* abs_image     = 0;   // thresholded frame difference of the current pair
IplImage* add_abs_image = 0;   // accumulation of the ring buffer + current diff
IplImage* abs_images[N];       // FIX: was [3], but indices 0..3 are allocated and
                               // read — writing abs_images[3] was out of bounds
IplImage* grey     = 0;
IplImage* pre_grey = 0;
IplImage* dst      = 0;        // contour rendering target
CvSeq* contour = 0;

int test( IplImage* src, IplImage* pre_src );

// Captures frames from the first available camera, computes a thresholded
// frame difference against the previous frame, accumulates recent diffs in a
// small ring buffer, and displays the detected motion contours.
// Returns 0 on success, -1 when no camera is found or capture fails.
int _tmain(int argc, _TCHAR* argv[])
{
    // Determine the number of available cameras.
    int numCameras = cvcamGetCamerasCount();
    cout << "=========================================" << endl;
    cout << "== Located devices: =>" << numCameras << "<=" << endl;
    cout << "=========================================" << endl;

    // Make sure that a camera is attached.
    if( numCameras == 0 )
    {
        getchar();
        return -1;
    }

    IplImage* pre_image = 0;
    IplImage* image     = 0;
    int frame_count = 0;

    CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
    if( capture )
    {
        printf( "=> OK\n" );                         // FIX: was "/n"
    }
    else
    {
        fprintf( stderr, "ERROR: capture is NULL \n" ); // FIX: was "/n"
        getchar();
        return -1;
    }

    cvNamedWindow( "Source", 1 );
    cvNamedWindow( "Components", 1 );

    for(;;)
    {
        if( !cvGrabFrame( capture ))
            break;
        image = cvRetrieveFrame( capture );

        // Lazily allocate every working image once the frame size is known.
        if( !pre_image )
        {
            pre_image     = cvCreateImage( cvGetSize(image), 8, 3 );
            abs_image     = cvCreateImage( cvGetSize(image), 8, 1 );
            add_abs_image = cvCreateImage( cvGetSize(image), 8, 1 );
            grey          = cvCreateImage( cvGetSize(image), 8, 1 );
            pre_grey      = cvCreateImage( cvGetSize(image), 8, 1 );
            dst           = cvCreateImage( cvGetSize(image), 8, 3 );
            abs_image->origin     = image->origin;
            add_abs_image->origin = image->origin;
            dst->origin           = image->origin;
            storage = cvCreateMemStorage(0);
            for( int i = 0; i < N; i++ )     // was unrolled 4x by hand
            {
                abs_images[i] = cvCreateImage( cvGetSize(image), 8, 1 );
                cvZero( abs_images[i] );
            }
        }

        frame_count++;
        test( image, pre_image );

        // FIX: original did cvCopy(abs_images[frame_count%3], abs_image, 0),
        // i.e. it copied the (never-written, all-zero) ring slot OVER the
        // freshly computed diff: the ring buffer stayed zero forever and the
        // accumulation in test() was a no-op. Store the diff INTO the ring
        // buffer instead, cycling over all N slots (%4, not %3).
        // NOTE(review): cvFindContours (inside test) modifies abs_image, so
        // what is stored here is the post-findContours image — confirm this
        // matches the intended history semantics.
        cvCopy( abs_image, abs_images[frame_count % N], 0 );
        cvCopy( image, pre_image, 0 );

        if( cvWaitKey(10) >= 0 )
            break;
    }

    cvcamStop();
    cvcamExit();

    // Release the capture device housekeeping.
    cvDestroyWindow( "Source" );     // FIX: was "src" — no such window exists
    cvDestroyWindow( "Components" );
    cvReleaseCapture( &capture );
    return 0;
}

// Computes the thresholded absolute difference between src and pre_src,
// OR-accumulates it with the N buffered diffs into add_abs_image, extracts
// the contours of the current diff, and displays source + contour images.
// Always returns 0.
int test( IplImage* src, IplImage* pre_src )
{
    cvCvtColor( src, grey, CV_BGR2GRAY );
    cvCvtColor( pre_src, pre_grey, CV_BGR2GRAY );

    // Binary motion mask of the current frame pair.
    cvAbsDiff( grey, pre_grey, abs_image );
    cvThreshold( abs_image, abs_image, 20, 255, CV_THRESH_BINARY );

    // Accumulate the recent history: start from the current diff, then fold
    // in each buffered diff, re-binarizing after every add. Equivalent to the
    // original hand-unrolled cvZero + 4x (cvAdd + cvThreshold) sequence.
    cvCopy( abs_image, add_abs_image, 0 );
    for( int i = 0; i < N; i++ )
    {
        cvAdd( abs_images[i], add_abs_image, add_abs_image );
        cvThreshold( add_abs_image, add_abs_image, 10, 255, CV_THRESH_BINARY );
    }

    // NOTE(review): contours are extracted from the single-pair diff
    // (abs_image), not from the accumulated add_abs_image — the accumulated
    // image is currently unused. Confirm which one was intended.
    cvFindContours( abs_image, storage, &contour, sizeof(CvContour),
                    CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

    cvZero( dst );
    for( ; contour != 0; contour = contour->h_next )
    {
        CvScalar color = CV_RGB( 255, 255, 255 );
        // Replace CV_FILLED with 1 to see the outlines.
        cvDrawContours( dst, contour, color, color, -1, CV_FILLED, 8 );
    }

    cvShowImage( "Source", src );
    cvShowImage( "Components", dst );
    return 0;
}
  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
帧间差分法(Interframe differencing)通常用于视频流中估计帧之间的运动,从而推算物体的加速度。在Python中,我们可以使用OpenCV库来实现这个过程。以下是一个简单的例子,展示如何使用帧间差分计算帧速率和加速度: ```python import cv2 import numpy as np # 假设video_cap是已经打开的视频文件或摄像头 video_cap = cv2.VideoCapture('your_video.mp4') # 初始化帧数和时间 frame_count = 0 start_time = cv2.getTickCount() # 获取前两帧 ret, frame1 = video_cap.read() ret, frame2 = video_cap.read() while ret: # 计算帧差 diff = cv2.absdiff(frame1, frame2) gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY) _, thresh = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY) # 对帧差进行膨胀运算以消除噪声 kernel = np.ones((3, 3), np.uint8) dilated = cv2.dilate(thresh, kernel, iterations=2) # 找到运动区域的轮廓 contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # 如果没有发现运动,跳过这一帧 if not contours: frame1 = frame2 frame2 = video_cap.read() continue # 计算加速度 (假设每一帧间隔是固定的) frame_count += 1 elapsed_time = (cv2.getTickCount() - start_time) / cv2.getTickFrequency() if elapsed_time > 1: # 如果超过一秒,重新计算帧率和加速度 fps = frame_count / elapsed_time frame_rate = 1 / fps # 帧率 # 在这里可以根据实际需计算加速度,但通常需要连续帧的运动信息 # 加速度计算可能涉及二次差分或更复杂的算法,例如使用Kalman滤波器 # 加速度 = (current_speed - previous_speed) / frame_rate start_time = cv2.getTickCount() frame_count = 0 frame1 = frame2 frame2 = video_cap.read() ret, frame1 = video_cap.read() # 结束后关闭视频流 video_cap.release()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值