相关链接:
《 刚刚完成的一个进程通信及托管非托管混合编程的总结之概述》
《 刚刚完成的一个进程通信及托管非托管混合编程的总结之自定义数据》
《 刚刚完成的一个进程通信及托管非托管混合编程的总结之CFileMapping》
FrameProcessor这个程序,由于是控制台程序,所以主要的代码都在FrameProcessor.cpp中。首先是一些预定义、包含:
然后是一些全局变量:
下面是对视频帧进行运算、录像的函数,可以看到其参数均为托管代码类型。另外,可以看到,全局变量中定义了两个数组px和pxRecord,它们都是用来保存位图的像素数据的,下面的函数中也相应地用它们分别建立了两个Bitmap对象lastFrame和frameToBeRecorded。从它们的名字和相关处理代码可以看到,lastFrame是用来做画矩形处理以及传递给motionDetector->ProcessFrame(lastFrame)实现移动侦测的,而frameToBeRecorded则只是用在writer->AddFrame(frameToBeRecorded)中实现录像的。为什么要分开做呢?因为发现motionDetector->ProcessFrame(lastFrame)会改变lastFrame的内容,如果直接用lastFrame进行录像的话,画面上会有一些不必要的信息,所以只好多用些内存,牺牲一点点性能,单独保存一个frameToBeRecorded用来录像。
最后是main函数,其中实现了消息机制。这里我没有用默认生成的int _tmain(int argc, _TCHAR* argv[]),而是改成了void main(),只是为了简化吧。
进程A中的原理与此类似,区别仅在于以CMainFrame的成员变量的形式定义了这里的全局变量,而不同步骤也分散在不同的成员函数中,不再赘述。
《 刚刚完成的一个进程通信及托管非托管混合编程的总结之概述》
《 刚刚完成的一个进程通信及托管非托管混合编程的总结之自定义数据》
《 刚刚完成的一个进程通信及托管非托管混合编程的总结之CFileMapping》
FrameProcessor这个程序,由于是控制台程序,所以主要的代码都在FrameProcessor.cpp中。首先是一些预定义、包含:
#include
"
stdafx.h
"
#include " FileMapping.h "
#pragma managed
// #using "System.Drawing.dll"
// #using "VideoMonitorLib.dll"
// 上面两个dll改为在工程属性中引用
using namespace MotionDetector;
using namespace Avi;
using namespace System;
using namespace System::IO;
using namespace System::Drawing;
using namespace System::Drawing::Imaging;
using namespace System::Runtime::InteropServices;
#include " FileMapping.h "
#pragma managed
// #using "System.Drawing.dll"
// #using "VideoMonitorLib.dll"
// 上面两个dll改为在工程属性中引用
using namespace MotionDetector;
using namespace Avi;
using namespace System;
using namespace System::IO;
using namespace System::Drawing;
using namespace System::Drawing::Imaging;
using namespace System::Runtime::InteropServices;
然后是一些全局变量:
// ---- Globals shared between main()'s message loop and ProcessNewFrame() ----
// NOTE(review): the original listing declared most of these twice (a
// blog-export paste artifact); redefining the same globals does not
// compile, so the duplicates were removed.
DWORD dwCaptureTID;                        // thread id of the capture process' message thread
DWORD dwProcessorTID;                      // this process' main thread id
CFileMapping myFileMapping;                // shared-memory channel to the capture process
char * buffer = new char [ sizeof (FRAME)];// raw staging buffer for one FRAME read
FRAME * pFrame = new FRAME();              // decoded view of the current frame
BYTE * px = new BYTE[ 3145728 ];           // pixel staging buffer for motion detection (3 MB)
BYTE * pxRecord = new BYTE[ 3145728 ];     // separate pixel buffer for recording (ProcessFrame mutates px's bitmap)
TCHAR szPath[MAX_PATH];                    // directory of this executable; recordings are saved here
bool blnIsRecording;                       // true while an AVI clip is being written
long nMINFRAMES;                           // minimum clip length, in frames
long nRecordedFrames;                      // frames written to the current clip so far
int nTimeOutFrames = 0 ;                   // loop iterations since the last frame arrived
下面是对视频帧进行运算、录像的函数,可以看到其参数均为托管代码类型。另外,可以看到,全局变量中定义了两个数组px和pxRecord,它们都是用来保存位图的像素数据的,下面的函数中也相应地用它们分别建立了两个Bitmap对象lastFrame和frameToBeRecorded。从它们的名字和相关处理代码可以看到,lastFrame是用来做画矩形处理以及传递给motionDetector->ProcessFrame(lastFrame)实现移动侦测的,而frameToBeRecorded则只是用在writer->AddFrame(frameToBeRecorded)中实现录像的。为什么要分开做呢?因为发现motionDetector->ProcessFrame(lastFrame)会改变lastFrame的内容,如果直接用lastFrame进行录像的话,画面上会有一些不必要的信息,所以只好多用些内存,牺牲一点点性能,单独保存一个frameToBeRecorded用来录像。
// Reads the newest captured frame from the shared file mapping, runs motion
// detection on it and, when motion is found (or a recording is still within
// its minimum length), appends the frame to the AVI file.
//
// Parameters (managed C++/CLI types):
//   motionDetector - detector whose ProcessFrame() mutates the bitmap it is
//                    given; this is why a separate copy (pxRecord) is recorded.
//   writer         - AVI writer used for recording.
//   path           - directory in which new .avi files are created.
// Returns FRMRST_MOTION, FRMRST_NOMOTION or FRMRST_ERROR.
int ProcessNewFrame(MotionDetector3Optimized ^ motionDetector, AVIWriter ^ writer, String ^ path)
{
	int nResult;
	// Pull the latest frame out of the shared memory written by the capture process.
	myFileMapping.Read(buffer);
	memcpy(pFrame, buffer, sizeof(FRAME));
	nTimeOutFrames = 0;
	int nWidth = pFrame->image.bmWidth;
	int nHeigh = pFrame->image.bmHeight;
	int nBytesPerPix = pFrame->image.bmBitsPixel / 8;
	// Map the raw bit depth onto a GDI+ pixel format; unknown depths fall
	// back to 24bpp RGB.
	PixelFormat pf;
	switch (pFrame->image.bmBitsPixel)
	{
	case 8:
		pf = PixelFormat::Format8bppIndexed;
		break;
	case 16:
		pf = PixelFormat::Format16bppRgb565;
		break;
	case 24:
		pf = PixelFormat::Format24bppRgb;
		break;
	case 32:
		pf = PixelFormat::Format32bppRgb;
		break;
	default:
		pf = PixelFormat::Format24bppRgb;
		break;
	}
	// Keep two independent copies of the pixels: ProcessFrame() draws into the
	// bitmap it analyses, so the recorded frame must come from an untouched
	// buffer (pxRecord).
	memcpy(px, pFrame->bit, nWidth * nHeigh * nBytesPerPix);
	memcpy(pxRecord, pFrame->bit, nWidth * nHeigh * nBytesPerPix);
	// NOTE(review): lastFrame is forced to 24bpp regardless of pf --
	// presumably the motion detector only accepts 24bpp; confirm.
	Bitmap ^ lastFrame = gcnew Bitmap(nWidth, nHeigh, pFrame->image.bmWidthBytes, PixelFormat::Format24bppRgb, IntPtr(px));
	Bitmap ^ frameToBeRecorded = gcnew Bitmap(nWidth, nHeigh, pFrame->image.bmWidthBytes, pf, IntPtr(pxRecord));
	// If a detection rectangle was supplied, restrict analysis to that region.
	if (pFrame->rect.width > 0 && pFrame->rect.height > 0)
	{
		System::Drawing::Rectangle rc(pFrame->rect.x, pFrame->rect.y, pFrame->rect.width, pFrame->rect.height);
		lastFrame = lastFrame->Clone(rc, System::Drawing::Imaging::PixelFormat::DontCare);
	}
	// Run motion detection (this mutates lastFrame's pixels).
	motionDetector->ProcessFrame(lastFrame);
	// Compare the measured motion level against the configured threshold.
	if (motionDetector->MotionLevel >= pFrame->fMotionLevel)
	{
		// Motion detected: on the first frame of a burst, open a new AVI
		// file named after the current timestamp.
		if (!blnIsRecording)
		{
			DateTime date = DateTime::Now;
			String ^ fileName = String::Format(path + "/{0}-{1}-{2} {3}-{4}-{5}.avi",
				date.Year, date.Month, date.Day, date.Hour, date.Minute, date.Second);
			try
			{
				writer->Open(fileName, nWidth, nHeigh);
			}
			catch (ApplicationException ^ e)
			{
				// BUGFIX: the original only noted the error and then fell
				// through to AddFrame() on an unopened writer, with
				// FRMRST_ERROR later overwritten by FRMRST_MOTION.
				Console::WriteLine(e->ToString());
				delete frameToBeRecorded;
				delete lastFrame;
				return FRMRST_ERROR;
			}
			nRecordedFrames = 0;
			blnIsRecording = true;
		}
		try
		{
			writer->AddFrame(frameToBeRecorded);
		}
		catch (ApplicationException ^ e)
		{
			// BUGFIX: report the failure instead of masking it with the
			// FRMRST_MOTION assignment below, as the original did.
			Console::WriteLine(e->ToString());
			delete frameToBeRecorded;
			delete lastFrame;
			return FRMRST_ERROR;
		}
		nRecordedFrames++;
		nResult = FRMRST_MOTION;
	}
	else
	{
		if (blnIsRecording)
		{
			// No motion, but keep recording until the clip reaches its
			// minimum length, then close it. (Braces added: the original
			// relied on dangling-else pairing with the inner if.)
			if (nRecordedFrames < nMINFRAMES)
			{
				try
				{
					writer->AddFrame(frameToBeRecorded);
				}
				catch (ApplicationException ^ e)
				{
					Console::WriteLine(e->ToString());
					// BUGFIX: dispose the bitmaps before the early return.
					delete frameToBeRecorded;
					delete lastFrame;
					return FRMRST_ERROR;
				}
				nRecordedFrames++;
			}
			else
			{
				writer->Close();
				blnIsRecording = false;
			}
		}
		nResult = FRMRST_NOMOTION;
	}
	// delete on a handle calls Dispose(): release the bitmaps promptly
	// instead of waiting for the garbage collector.
	delete frameToBeRecorded;
	delete lastFrame;
	return nResult;
}
// NOTE(review): the brace-block below is a verbatim duplicate of the body
// of ProcessNewFrame above -- an artifact of the blog-post export. It has
// no function header and would not compile as-is; it is kept only because
// it is part of the article text.
{
int nResult;
myFileMapping.Read(buffer);
memcpy(pFrame,buffer,sizeof(FRAME));
nTimeOutFrames=0;
int nWidth=pFrame->image.bmWidth;
int nHeigh=pFrame->image.bmHeight;
int nBytesPerPix=pFrame->image.bmBitsPixel/8;
PixelFormat pf;
switch(pFrame->image.bmBitsPixel)
{
case 8:
{
pf=PixelFormat::Format8bppIndexed;
break;
}
case 16:
{
pf=PixelFormat::Format16bppRgb565;
break;
}
case 24:
{
pf=PixelFormat::Format24bppRgb;
break;
}
case 32:
{
pf=PixelFormat::Format32bppRgb;
break;
}
default:
{
pf=PixelFormat::Format24bppRgb;
break;
}
}
memcpy(px,pFrame->bit,nWidth*nHeigh*nBytesPerPix);
memcpy(pxRecord,pFrame->bit,nWidth*nHeigh*nBytesPerPix);
Bitmap ^ lastFrame=gcnew Bitmap(nWidth,nHeigh,pFrame->image.bmWidthBytes,PixelFormat::Format24bppRgb,IntPtr(px));
Bitmap ^ frameToBeRecorded=gcnew Bitmap(nWidth,nHeigh,pFrame->image.bmWidthBytes,pf,IntPtr(pxRecord));
// draw the rectangle
if (pFrame->rect.width > 0 && pFrame->rect.height > 0) // a detection rectangle was supplied
{
System::Drawing::Rectangle rc(pFrame->rect.x,pFrame->rect.y,pFrame->rect.width,pFrame->rect.height);
lastFrame = lastFrame->Clone(rc, System::Drawing::Imaging::PixelFormat::DontCare);
}
// apply motion detector
motionDetector->ProcessFrame(lastFrame);
// check motion level
if (motionDetector->MotionLevel >= pFrame->fMotionLevel)
{
// lets save the frame
if(!blnIsRecording)
{
DateTime date = DateTime::Now;
String ^ fileName = String::Format(path+"/{0}-{1}-{2} {3}-{4}-{5}.avi",
date.Year, date.Month, date.Day, date.Hour, date.Minute, date.Second);
// open AVI file
try
{
writer->Open(fileName, nWidth, nHeigh);
}
catch (ApplicationException ^ e)
{
Console::WriteLine(e->ToString());
nResult= FRMRST_ERROR;
}
nRecordedFrames=0;
blnIsRecording=true;
}
try
{
writer->AddFrame(frameToBeRecorded);
}
catch (ApplicationException ^ e)
{
Console::WriteLine(e->ToString());
nResult=FRMRST_ERROR;
}
nRecordedFrames++;
nResult=FRMRST_MOTION;
}
else
{
if (blnIsRecording)
if (nRecordedFrames<nMINFRAMES)
{
try
{
writer->AddFrame(frameToBeRecorded);
}
catch (ApplicationException ^ e)
{
Console::WriteLine(e->ToString());
return FRMRST_ERROR;
}
nRecordedFrames++;
}
else
{
writer->Close();
blnIsRecording=false;
}
nResult=FRMRST_NOMOTION;
}
delete frameToBeRecorded;
delete lastFrame;
return nResult;
}
最后是main函数,其中实现了消息机制。这里我没有用默认生成的int _tmain(int argc, _TCHAR* argv[]),而是改成了void main(),只是为了简化吧。
// Entry point. Registers this processor with the capture process through the
// shared file mapping, then runs a thread-message loop: on each
// USR_WM_NEW_FRAME it processes the frame and answers with
// USR_WM_MOTION_DETECTED / USR_WM_NO_MOTION, exiting on USR_WM_TERMINATE,
// on an unrecoverable error, or when the capture side times out.
// (The author deliberately simplified int _tmain(...) to void main().)
void main()
{
	dwProcessorTID = ::GetCurrentThreadId();
	// BUGFIX: use _T("frame") instead of casting a narrow literal to
	// LPCTSTR, which is wrong in UNICODE builds.
	myFileMapping.Initialize(_T("frame"), sizeof(FRAME));
	myFileMapping.Read(buffer);
	memcpy(pFrame, buffer, sizeof(FRAME));
	// Another processor instance is already flagged as running: clear the
	// flag (signalling it to stop) and quit.
	if (pFrame->flags.flgProcessorRunning)
	{
		pFrame->flags.flgProcessorRunning = false;
		myFileMapping.Write((char*)pFrame);
		return;
	}
	// Register ourselves in the shared block and tell the capture thread.
	dwCaptureTID = pFrame->CaptureTID;
	pFrame->flags.flgProcessorRunning = true;
	pFrame->ProcessorTID = dwProcessorTID;
	myFileMapping.Write((char*)pFrame);
	::PostThreadMessage(dwCaptureTID, USR_WM_PROCSESSOR_OK, (WPARAM)0, (LPARAM)0);
	nRecordedFrames = 0;
	blnIsRecording = false;
	// Set up the managed motion detector and AVI writer.
	MotionDetector3Optimized ^ motionDetector = gcnew MotionDetector3Optimized();
	motionDetector->MotionLevelCalculation = true;
	AVIWriter ^ writer = gcnew AVIWriter("wmv3");
	writer->FrameRate = 5;
	nMINFRAMES = writer->FrameRate * 10;  // record at least 10 seconds per clip
	// Recordings go next to the executable: strip the file name from the
	// module path. BUGFIX: the original searched for _T('/') (Windows paths
	// use '\\', so it never matched) and used the invalid empty character
	// literal _T('') as the terminator.
	GetModuleFileName(NULL, szPath, sizeof(szPath) / sizeof(szPath[0]));
	if (_tcsrchr(szPath, _T('\\')) != NULL)
		*_tcsrchr(szPath, _T('\\')) = _T('\0');
	String ^ path = gcnew String(szPath);
	/* Sending this message starts the capture side writing new frames: the
	   capture process only writes a new video frame after receiving
	   USR_WM_NO_MOTION or USR_WM_MOTION_DETECTED, and then posts
	   USR_WM_NEW_FRAME back to tell this processor a new frame is ready. */
	Sleep(3000);
	::PostThreadMessage(dwCaptureTID, USR_WM_NO_MOTION, (WPARAM)0, (LPARAM)0);
	while (1)
	{
		MSG msg;
		if (::PeekMessage(&msg, NULL, USR_WM_MIN, USR_WM_MAX, PM_REMOVE))
		{
			if (msg.message == USR_WM_NEW_FRAME)  // a new frame is available
			{
				int nRst = ProcessNewFrame(motionDetector, writer, path);
				if (FRMRST_ERROR == nRst)
				{
					::PostThreadMessage(dwCaptureTID, USR_WM_PROCESSOR_ERROR, (WPARAM)0, (LPARAM)0);
					break;
				}
				if (FRMRST_MOTION == nRst)
				{
					::PostThreadMessage(dwCaptureTID, USR_WM_MOTION_DETECTED, (WPARAM)0, (LPARAM)0);
				}
				if (FRMRST_NOMOTION == nRst)
				{
					::PostThreadMessage(dwCaptureTID, USR_WM_NO_MOTION, (WPARAM)0, (LPARAM)0);
				}
			}
			if (msg.message == USR_WM_TERMINATE)  // shutdown requested
			{
				break;
			}
		}
		// Time-out detection (counted in loop ticks): if the capture side has
		// stopped and enough ticks have passed, exit.
		nTimeOutFrames++;
		if (!(pFrame->flags.flgCaptureRunning) && nTimeOutFrames >= TIME_OUT_FRAMES) break;
		// Re-prod the capture side and re-publish our liveness flags;
		// flgCaptureRunning is cleared so the capture process must set it
		// again to prove it is still alive.
		::PostThreadMessage(dwCaptureTID, USR_WM_NO_MOTION, (WPARAM)0, (LPARAM)0);
		pFrame->flags.flgProcessorRunning = true;
		pFrame->flags.flgCaptureRunning = false;
		myFileMapping.Write((char*)pFrame);
		Sleep(5);
	}
	// Clean shutdown: close any open AVI and clear our running flag.
	writer->Close();
	pFrame->flags.flgProcessorRunning = false;
	myFileMapping.Write((char*)pFrame);
	return;
}
// NOTE(review): the brace-block below is a verbatim duplicate of the body
// of main() above -- an artifact of the blog-post export. It has no
// function header and would not compile as-is; it is kept only because it
// is part of the article text.
{
dwProcessorTID=::GetCurrentThreadId();
myFileMapping.Initialize((LPCTSTR)("frame"),sizeof(FRAME));
myFileMapping.Read(buffer);
memcpy(pFrame,buffer,sizeof(FRAME));
if(pFrame->flags.flgProcessorRunning)
{
pFrame->flags.flgProcessorRunning=false;
myFileMapping.Write((char*)pFrame);
return;
}
dwCaptureTID=pFrame->CaptureTID;
pFrame->flags.flgProcessorRunning=true;
pFrame->ProcessorTID=dwProcessorTID;
myFileMapping.Write((char*)pFrame);
::PostThreadMessage(dwCaptureTID, USR_WM_PROCSESSOR_OK, (WPARAM)0, (LPARAM)0);
nRecordedFrames=0;
blnIsRecording=false;
MotionDetector3Optimized ^ motionDetector=gcnew MotionDetector3Optimized();
motionDetector->MotionLevelCalculation = true;
AVIWriter ^ writer=gcnew AVIWriter("wmv3");
writer->FrameRate = 5;
nMINFRAMES=writer->FrameRate*10;
GetModuleFileName(NULL, szPath, sizeof(szPath) / sizeof(szPath[0]));
if (_tcsrchr(szPath, _T('/')) != NULL)
*_tcsrchr(szPath, _T('/')) = _T('');
String ^ path=gcnew String(szPath);
//*Sending this message starts the capture side writing new frames. The capture process only writes new video frame data after receiving USR_WM_NO_MOTION or USR_WM_MOTION_DETECTED, and after a successful write posts USR_WM_NEW_FRAME to tell the processor a new frame is ready*/
Sleep(3000);
::PostThreadMessage(dwCaptureTID, USR_WM_NO_MOTION, (WPARAM)0, (LPARAM)0);
while(1)
{
MSG msg;
if (::PeekMessage(&msg, NULL, USR_WM_MIN, USR_WM_MAX, PM_REMOVE))
{
if (msg.message == USR_WM_NEW_FRAME) // a new frame is available
{
int nRst=ProcessNewFrame(motionDetector, writer, path);
if(FRMRST_ERROR==nRst)
{ ::PostThreadMessage(dwCaptureTID, USR_WM_PROCESSOR_ERROR, (WPARAM)0, (LPARAM)0);
break;
}
if (FRMRST_MOTION==nRst)
{ ::PostThreadMessage(dwCaptureTID, USR_WM_MOTION_DETECTED, (WPARAM)0, (LPARAM)0);
}
if (FRMRST_NOMOTION==nRst)
{ ::PostThreadMessage(dwCaptureTID, USR_WM_NO_MOTION, (WPARAM)0, (LPARAM)0);
}
}
if (msg.message == USR_WM_TERMINATE) // shutdown requested
{
break;
}
}
// Time-out check (counted in loop ticks): if it times out, the capture side has stopped, so exit
nTimeOutFrames++;
if (!(pFrame->flags.flgCaptureRunning) && nTimeOutFrames>=TIME_OUT_FRAMES) break;
::PostThreadMessage(dwCaptureTID, USR_WM_NO_MOTION, (WPARAM)0, (LPARAM)0);
pFrame->flags.flgProcessorRunning=true;
pFrame->flags.flgCaptureRunning=false;
myFileMapping.Write((char*)pFrame);
Sleep(5);
}
writer->Close();
pFrame->flags.flgProcessorRunning=false;
myFileMapping.Write((char*)pFrame);
return;
}
进程A中的原理与此类似,区别仅在于以CMainFrame的成员变量的形式定义了这里的全局变量,而不同步骤也分散在不同的成员函数中,不再赘述。