从 GitHub 上下载最新源码,在顶层 CMakeLists.txt 中添加如下交叉编译器设置,并准备好 libjpeg 等依赖库
# Cross-compiler settings: both paths must be absolute.
set (CMAKE_C_COMPILER "/home/chaosi/gcc-linaro-7.5.0-2019.12-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc")
# Fixed: the C++ compiler path was missing its leading '/', which made it a
# relative path that CMake cannot resolve.
set (CMAKE_CXX_COMPILER "/home/chaosi/gcc-linaro-7.5.0-2019.12-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-g++")
依次执行以下命令即可编译生成可以在其嵌入式linux板卡上运行的程序
# Enter the mjpg-streamer source directory
cd mjpg-streamer-experimental
# Configure and build out-of-tree to keep the source tree clean
mkdir _build
cd _build
# ENABLE_HTTP_MANAGEMENT turns on the HTTP management interface option
cmake -DENABLE_HTTP_MANAGEMENT=ON ..
make
# NOTE(review): with a cross-compiler configured, this installs ARM binaries
# into the host's install prefix — confirm the install destination is intended
sudo make install
首先列出程序框架:
global结构体就是上图中的global_buffer,充当输入和输出的中转站
/* Global exchange store shared by all plugins: input plugins deposit frames
 * here and output plugins consume them (the "global_buffer" in the diagram). */
struct _globals {
/* set non-zero to signal all plugin threads to shut down */
int stop;
/* input plugins (one per camera) and how many are registered */
input in[MAX_INPUT_PLUGINS];
int incnt;
/* output plugins (e.g. the HTTP streamer) and how many are registered */
output out[MAX_OUTPUT_PLUGINS];
int outcnt;
};
调用input_init,在input_run函数中会分配buf空间
用context描述每个摄像头
/* One context per camera.
 * NOTE(review): excerpt — pctx is allocated elsewhere in input_init before
 * being stored below; the allocation is not visible here. */
context *pctx;
pglobal = param->global; //point at the shared global store
pglobal->in[id].context = pctx; //hang this camera's private data off pglobal
调用init_videoIn初始化struct vdIn *videoIn;并分配以下两个和摄像头采集相关的缓冲区
/* NOTE(review): truncated excerpt — the closing parenthesis and the error
 * branch are omitted. init_videoIn initializes struct vdIn and allocates the
 * capture buffers described below. */
if(init_videoIn(pctx->videoIn, dev, width, height, fps, format, 1, pctx->pglobal, id, tvnorm)
/* Per-camera V4L2 capture state (excerpt — the real struct has more fields). */
struct vdIn {
/* raw frame buffer; per the copy in cam_thread it holds the frame handed to
 * memcpy_picture — presumably the driver's (M)JPEG data, TODO confirm */
unsigned char *tmpbuffer;
/* second capture buffer — presumably used when format conversion to JPEG is
 * needed; verify against init_videoIn */
unsigned char *framebuffer;
};
然后执行input_run函数创建摄像头线程,由此可见,input_uvc是mjpg_streamer和v4l2uvc之间的桥梁
/* Start the capture thread for camera <id>.
 * NOTE(review): truncated excerpt — the function body continues past this
 * view (no closing brace visible). */
int input_run(int id)
{
input * in = &pglobal->in[id]; //fetch this camera's slot from the global store
context *pctx = (context*)in->context; //recover the per-camera context stored by input_init
/* create thread and pass context to thread function */
pthread_create(&(pctx->threadID), NULL, cam_thread, in);
在cam_thread中获取每一帧图像,如果不是jpeg格式就转换,如果是jpeg格式就直接copy到global的buf中
if(uvcGrab(pcontext->videoIn) < 0) { //a grab failure sets the stop flag
/* NOTE(review): in the upstream mjpg-streamer source the copy below happens
 * AFTER this error branch, not inside it — the excerpt elides the intervening
 * error handling, so the braces here do not match the real control flow. */
DBG("copying frame from input: %d\n", (int)pcontext->id);
//copy the frame from struct vdIn's buffer into the global store
pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbytesused);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
待续