1. 如何将mjpg-streamer移植到开发板上
文件系统:fs_mini_mdev_new_auto_wifi_ap.tar.bz2
(1) libjpeg 的移植
tar xzvf libjpeg-turbo-1.2.1.tar.gz
cd libjpeg-turbo-1.2.1
mkdir tmp
./configure --prefix=/work/jz2440/libjpeg-turbo-1.2.1/tmp --host=arm-linux
make
make install
cp /work/jz2440/libjpeg-turbo-1.2.1/tmp/lib/*so* /work/nfs_root/fs_mini_mdev_new/lib/ -d
(2) mjpg-streamer 的移植:
tar xvf mjpg-streamer-r63.tar.gz
cd mjpg-streamer-r63
修改所有的Makefile
--(1) 将 CC=gcc 修改为 CC=arm-linux-gcc
--(2) 修改plugins/input_uvc/Makefile
a. 将
CFLAGS += -O2 -DLINUX -D_GNU_SOURCE -Wall -shared -fPIC
改为
CFLAGS += -O2 -DLINUX -D_GNU_SOURCE -Wall -shared -fPIC -I /work/jz2440/libjpeg-turbo-1.2.1/tmp/include
注意:
-I /work/jz2440/libjpeg-turbo-1.2.1/tmp/include // 是编译libjpeg 生成的文件
b. 将
input_uvc.so: $(OTHER_HEADERS) input_uvc.c v4l2uvc.lo jpeg_utils.lo dynctrl.lo
$(CC) $(CFLAGS) -ljpeg -o $@ input_uvc.c v4l2uvc.lo
jpeg_utils.lo dynctrl.lo
改为
input_uvc.so: $(OTHER_HEADERS) input_uvc.c v4l2uvc.lo jpeg_utils.lo dynctrl.lo
$(CC) $(CFLAGS) -ljpeg -L /work/jz2440/libjpeg-turbo-1.2.1/tmp/lib -o
$@ input_uvc.c v4l2uvc.lo jpeg_utils.lo dynctrl.lo
make
cp mjpg_streamer /work/nfs_root/fs_mini_mdev_new/bin/
cp *so* /work/nfs_root/fs_mini_mdev_new/lib/ -d
2.分析源码
input_init();
output_init();
input_run();
output_run();
/* Global state ("database") shared between the input plugin and the output plugins. */
struct _globals { // global structure
int stop; // set non-zero to request shutdown; polled by the while(!pglobal->stop) loops
/* signal fresh frames */
pthread_mutex_t db; // protects buf/size; held around memcpy_picture in cam_thread
pthread_cond_t db_update; // broadcast by cam_thread whenever a fresh frame lands in buf
/* global JPG frame, this is more or less the "database" */
unsigned char *buf; // latest JPEG frame; malloc'ed in input_run, filled by memcpy_picture
int size; // number of valid bytes currently in buf (return value of memcpy_picture)
/* input plugin */
input in;
/* output plugin */
output out[MAX_OUTPUT_PLUGINS];
int outcnt; // number of active output plugins in out[]
};
/* Per-camera state used by the input_uvc plugin (set up in init_videoIn/init_v4l2). */
struct vdIn {
int fd; // descriptor from open(videodevice, O_RDWR)
char *videodevice; // V4L2 device node path opened in init_v4l2
char *status;
char *pictName;
struct v4l2_capability cap; // filled by VIDIOC_QUERYCAP (device capabilities)
struct v4l2_format fmt; // passed to VIDIOC_S_FMT (resolution, MJPEG/YUV pixel format)
struct v4l2_buffer buf; // scratch descriptor for VIDIOC_QUERYBUF/QBUF/DQBUF
struct v4l2_requestbuffers rb; // passed to VIDIOC_REQBUFS when requesting kernel buffers
void *mem[NB_BUFFER]; // mmap'ed kernel capture buffers (PROT_READ, MAP_SHARED)
unsigned char *tmpbuffer; // staging buffer a captured frame is memcpy'ed into (uvcGrab)
unsigned char *framebuffer; // calloc'ed buffer that receives camera data
int isstreaming;
int grabmethod;
int width;
int height;
int fps; // requested frame rate (applied via VIDIOC_S_PARM)
int formatIn;
int formatOut;
int framesizeIn; // frame size; used to size the global frame buffer in input_run
int signalquit;
int toggleAvi;
int getPict;
int rawFrameCapture;
/* raw frame capture */
unsigned int fileCounter;
/* raw frame stream capture */
unsigned int rfsFramesWritten;
unsigned int rfsBytesWritten;
/* raw stream capture */
FILE *captureFile;
unsigned int framesWritten;
unsigned int bytesWritten;
int framecount;
int recordstart;
int recordtime;
};
mjpg_streamer.c
main(int argc, char *argv[])
pthread_mutex_init(&global.db, NULL)
pthread_cond_init(&global.db_update, NULL)
填充全局结构体里参数
input_init(input_parameter *param) --param = global.in.param
解析参数
init_videoIn
struct vdIn *vd 设置vdIn结构体
init_v4l2
open(vd->videodevice, O_RDWR) 打开设备节点
ioctl(vd->fd, VIDIOC_QUERYCAP, &vd->cap) 查询设备支持的功能
ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt) 设置摄像头的输出格式(分辨率、输出格式(MJPEG/YUV))
ioctl(vd->fd, VIDIOC_S_PARM, setfps) 设置摄像头参数,比如输出帧率
ioctl(vd->fd, VIDIOC_REQBUFS, &vd->rb) 申请缓存
ioctl(vd->fd, VIDIOC_QUERYBUF, &vd->buf) 获取内核空间的视频缓冲区的信息
vd->mem[i] = mmap(0 /* start anywhere */ , 做映射操作
vd->buf.length, PROT_READ, MAP_SHARED, vd->fd,
vd->buf.m.offset);
ioctl(vd->fd, VIDIOC_QBUF, &vd->buf) 投放一个空的视频缓冲区到视频缓冲区队列中
vd->framebuffer = (unsigned char *) calloc 分配一个临时缓冲区,用于接收摄像头数据
input_run --pglobal = param->global
pglobal->buf = malloc(videoIn->framesizeIn); 给仓库分配内存空间
pthread_create(&cam, 0, cam_thread, NULL); 创建线程
cam_thread
while( !pglobal->stop )
{
uvcGrab(videoIn)
ioctl(vd->fd, VIDIOC_STREAMON, &type) 使能视频设备
ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf) 从缓冲区队列,取出有数据的缓存区
memcpy(vd->tmpbuffer, vd->mem[vd->buf.index] ... 拷贝数据 --
或 memcpy (vd->framebuffer, vd->mem[vd->buf.index] ...
ioctl(vd->fd, VIDIOC_QBUF, &vd->buf) 投放一个空的视频缓冲区到视频缓冲区队列中
pthread_mutex_lock( &pglobal->db );
pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer
, videoIn->buf.bytesused) 数据最终存放到 pglobal->buf
pthread_cond_broadcast(&pglobal->db_update); 激活所有等待线程
pthread_mutex_unlock( &pglobal->db );
}
output_init(output_parameter *param) --param = global.out.param 该函数只是解析参数,然后给相应的变量赋值
servers[param->id].
output_run ----pglobal = param->global
pthread_create(&(servers[id].threadID), NULL, server_thread, &(servers[id]));
server_thread(context *pcontext) 创建线程
pcontext->sd = socket(PF_INET, SOCK_STREAM, 0)
bind(pcontext->sd, (struct sockaddr*)&addr, sizeof(addr))
listen(pcontext->sd, 10)
while ( !pglobal->stop )
{
pcfd->fd = accept(pcontext->sd, (struct sockaddr *)&client_addr, &addr_len) 阻塞直到有客户端连接
pthread_create(&client, NULL, &client_thread, pcfd)
client_thread
从客户端接收一些数据,用来表示客户端发来的请求
_readline(lcfd.fd, &iobuf, buffer, sizeof(buffer)-1, 5)
strstr(buffer, "GET /?action=stream") 解析得到的数据
req.type = A_STREAM
...
case A_STREAM:
send_stream(lcfd.fd);
while ( !pglobal->stop )
{
pthread_cond_wait(&pglobal->db_update, &pglobal->db)
memcpy(frame, pglobal->buf, frame_size) 从仓库中取出一帧数据
write(fd, buffer, strlen(buffer) 发送一帧图片
write(fd, "BOUNDARY" ... 一帧图片结束标志
}
}
3.自己写客户端
(1).发送一个请求字符串
"GET /?action=snapshot\n"
"GET /?action=stream\n"
"GET /?action=command\n"
(2).再发送一次字符串
如果我们不使用密码功能,则只需发送任意一个长度小于2字节的字符串,比如:
"f\n"
如果发送的请求是:"GET /?action=snapshot\n"
(3).需要接收一次字符串(是服务器发过来的报文)
(4).接收一帧图片
如果发送的请求是:"GET /?action=stream\n"
(3).需要接收一次字符串(是服务器发过来的报文)
while(1)
{
(4).再接收一次报文,解析它,得到一帧图片的大小(size)
(5).接收size个字节的数据
}
文件系统:fs_mini_mdev_new_auto_wifi_ap.tar.bz2
(1) libjpeg 的移植
tar xzvf libjpeg-turbo-1.2.1.tar.gz
cd libjpeg-turbo-1.2.1
mkdir tmp
./configure --prefix=/work/jz2440/libjpeg-turbo-1.2.1/tmp --host=arm-linux
make
make install
cp /work/jz2440/libjpeg-turbo-1.2.1/tmp/lib/*so* /work/nfs_root/fs_mini_mdev_new/lib/ -d
(2) mjpg-streamer 的移植:
tar xvf mjpg-streamer-r63.tar.gz
cd mjpg-streamer-r63
修改所有的Makefile
--(1) 将 CC=gcc 修改为 CC=arm-linux-gcc
--(2) 修改plugins/input_uvc/Makefile
a. 将
CFLAGS += -O2 -DLINUX -D_GNU_SOURCE -Wall -shared -fPIC
改为
CFLAGS += -O2 -DLINUX -D_GNU_SOURCE -Wall -shared -fPIC -I /work/jz2440/libjpeg-turbo-1.2.1/tmp/include
注意:
-I /work/jz2440/libjpeg-turbo-1.2.1/tmp/include // 是编译libjpeg 生成的文件
b. 将
input_uvc.so: $(OTHER_HEADERS) input_uvc.c v4l2uvc.lo jpeg_utils.lo dynctrl.lo
$(CC) $(CFLAGS) -ljpeg -o $@ input_uvc.c v4l2uvc.lo
jpeg_utils.lo dynctrl.lo
改为
input_uvc.so: $(OTHER_HEADERS) input_uvc.c v4l2uvc.lo jpeg_utils.lo dynctrl.lo
$(CC) $(CFLAGS) -ljpeg -L /work/jz2440/libjpeg-turbo-1.2.1/tmp/lib -o
$@ input_uvc.c v4l2uvc.lo jpeg_utils.lo dynctrl.lo
make
cp mjpg_streamer /work/nfs_root/fs_mini_mdev_new/bin/
cp *so* /work/nfs_root/fs_mini_mdev_new/lib/ -d
2.分析源码
input_init();
output_init();
input_run();
output_run();
/* Global state ("database") shared between the input plugin and the output plugins. */
struct _globals { // global structure
int stop; // set non-zero to request shutdown; polled by the while(!pglobal->stop) loops
/* signal fresh frames */
pthread_mutex_t db; // protects buf/size; held around memcpy_picture in cam_thread
pthread_cond_t db_update; // broadcast by cam_thread whenever a fresh frame lands in buf
/* global JPG frame, this is more or less the "database" */
unsigned char *buf; // latest JPEG frame; malloc'ed in input_run, filled by memcpy_picture
int size; // number of valid bytes currently in buf (return value of memcpy_picture)
/* input plugin */
input in;
/* output plugin */
output out[MAX_OUTPUT_PLUGINS];
int outcnt; // number of active output plugins in out[]
};
/* Per-camera state used by the input_uvc plugin (set up in init_videoIn/init_v4l2). */
struct vdIn {
int fd; // descriptor from open(videodevice, O_RDWR)
char *videodevice; // V4L2 device node path opened in init_v4l2
char *status;
char *pictName;
struct v4l2_capability cap; // filled by VIDIOC_QUERYCAP (device capabilities)
struct v4l2_format fmt; // passed to VIDIOC_S_FMT (resolution, MJPEG/YUV pixel format)
struct v4l2_buffer buf; // scratch descriptor for VIDIOC_QUERYBUF/QBUF/DQBUF
struct v4l2_requestbuffers rb; // passed to VIDIOC_REQBUFS when requesting kernel buffers
void *mem[NB_BUFFER]; // mmap'ed kernel capture buffers (PROT_READ, MAP_SHARED)
unsigned char *tmpbuffer; // staging buffer a captured frame is memcpy'ed into (uvcGrab)
unsigned char *framebuffer; // calloc'ed buffer that receives camera data
int isstreaming;
int grabmethod;
int width;
int height;
int fps; // requested frame rate (applied via VIDIOC_S_PARM)
int formatIn;
int formatOut;
int framesizeIn; // frame size; used to size the global frame buffer in input_run
int signalquit;
int toggleAvi;
int getPict;
int rawFrameCapture;
/* raw frame capture */
unsigned int fileCounter;
/* raw frame stream capture */
unsigned int rfsFramesWritten;
unsigned int rfsBytesWritten;
/* raw stream capture */
FILE *captureFile;
unsigned int framesWritten;
unsigned int bytesWritten;
int framecount;
int recordstart;
int recordtime;
};
mjpg_streamer.c
main(int argc, char *argv[])
pthread_mutex_init(&global.db, NULL)
pthread_cond_init(&global.db_update, NULL)
填充全局结构体里参数
input_init(input_parameter *param) --param = global.in.param
解析参数
init_videoIn
struct vdIn *vd 设置vdIn结构体
init_v4l2
open(vd->videodevice, O_RDWR) 打开设备节点
ioctl(vd->fd, VIDIOC_QUERYCAP, &vd->cap) 查询设备支持的功能
ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt) 设置摄像头的输出格式(分辨率、输出格式(MJPEG/YUV))
ioctl(vd->fd, VIDIOC_S_PARM, setfps) 设置摄像头参数,比如输出帧率
ioctl(vd->fd, VIDIOC_REQBUFS, &vd->rb) 申请缓存
ioctl(vd->fd, VIDIOC_QUERYBUF, &vd->buf) 获取内核空间的视频缓冲区的信息
vd->mem[i] = mmap(0 /* start anywhere */ , 做映射操作
vd->buf.length, PROT_READ, MAP_SHARED, vd->fd,
vd->buf.m.offset);
ioctl(vd->fd, VIDIOC_QBUF, &vd->buf) 投放一个空的视频缓冲区到视频缓冲区队列中
vd->framebuffer = (unsigned char *) calloc 分配一个临时缓冲区,用于接收摄像头数据
input_run --pglobal = param->global
pglobal->buf = malloc(videoIn->framesizeIn); 给仓库分配内存空间
pthread_create(&cam, 0, cam_thread, NULL); 创建线程
cam_thread
while( !pglobal->stop )
{
uvcGrab(videoIn)
ioctl(vd->fd, VIDIOC_STREAMON, &type) 使能视频设备
ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf) 从缓冲区队列,取出有数据的缓存区
memcpy(vd->tmpbuffer, vd->mem[vd->buf.index] ... 拷贝数据 --
或 memcpy (vd->framebuffer, vd->mem[vd->buf.index] ...
ioctl(vd->fd, VIDIOC_QBUF, &vd->buf) 投放一个空的视频缓冲区到视频缓冲区队列中
pthread_mutex_lock( &pglobal->db );
pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer
, videoIn->buf.bytesused) 数据最终存放到 pglobal->buf
pthread_cond_broadcast(&pglobal->db_update); 激活所有等待线程
pthread_mutex_unlock( &pglobal->db );
}
output_init(output_parameter *param) --param = global.out.param 该函数只是解析参数,然后给相应的变量赋值
servers[param->id].
output_run ----pglobal = param->global
pthread_create(&(servers[id].threadID), NULL, server_thread, &(servers[id]));
server_thread(context *pcontext) 创建线程
pcontext->sd = socket(PF_INET, SOCK_STREAM, 0)
bind(pcontext->sd, (struct sockaddr*)&addr, sizeof(addr))
listen(pcontext->sd, 10)
while ( !pglobal->stop )
{
pcfd->fd = accept(pcontext->sd, (struct sockaddr *)&client_addr, &addr_len) 阻塞直到有客户端连接
pthread_create(&client, NULL, &client_thread, pcfd)
client_thread
从客户端接收一些数据,用来表示客户端发来的请求
_readline(lcfd.fd, &iobuf, buffer, sizeof(buffer)-1, 5)
strstr(buffer, "GET /?action=stream") 解析得到的数据
req.type = A_STREAM
...
case A_STREAM:
send_stream(lcfd.fd);
while ( !pglobal->stop )
{
pthread_cond_wait(&pglobal->db_update, &pglobal->db)
memcpy(frame, pglobal->buf, frame_size) 从仓库中取出一帧数据
write(fd, buffer, strlen(buffer) 发送一帧图片
write(fd, "BOUNDARY" ... 一帧图片结束标志
}
}
3.自己写客户端
(1).发送一个请求字符串
"GET /?action=snapshot\n"
"GET /?action=stream\n"
"GET /?action=command\n"
(2).再发送一次字符串
如果我们不使用密码功能,则只需发送任意一个长度小于2字节的字符串,比如:
"f\n"
如果发送的请求是:"GET /?action=snapshot\n"
(3).需要接收一次字符串(是服务器发过来的报文)
(4).接收一帧图片
如果发送的请求是:"GET /?action=stream\n"
(3).需要接收一次字符串(是服务器发过来的报文)
while(1)
{
(4).再接收一次报文,解析它,得到一帧图片的大小(size)
(5).接收size个字节的数据
}