Embedded Linux Video Server

/* Encode the captured video with H.264 and transmit it in real time over the network (UDP). */

#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <linux/types.h>
#include <linux/videodev.h>
#include <setjmp.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>   /* inet_addr(), used in udps_respon() below */
#include <stdint.h>      /* int32_t/uint8_t, in case T264.h does not provide them */
#include "convert.h"

#include "./avc-src-0.14/avc/common/T264.h"

#define SERVER_PORT 8888 
//#define MAX_MSG_SIZE 176*144*3 
#define MAX_MSG_SIZE 320*240*3 
#define WB
 
T264_t* m_t264;
T264_param_t m_param;
uint8_t* m_pSrc;
uint8_t* m_pDst;        /* encoder output buffer */
int32_t m_lDstSize;     /* size in bytes of one YUV 4:2:0 frame */
uint8_t* m_pPoolData;   /* datagram staging buffer: frame number + bitstream */


//WB
#define USB_VIDEO "/dev/video0"
int cam_fd;
const int bpp = 24;
struct video_mmap cam_mm;        /* memory-mapped capture request */
/* Basic camera information: device name, supported
   max/min resolution, signal source info, etc. */
struct video_capability cam_cap;
struct video_picture cam_pic;    /* brightness, contrast, etc. (the resolution lives in video_mmap) */
struct video_mbuf cam_mbuf;      /* frame info for the camera's capture buffer */
struct video_window win;         /* capture window parameters */
char *cam_data = NULL;
int nframe;
static unsigned char cam_yuv[2457616];

void read_video(char *pixels, int w, int h)
{
  int ret;
  int frame = 0;
  cam_mm.width = w;
  cam_mm.height = h;
  /* For single-frame capture, frame = 0 is sufficient. */
  cam_mm.frame = 0;
  cam_mm.format = VIDEO_PALETTE_RGB24;
  /* On success this arms the device and starts grabbing one frame; the call is non-blocking. */
  ret = ioctl(cam_fd, VIDIOCMCAPTURE, &cam_mm);
  if (ret < 0) {
    printf("ERROR: VIDIOCMCAPTURE\n");
  }
  /* Blocks until the requested frame has been fully captured. */
  ret = ioctl(cam_fd, VIDIOCSYNC, &frame);
  if (ret < 0) {
    printf("ERROR: VIDIOCSYNC\n");
  }
}
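
/* read_video() above captures into buffer 0 only and blocks in VIDIOCSYNC
   until the frame is complete. When the driver exposes several buffers
   (cam_mbuf.frames, saved in nframe by init_video() below), capture and
   encoding can overlap by cycling through them. A minimal sketch of that
   pattern under the same globals; it is not part of the original program. */
#if 0
/* Returns the index of the buffer that now holds a completed frame. */
int read_video_pipelined(int w, int h)
{
  static int cur = -1;
  int done, next;

  cam_mm.width  = w;
  cam_mm.height = h;
  cam_mm.format = VIDEO_PALETTE_RGB24;

  if (cur < 0) {                    /* first call: arm buffer 0 */
    cur = 0;
    cam_mm.frame = cur;
    if (ioctl(cam_fd, VIDIOCMCAPTURE, &cam_mm) < 0)
      printf("ERROR: VIDIOCMCAPTURE\n");
  }

  next = (cur + 1) % nframe;
  cam_mm.frame = next;              /* arm the next buffer before waiting */
  if (ioctl(cam_fd, VIDIOCMCAPTURE, &cam_mm) < 0)
    printf("ERROR: VIDIOCMCAPTURE\n");

  done = cur;
  if (ioctl(cam_fd, VIDIOCSYNC, &done) < 0)  /* wait for the current buffer */
    printf("ERROR: VIDIOCSYNC\n");

  cur = next;
  return done;   /* frame data is at cam_data + cam_mbuf.offsets[done] */
}
#endif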

void config_vid_pic()
{
  FILE *cf = NULL;  /* optional tuning file; disabled here, so the defaults below are used */
  int ret;
  if (ioctl(cam_fd, VIDIOCGPICT, &cam_pic) < 0) {
    printf("ERROR:VIDIOCGPICT\n");
  }
  cam_pic.palette = VIDEO_PALETTE_RGB24;

  if (cf == NULL)
  {
    cam_pic.brightness = 30464;
    cam_pic.hue        = 111;
    cam_pic.colour     = 555;
    cam_pic.contrast   = 43312;
    cam_pic.whiteness  = 111;
    cam_pic.depth      = 24;
    ret = ioctl(cam_fd, VIDIOCSPICT, &cam_pic);   /* write the video_picture settings back to the camera */
    if (ret < 0) {
      close(cam_fd);
      printf("ERROR: VIDIOCSPICT, can't set video_picture format\n");
    }
    return;
  }

  /* Config-file branch (currently disabled): read the five tuning
     values from a file instead of using the defaults above. */
//  fscanf(cf, "%d", &cam_pic.brightness);
//  fscanf(cf, "%d", &cam_pic.hue);
//  fscanf(cf, "%d", &cam_pic.colour);
//  fscanf(cf, "%d", &cam_pic.contrast);
//  fscanf(cf, "%d", &cam_pic.whiteness);
//  fclose(cf);
//  ret = ioctl(cam_fd, VIDIOCSPICT, &cam_pic);   /* write the settings back to the camera */
//  if (ret < 0) {
//    close(cam_fd);
//    printf("ERROR: VIDIOCSPICT, can't set video_picture format\n");
//  }
}


void init_video(int w, int h, int bpp)   /* bpp == bits per pixel */
{
  int ret;
  cam_fd = open(USB_VIDEO, O_RDWR);
  if (cam_fd < 0)
    printf("Can't open video device\n");

  ret = ioctl(cam_fd, VIDIOCGCAP, &cam_cap);   /* query basic camera information */
  if (ret < 0) {
    printf("Can't get device information: VIDIOCGCAP\n");
  }
  printf("Device name:%s\nWidth:%d ~ %d\nHeight:%d ~ %d\n",
         cam_cap.name, cam_cap.minwidth, cam_cap.maxwidth,
         cam_cap.minheight, cam_cap.maxheight);
  if (ioctl(cam_fd, VIDIOCGWIN, &win) < 0) {
    printf("ERROR:VIDIOCGWIN\n");
  }
  win.x = 0;
  win.y = 0;
  win.width = w;
  win.height = h;
  if (ioctl(cam_fd, VIDIOCSWIN, &win) < 0) {
    printf("ERROR:VIDIOCSWIN\n");
  }

  config_vid_pic();

  ret = ioctl(cam_fd, VIDIOCGMBUF, &cam_mbuf);  /* query the capture buffer's frame info */
  if (ret < 0) {
    printf("ERROR:VIDIOCGMBUF, can't get video_mbuf\n");
  }
  printf("Frames:%d\n", cam_mbuf.frames);
  nframe = cam_mbuf.frames;
  /* Map the device's capture buffer into our address space, so captured
     frames are shared via mmap instead of copied through read(). */
  cam_data = (char*)mmap(0, cam_mbuf.size, PROT_READ|PROT_WRITE, MAP_SHARED, cam_fd, 0);
  if (cam_data == MAP_FAILED) {
    printf("ERROR:mmap\n");
  }
  printf("Buffer size:%d\nOffset:%d\n", cam_mbuf.size, cam_mbuf.offsets[0]);
  InitLookupTable();
}
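
/* ConvertRGB2YUV() and InitLookupTable() come from convert.h, which is not
   shown here. The sketch below illustrates the conversion they presumably
   perform: RGB24 to planar YUV 4:2:0 (I420) with integer BT.601
   coefficients. The pixel byte order (RGB vs. BGR) and the rounding are
   assumptions, not the actual convert.h implementation. */
#if 0
static void rgb24_to_yuv420(int w, int h,
                            const unsigned char *rgb, unsigned char *yuv)
{
  unsigned char *yp = yuv;                    /* Y plane: w*h bytes */
  unsigned char *up = yuv + w * h;            /* U plane: (w/2)*(h/2) bytes */
  unsigned char *vp = up + (w / 2) * (h / 2); /* V plane: (w/2)*(h/2) bytes */
  int x, y;

  for (y = 0; y < h; y++) {
    for (x = 0; x < w; x++) {
      const unsigned char *p = rgb + (y * w + x) * 3;
      int r = p[0], g = p[1], b = p[2];

      /* Luma for every pixel (fixed-point BT.601). */
      int yv = ((66*r + 129*g + 25*b + 128) >> 8) + 16;
      *yp++ = (unsigned char)yv;

      /* Chroma once per 2x2 block (top-left pixel, no averaging). */
      if ((x & 1) == 0 && (y & 1) == 0) {
        int uv = ((-38*r -  74*g + 112*b + 128) >> 8) + 128;
        int vv = ((112*r -  94*g -  18*b + 128) >> 8) + 128;
        *up++ = (unsigned char)uv;
        *vp++ = (unsigned char)vv;
      }
    }
  }
}
#endif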


void init_param(T264_param_t* param, const char* file)
{
 int total_no;
 FILE* fd; 
 char line[255];
 int32_t b;
 if (!(fd = fopen(file,"r")))
 {
  printf("Couldn't open parameter file %s.\n", file);
  exit(-1);
 }

 memset(param, 0, sizeof(*param));
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 if (b != 4)
 {
  printf("wrong param file version, expect v4.0\n");
  exit(-1);
 }
 fgets(line, 254, fd); sscanf(line,"%d", &param->width);
 fgets(line, 254, fd); sscanf(line,"%d", &param->height);
 fgets(line, 254, fd); sscanf(line,"%d", &param->search_x);
 fgets(line, 254, fd); sscanf(line,"%d", &param->search_y);
 fgets(line, 254, fd); sscanf(line,"%d", &total_no);
 fgets(line, 254, fd); sscanf(line,"%d", &param->iframe);
 fgets(line, 254, fd); sscanf(line,"%d", &param->idrframe);
 fgets(line, 254, fd); sscanf(line,"%d", &param->b_num);
 fgets(line, 254, fd); sscanf(line,"%d", &param->ref_num);
 fgets(line, 254, fd); sscanf(line,"%d", &param->enable_rc);
 fgets(line, 254, fd); sscanf(line,"%d", &param->bitrate);
 fgets(line, 254, fd); sscanf(line,"%f", &param->framerate);
 fgets(line, 254, fd); sscanf(line,"%d", &param->qp);
 fgets(line, 254, fd); sscanf(line,"%d", &param->min_qp);
 fgets(line, 254, fd); sscanf(line,"%d", &param->max_qp);
 fgets(line, 254, fd); sscanf(line,"%d", &param->enable_stat);
 fgets(line, 254, fd); sscanf(line,"%d", &param->disable_filter);
 fgets(line, 254, fd); sscanf(line,"%d", &param->aspect_ratio);
 fgets(line, 254, fd); sscanf(line,"%d", &param->video_format);
 fgets(line, 254, fd); sscanf(line,"%d", &param->luma_coeff_cost);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_INTRA16x16) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_INTRA4x4) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_INTRAININTER) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_HALFPEL) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_QUARTPEL) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_SUBBLOCK) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_FULLSEARCH) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_DIAMONDSEACH) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_FORCEBLOCKSIZE) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_FASTINTERPOLATE) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_SAD) * b;
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_EXTRASUBPELSEARCH) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->flags |= (USE_SCENEDETECT) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_16x16P) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_16x8P) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_8x16P) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_8x8P) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_8x4P) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_4x8P) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_4x4P) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_16x16B) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_16x8B) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_8x16B) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &b);
 param->block_size |= (SEARCH_8x8B) * (!!b);
 fgets(line, 254, fd); sscanf(line,"%d", &param->cpu);
 fgets(line, 254, fd); sscanf(line, "%d", &param->cabac);

 //  fgets(line, 254, fd); sscanf(line,"%s", src_path);
 //  fgets(line, 254, fd); sscanf(line,"%s", out_path);
 //  fgets(line, 254, fd); sscanf(line,"%s", rec_path);
 //  param->rec_name = rec_path;

 fclose(fd);
}
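
/* init_param() reads one value per line, in the exact order of the sscanf
   calls above. A hypothetical beginning of fastspeed.txt matching that
   order (the values are illustrative, not the file shipped with T264;
   since sscanf only parses the leading number, trailing notes on a line
   are ignored):

     4         parameter file version (must be 4)
     320       width
     240       height
     16        search range x
     16        search range y
     300       total frame number
     30        I-frame interval
     300       IDR-frame interval
     0         number of B frames
     1         number of reference frames
     ...       remaining rate-control, flag, and block-size values follow,
               one per line, in the order read above
*/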


void init_encoder()
{
 /* Prepare the encoder. */
 const char* paramfile = "fastspeed.txt";
 init_param(&m_param, paramfile);
 m_param.direct_flag = 1;
 m_t264 = T264_open(&m_param);
 /* One YUV 4:2:0 frame: width*height luma plus half that again for chroma. */
 m_lDstSize = m_param.height * m_param.width + ((m_param.height * m_param.width) >> 1);
// m_pSrc = (uint8_t*)T264_malloc(m_lDstSize, CACHE_SIZE);
 m_pDst = (uint8_t*)T264_malloc(m_lDstSize, CACHE_SIZE);
 m_pPoolData = malloc(m_param.width * m_param.height * 3 / 2 + 1);  /* +1 for the leading frame-number byte */
}
 
void udps_respon(int sockfd, int w, int h)
{
 struct sockaddr_in addrdst;
 int32_t iActualLen;
 int row_stride = w * h * 3 / 2;   /* size of one YUV 4:2:0 frame */

 bzero(&addrdst, sizeof(struct sockaddr_in));
 addrdst.sin_family = AF_INET;
 addrdst.sin_addr.s_addr = inet_addr("192.168.8.133");   /* client address */
 addrdst.sin_port = htons(SERVER_PORT);

 while (1)
 {
  read_video(NULL, w, h);
  ConvertRGB2YUV(w, h, cam_data, cam_yuv);     /* convert RGB color space to YUV */
  iActualLen = T264_encode(m_t264, cam_yuv, m_pDst, row_stride);   /* H.264 encode */
  printf("encoded:%d, %d bytes.\n", row_stride, iActualLen);
  /* Datagram layout: 1-byte frame number, then the encoded bitstream. */
  memcpy(m_pPoolData, &m_t264->frame_num, 1);
  memcpy(m_pPoolData + 1, m_pDst, iActualLen);
  iActualLen++;
  sendto(sockfd, m_pPoolData, iActualLen, 0,
         (struct sockaddr*)&addrdst, sizeof(struct sockaddr_in));  /* send to the client */
 }
}
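
/* The client address is hard-coded in udps_respon(), and every datagram
   carries a 1-byte frame number followed by the H.264 bitstream of one
   frame. A minimal receiver matching that layout is sketched below; it is
   a separate program, not part of this file. Frames larger than one UDP
   datagram would be truncated, since the protocol has no fragmentation. */
#if 0   /* separate client program: udp_client.c */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
 static unsigned char buf[320*240*3];   /* matches MAX_MSG_SIZE on the sender */
 struct sockaddr_in addr;
 int sockfd = socket(AF_INET, SOCK_DGRAM, 0);

 memset(&addr, 0, sizeof(addr));
 addr.sin_family = AF_INET;
 addr.sin_addr.s_addr = htonl(INADDR_ANY);
 addr.sin_port = htons(8888);            /* SERVER_PORT on the sender */
 bind(sockfd, (struct sockaddr*)&addr, sizeof(addr));

 for (;;) {
  ssize_t n = recvfrom(sockfd, buf, sizeof(buf), 0, NULL, NULL);
  if (n < 1)
   continue;
  printf("frame %d: %ld bytes of H.264 data\n", buf[0], (long)(n - 1));
  /* feed buf + 1 .. buf + n - 1 into an H.264 decoder here */
 }
}
#endif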


void free_dev()
{
  printf("free device\n");
  if (cam_data && cam_data != MAP_FAILED)
    munmap(cam_data, cam_mbuf.size);  /* undo the mmap from init_video() */
  close(cam_fd);
}

int main(void) 
{  
 int sockfd; 
 struct sockaddr_in addr;

 printf("start 2.0...\n");

 sockfd=socket(AF_INET,SOCK_DGRAM,0);//建立基于UDP的SOCKET

 if(sockfd<0) 
 {
  printf("0-");
  printf("Socket Error\n"); 
  exit(1); 
 }

 

 bzero(&addr,sizeof(struct sockaddr_in)); //初始化addr结构
 addr.sin_family=AF_INET; 
 addr.sin_addr.s_addr=htonl(INADDR_ANY); //网络字节序转换
 addr.sin_port=htons(SERVER_PORT); 
 
 if(bind(sockfd,(struct sockaddr *)&addr,sizeof(struct sockaddr_in))<0 ) 
 { 
  printf(stderr,"Bind Error:%s\n",strerror(errno)); 
  exit(1); 
 }

 init_encoder();

#ifdef WB
 atexit( &free_dev );
 init_video(m_param.width,m_param.height,bpp);
#endif

 udps_respon(sockfd,m_param.width,m_param.height); 
 
 close(sockfd);

 return 0;
}