FFmpeg remote desktop monitoring

/**
 * ref. 雷霄骅 Lei Xiaohua
 */
#include <stdio.h>
#include <iostream>
#include <winsock2.h>
#pragma comment(lib,"ws2_32.lib")
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include "include/libavcodec/avcodec.h"
#include "include/libavformat/avformat.h"
#include "include/libswscale/swscale.h"
#include "include/libavdevice/avdevice.h"
#include "include/libavutil/imgutils.h"
#include "include/libavutil/opt.h"
#include "include/libavutil/imgutils.h"
//#include "includes/SDL.h"
};

//Unused placeholder for a send helper; the UDP send is done inline in main() below.
int Send(uint8_t *buf, int size) {

	return 0;
}
struct sockaddr_in G_Servaddr;

int main(int argc, char* argv[])
{
	AVFormatContext	*pFormatContext = NULL;
	int	i = 0, videoindex;
	AVCodecContext	*pCodecContext = NULL;
	AVCodec			*pCodec = NULL;
		
	FILE *fp_out;
	fp_out = fopen("ds.h264", "wb");

	AVPacket pPacket;	
	int y_size;
	FILE *fp_yuv = fopen("output.yuv", "wb+");
	struct SwsContext *img_convert_ctx;
	AVFormatContext	*pFormatContextEncod = NULL;
	AVCodecContext	*pCodecContextEncod = NULL;
	AVCodec			*pCodecEncod = NULL;
	AVCodecID codec_id = AV_CODEC_ID_H264;
	int in_w = 1920, in_h = 1080;
	int framenum = 100;
	int  ret, got_output, xy_size, got_picture;

	/*socket--------------------------------*/
	WSADATA wsa; 
	if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0)
	{
		printf("WSAStartup failed!\n");
		return 1;
	}

	int connfd;
	//socklen_t addrlen(0);
	SOCKET  ServerS = socket(AF_INET, SOCK_DGRAM, 0);
	SOCKADDR_IN DistAddr;
	DistAddr.sin_family = AF_INET;
	DistAddr.sin_port = htons(8800);
	DistAddr.sin_addr.s_addr = inet_addr("127.0.0.1"); //inet_addr("192.168.23.232");
	if (DistAddr.sin_addr.s_addr == INADDR_NONE)
	{
		printf("不可用地址!\n");
		return -1;
	}
	int time_out = 2000;
	//ret = setsockopt(ServerS, SOL_SOCKET, SO_RCVTIMEO, (char*)&time_out, sizeof(time_out));
	char	buf[150000];//150000-byte send buffer for one encoded frame

	/*socket--------------------------------*/

	av_register_all();
	avformat_network_init();
	pFormatContext = avformat_alloc_context();
	avdevice_register_all();

	AVDictionary* options = NULL;
	av_dict_set(&options, "video_size", "1920*1080", 0);//设定捕捉范围
	av_dict_set(&options, "framerate", "25", 0);
	AVInputFormat *ifmt = av_find_input_format("gdigrab");
	if (avformat_open_input(&pFormatContext, "desktop", ifmt, &options) != 0)
	{		printf("Couldn't open input stream.\n");		return -1;	}

	if (avformat_find_stream_info(pFormatContext, NULL) < 0)//read stream information
	{		printf("Couldn't find stream information.\n");		return -1;	}
	videoindex = -1;
	for (i = 0; i < pFormatContext->nb_streams; i++)
	{
		if (pFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)//find the first video stream
		{
			videoindex = i;			break;
		}
	}
	if (videoindex == -1)
	{		printf("Didn't find a video stream.\n");	return -1;	}
	av_dump_format(pFormatContext, 0, 0, 0);

	pCodecContext = pFormatContext->streams[videoindex]->codec;//decoder parameters come straight from the desktop capture stream
	pCodec = avcodec_find_decoder(pCodecContext->codec_id);//find a decoder for the captured frames
	if (pCodec == NULL)
	{
		printf("Codec not found.\n");
		return -1;
	}
	if (avcodec_open2(pCodecContext, pCodec, NULL) < 0)//open the decoder
	{
		printf("Could not open codec.\n");
		return -1;
	}
	AVFrame	*pFrame, *pFrameYUV;
	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	uint8_t *out_buffer;
	AVPacket *packet;
	out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecContext->width, pCodecContext->height, 1));
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecContext->width, pCodecContext->height, 1);
	packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	printf("--------------- File Information ----------------\n");
	//Dump format info (tbn, tbc, tbr, PAR, DAR) for debugging
	av_dump_format(pFormatContext, 0, 0, 0);

	//Optionally initialize an output format context for an RTSP push (left commented out below):
	//AVFormatContext * output_format_context_ = NULL;
	//avformat_alloc_output_context2(&output_format_context_, NULL, "rtsp", "rtsp://127.0.0.1:8800/");
	//AVDictionary *format_opts = NULL;
	//av_dict_set(&format_opts, "stimeout", "2000000", 0);
	//av_dict_set(&format_opts, "rtsp_transport", "tcp", 0);
	//avformat_write_header(output_format_context_, &format_opts);


	// encode-----------------------------------
	pCodecEncod = avcodec_find_encoder(codec_id);
	if (!pCodecEncod) {
		printf("Codec not found\n");
		return -1;
	}
	pCodecContextEncod = avcodec_alloc_context3(pCodecEncod);
	if (!pCodecContextEncod) {
		printf("Could not allocate video codec context\n");
		return -1;
	}
	pCodecContextEncod->bit_rate = 1000000;/*higher bitrate -> more data to send, but a sharper picture*/
	pCodecContextEncod->width = in_w;
	pCodecContextEncod->height = in_h;
	pCodecContextEncod->time_base.num = 1;
	pCodecContextEncod->time_base.den = 25;
	pCodecContextEncod->gop_size = 50;
	pCodecContextEncod->max_b_frames = 1;
	pCodecContextEncod->pix_fmt = AV_PIX_FMT_YUV420P;
	/*  gop_size is the keyframe interval: the distance between two IDR frames, i.e. the maximum
	number of frames in one GOP. As a rule of thumb, use at least one keyframe per second;
	more keyframes improve quality but also increase bandwidth and network load. */
	av_opt_set(pCodecContextEncod->priv_data, "preset", "slow", 0);
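	//For live monitoring, libx264's documented "tune" option can also be set here to cut
	//encoder latency (at some quality cost), e.g.:
	//    av_opt_set(pCodecContextEncod->priv_data, "tune", "zerolatency", 0);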

	if (avcodec_open2(pCodecContextEncod, pCodecEncod, NULL) < 0) {
		printf("Could not open codec\n");
		return -1;
	}
	xy_size = pCodecContextEncod->width * pCodecContextEncod->height;

	// encode-----------------------------------
	//&packet = (AVPacket *)av_malloc(sizeof(AVPacket));

	img_convert_ctx = sws_getContext(pCodecContext->width, pCodecContext->height, pCodecContext->pix_fmt,
		pCodecContext->width, pCodecContext->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	i = 0;
	//read frames from the capture device
	while (av_read_frame(pFormatContext, packet) >= 0 && i < 200)
	{
		if (packet->stream_index == videoindex)
		{
			av_init_packet(&pPacket);
			pPacket.data = NULL;
			pPacket.size = 0;
			pFrame->pts = i;
			ret = avcodec_decode_video2(pCodecContext, pFrame, &got_picture, packet);
			if (ret < 0)
			{
				printf("Decode Error.\n");
				return -1;
			}

			if (got_picture >= 1)
			{
				//one frame decoded successfully
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecContext->height,
					pFrameYUV->data, pFrameYUV->linesize);//convert the pixel format to YUV420P

				y_size = pCodecContext->width*pCodecContext->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1], 1, y_size / 4,  fp_yuv); //U	Cb 
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V Cr 
				//printf("Succeed to decode-scale-fwrite 1 frame!\n");
				pFrameYUV->width = 1920;
				pFrameYUV->height = 1080;
				pFrameYUV->format = AV_PIX_FMT_YUV420P;
				pFrameYUV->pts = i;//give the encoder a monotonically increasing pts
				/*encode-----------------------*/
	
				ret = avcodec_encode_video2(pCodecContextEncod, &pPacket, pFrameYUV, &got_output);
				if (ret < 0) {
					printf("Error encoding frame\n");
					return -1;
				}
				if (got_output) {
					//	printf("Succeed to encode frame: %5d\tsize:%5d\n", framenum, pPacket.size);
					framenum++;
					fwrite(pPacket.data, 1, pPacket.size, fp_out);
				
					/*sendudp----------------------*/
					memset(buf, 0, sizeof(buf));
					memcpy(buf, pPacket.data, pPacket.size);//copy the encoded data, not the address of the data pointer
					int result = sendto(ServerS, buf, pPacket.size, 0, (SOCKADDR *)&DistAddr, sizeof(DistAddr));//send the whole encoded packet
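					/* Note (general caveat, not specific to this code): an encoded H.264 frame can
					   exceed both a typical MTU (~1500 bytes) and the ~64 KB UDP datagram limit, and
					   here it must also fit in buf. A production sender would fragment the bitstream,
					   e.g. by packing NAL units into RTP packets, rather than one sendto() per frame. */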
//					int ret1 = av_interleaved_write_frame(output_format_context_, &pPacket);
					av_free_packet(&pPacket);
				
				}

				/*sendudp----------------------*/
				/*encode-----------------------*/
				pFrameYUV->pts++;
				i++;
			}
			else
			{
				//no frame decoded: it may be a trailing B-frame or a delayed frame; the flush-decoder pass below handles it
			}
		}
		av_free_packet(packet);
	}

	av_dump_format(pFormatContext, 0, 0, 0);
	//Flush Decoder: drain delayed frames by feeding an empty packet (data = NULL, size = 0)
	AVPacket flushPacket;
	av_init_packet(&flushPacket);
	flushPacket.data = NULL;
	flushPacket.size = 0;
	while (true)
	{
		if (!(pCodec->capabilities & AV_CODEC_CAP_DELAY))
			break;

		ret = avcodec_decode_video2(pCodecContext, pFrame, &got_picture, &flushPacket);
		if (ret < 0)
		{
			break;
		}
		if (!got_picture)
		{
			break;
		}

		sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecContext->height,
			pFrameYUV->data, pFrameYUV->linesize);

		y_size = pCodecContext->width*pCodecContext->height;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
		printf("Flush Decoder: Succeed to decode 1 frame!\n");
	}
	//Flush Encoder
	for (got_output = 1; got_output; i++) {
		ret = avcodec_encode_video2(pCodecContextEncod, &pPacket, NULL, &got_output);
		if (ret < 0) {
			printf("Error encoding frame\n");
			return -1;
		}
		if (got_output) {
			printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", pPacket.size);
			fwrite(pPacket.data, 1, pPacket.size, fp_out);//av_write_frame(pFormatCtx, &pkt);
			av_free_packet(&pPacket);
		}
	}

	closesocket(ServerS);
	WSACleanup();
	sws_freeContext(img_convert_ctx);
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecContext);
	avformat_close_input(&pFormatContext);
	fclose(fp_yuv);

	fclose(fp_out);
	avcodec_close(pCodecContextEncod);
	av_free(pCodecContextEncod);
	//av_freep(&pFrame->data[0]);
	//av_frame_free(&pFrame);

	scanf("%d", &i);//pause so the console window stays open
	return 0;


}
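
The decoding and encoding calls above (avcodec_decode_video2 / avcodec_encode_video2) are deprecated in newer FFmpeg releases. Below is a minimal sketch of the equivalent encode step using the send/receive API, assuming the same pCodecContextEncod and fp_out as in the listing (pass a NULL frame once at the end to flush the encoder); it is an illustration, not a drop-in replacement for the whole loop.

int encode_and_write(AVCodecContext *enc_ctx, AVFrame *frame, FILE *out)
{
	//frame == NULL flushes the encoder at end of stream
	int ret = avcodec_send_frame(enc_ctx, frame);
	if (ret < 0)
		return ret;

	AVPacket *pkt = av_packet_alloc();
	if (!pkt)
		return AVERROR(ENOMEM);

	while (ret >= 0) {
		ret = avcodec_receive_packet(enc_ctx, pkt);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
			ret = 0;                              //need more input, or fully drained
			break;
		}
		if (ret < 0)
			break;                                //real error
		fwrite(pkt->data, 1, pkt->size, out);     //or sendto(...) as in main()
		av_packet_unref(pkt);
	}
	av_packet_free(&pkt);
	return ret;
}

On the receiving end, the simplest assumption is a peer that binds to the same UDP port, appends every datagram to a raw .h264 file, and plays it back with ffplay. The sketch below reflects that assumption only; the file name and variable names are illustrative.

#include <stdio.h>
#include <winsock2.h>
#pragma comment(lib, "ws2_32.lib")

int main(void)
{
	WSADATA wsa;
	if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0)
		return 1;

	SOCKET s = socket(AF_INET, SOCK_DGRAM, 0);
	SOCKADDR_IN addr;
	addr.sin_family = AF_INET;
	addr.sin_port = htons(8800);                 //same port the sender targets
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(s, (SOCKADDR *)&addr, sizeof(addr));

	FILE *out = fopen("recv.h264", "wb");
	char buf[150000];
	for (;;) {
		int n = recvfrom(s, buf, sizeof(buf), 0, NULL, NULL);
		if (n <= 0)
			break;
		fwrite(buf, 1, n, out);                  //append each datagram to the bitstream
	}
	fclose(out);
	closesocket(s);
	WSACleanup();
	return 0;
}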
