ortp version: ortp-0.18.0.tar.gz
1. Building ortp.lib on Windows
Simply open the project files under ortp-0.18.0\build\win32native; they build under VS2008 without any modification and produce the dynamic link library ortp.lib and ortp.dll.
2. Using the test programs shipped with ortp: the programs in the win_receive and win_sender directories under ortp-0.18.0\src\tests
Running win_receive on Windows 7 produces the following error, which does not occur on Windows XP:
QOSAddSocketToFlow failed to add a flow with error 87
I have not dug into the root cause yet; it looks like a system-compatibility issue that requires qwave.lib support from the system (Win32 error 87 is ERROR_INVALID_PARAMETER).
After tracking down where the error is raised and comparing against a few earlier ortp releases, I modified the ortp 0.18 source as follows:
rtp_session_inet.c:
/* set socket options (but don't change chosen states) */
/*
rtp_session_set_dscp( session, -1 );
rtp_session_set_multicast_ttl( session, -1 );
rtp_session_set_multicast_loopback( session, -1 );
*/
3. Sending H.264 video frames
The win_receive and win_sender programs provided with ortp send only 160 bytes per call, whereas an H.264 video frame requires sending 2000-3000 bytes at a time. On sending H.264 frames with ortp, see:
Besides the payload-type and timestamp caveats mentioned in the two articles above, win_receive also has to call explicitly:
rtp_session_set_recv_buf_size(rtp_session_mgr.rtp_session, recv_bufsize);
The recv_bufsize here must be larger than the wrapLen used in win_sender's send call
sended_bytes = rtp_session_send_with_ts(rtp_session_mgr.rtp_session,
                                        (uint8_t *)send_buffer,
                                        wrapLen,
                                        rtp_session_mgr.cur_timestamp);
otherwise the receive side reports the following error (10040 is WSAEMSGSIZE, i.e. the arriving datagram was larger than the receive buffer):
ortp-warning-Error receiving RTP packet: Error code : 10040, err num [10040],error [-1]
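A simple way to keep the two sides consistent is to derive both sizes from one shared constant. The lines below are only a sketch under that assumption; MAX_FRAME_BYTES is an assumed worst case for one encoded frame, not a value taken from the programs further down:
/* shared constant: assumed maximum size of one encoded H.264 frame */
#define MAX_FRAME_BYTES 8192
#define WRAP_BUF_SIZE (MAX_FRAME_BYTES + sizeof(FrameHeader))
/* receiver side: the RTP receive buffer must hold one whole wrapped frame */
rtp_session_set_recv_buf_size(rtp_session_mgr.rtp_session, WRAP_BUF_SIZE);
/* sender side: drop any frame the receiver could not buffer */
if (frame_len + sizeof(FrameHeader) > WRAP_BUF_SIZE) {
fprintf(stderr, "frame of %d bytes exceeds WRAP_BUF_SIZE, dropped\n", frame_len);
return -1;
}
With such a guard in place the 10040 error cannot occur, because no wrapped frame ever exceeds the receive buffer.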
4. A small program that combines H.264 encoding, RTP sending and receiving, and saving to an MP4 file
Download: ortp test program (modified version)
Build environment: VS2008
The archive includes the ffmpeg and ortp dynamic link libraries, but you still need to fix their paths in the project files before the two libraries will link correctly.
Because this little program is part of a larger project, the source still contains some redundant code that I have not had time to remove; keep that in mind while reading it.
receiver
#include <winsock2.h>   /* Win32: WSAStartup, console control handler */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ortp/ortp.h"
extern "C"{
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
};
BOOL m_bExit = FALSE;
struct RtpSessionMgr
{
RtpSession *rtp_session;
int timestamp;
};
RtpSessionMgr rtp_session_mgr;
const int timestamp_inc = 3600; // 90000/25
const char recv_ip[] = "127.0.0.1";
const int recv_port = 8008;
const int recv_bufsize = 10240;
unsigned char *recv_buf;
/** length of the frame-header magic */
#define CMD_HEADER_LEN 10
/** frame-header magic bytes */
static uint8_t CMD_HEADER_STR[CMD_HEADER_LEN] = { 0xAA,0xA1,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xFF };
/** per-frame header information */
typedef struct _sFrameHeader
{
/** command identifier (magic bytes) */
unsigned char cmdHeader[CMD_HEADER_LEN];
/** capture channel number, 0~7 */
unsigned char chId;
/** data type: audio or video */
unsigned char dataType;
/** length of the data in the buffer */
uint32_t len;
/** timestamp */
uint32_t timestamp;
}FrameHeader;
///ffmpeg///
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *video_st;
AVCodecContext *codecContext;
const int image_width = 704;
const int image_height = 576;
const int frame_rate = 25;
static int frame_count;
BOOL ctrlHandlerFunction(DWORD fdwCtrlType)
{
switch (fdwCtrlType)
{
// Handle the CTRL+C signal.
// CTRL+CLOSE: confirm that the user wants to exit.
case CTRL_C_EVENT:
case CTRL_CLOSE_EVENT:
case CTRL_BREAK_EVENT:
case CTRL_LOGOFF_EVENT:
case CTRL_SHUTDOWN_EVENT:
m_bExit = TRUE;
return TRUE;
default:
return FALSE;
}
}
void rtpInit()
{
int ret;
WSADATA wsaData;
/* initialize Winsock */
if (WSAStartup(MAKEWORD(2,2), &wsaData) != 0)
{
fprintf(stderr, "WSAStartup failed!\n");
return ;
}
ortp_init();
ortp_scheduler_init();
rtp_session_mgr.rtp_session = rtp_session_new(RTP_SESSION_RECVONLY);
rtp_session_set_scheduling_mode(rtp_session_mgr.rtp_session, 1);
rtp_session_set_blocking_mode(rtp_session_mgr.rtp_session, 1);
rtp_session_set_local_addr(rtp_session_mgr.rtp_session, recv_ip, recv_port);
rtp_session_enable_adaptive_jitter_compensation(rtp_session_mgr.rtp_session, TRUE);
rtp_session_set_jitter_compensation(rtp_session_mgr.rtp_session, 40);
rtp_session_set_payload_type(rtp_session_mgr.rtp_session, 34);
rtp_session_set_recv_buf_size(rtp_session_mgr.rtp_session, recv_bufsize);
rtp_session_mgr.timestamp = timestamp_inc;
}
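/* Drain everything queued for the current timestamp: oRTP keeps setting
   havemore to 1 while more packets carrying this timestamp are still buffered. */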
int rtp2disk()
{
int err;
int havemore = 1;
while (havemore)
{
err = rtp_session_recv_with_ts(rtp_session_mgr.rtp_session,
(uint8_t *)recv_buf, recv_bufsize,
rtp_session_mgr.timestamp, &havemore);
if (havemore)
printf("==> Warning: havemore=1!\n");
if (err > 0)
{
FrameHeader *frameHeader;
printf("received %d bytes\n", err);
frameHeader = (FrameHeader *)recv_buf;
printf("frame_len = %d\n", frameHeader->len);
AVPacket pkt;
av_init_packet(&pkt);
pkt.stream_index = video_st->index;
pkt.data = recv_buf + sizeof(FrameHeader);
pkt.size = frameHeader->len; // not the video_outbuf_size, note!
// write the compressed frame in the media file
err = av_write_frame(oc, &pkt);
if (err != 0)
{
printf("av_write_frame failed\n");
}
}
}
return 0;
}
AVCodecContext* createCodecContext(AVFormatContext *oc)
{
AVCodecContext *video_cc = avcodec_alloc_context();
if (!video_cc)
{
fprintf(stderr, "alloc avcodec context failed\n");
exit(1);
}
video_cc->codec_id = (CodecID)CODEC_ID_H264;
video_cc->codec_type = AVMEDIA_TYPE_VIDEO;
video_cc->me_range = 16;
video_cc->max_qdiff = 4;
video_cc->qmin = 10;
video_cc->qmax = 51;
video_cc->qcompress = 0.6f;
/* put sample parameters */
video_cc->bit_rate = 400000;
/* resolution must be a multiple of two */
video_cc->width = image_width;
video_cc->height = image_height;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
video_cc->time_base.den = frame_rate;
video_cc->time_base.num = 1;
video_cc->gop_size = 12; /* emit one intra frame every twelve frames at most */
video_cc->pix_fmt = PIX_FMT_YUV420P;
// some formats want stream headers to be separate
if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
video_cc->flags |= CODEC_FLAG_GLOBAL_HEADER;
return video_cc;
}
void openVideo(AVFormatContext *oc)
{
AVCodec *codec;
/* find the video encoder */
codec = avcodec_find_encoder(codecContext->codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
/* open the codec */
if (avcodec_open(codecContext, codec) < 0) {
fprintf(stderr, "could not open video codec\n");
exit(1);
}
}
void ffmpegEncodeInit()
{
// initialize libavcodec, and register all codecs and formats
av_register_all();
char filename[] = "test.mp4";
fmt = av_guess_format(NULL, filename, NULL);
oc = avformat_alloc_context();
oc->oformat = fmt;
fmt->video_codec = (CodecID) CODEC_ID_H264;
_snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
// add the video streams using the default format codecs and initialize the codecs
video_st = NULL;
if (fmt->video_codec != CODEC_ID_NONE) {
video_st = av_new_stream(oc, 0);
if (!video_st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
}
// alloc codecContext
codecContext = createCodecContext(oc);
video_st->codec = codecContext;
// set the output parameters (must be done even if no parameters).
if (av_set_parameters(oc, NULL) < 0) {
fprintf(stderr, "Invalid output format parameters\n");
exit(1);
}
dump_format(oc, 0, filename, 1);
/* now that all the parameters are set, we can open the audio and
video codecs and allocate the necessary encode buffers */
if (codecContext)
openVideo(oc);
// open the output file, if needed
if (!(fmt->flags & AVFMT_NOFILE)) {
if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
fprintf(stderr, "Could not open '%s'\n", filename);
exit(1);
}
}
// write the stream header, if any
av_write_header(oc);
}
void ffmpegEncodeClose()
{
int i;
/* close each codec */
if (video_st)
avcodec_close(video_st->codec);
// write the trailer, if any
av_write_trailer(oc);
/* free the streams */
for(i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]->codec);
av_freep(&oc->streams[i]);
}
if (!(fmt->flags & AVFMT_NOFILE)) {
/* close the output file */
url_fclose(oc->pb);
}
/* free the stream */
av_free(oc);
}
int main()
{
recv_buf = (uint8_t *)malloc(recv_bufsize);
rtpInit();
ffmpegEncodeInit();
// =============== INSTALL THE CONTROL HANDLER ===============
if (SetConsoleCtrlHandler( (PHANDLER_ROUTINE) ctrlHandlerFunction, TRUE) == 0)
{
printf("==> Cannot handle the CTRL-C...\n");
}
printf("==> RTP Receiver started\n");
while (m_bExit == FALSE)
{
rtp2disk();
rtp_session_mgr.timestamp += timestamp_inc;
}
printf("==> Exiting\n");
free(recv_buf);
ffmpegEncodeClose();
rtp_session_destroy(rtp_session_mgr.rtp_session);
ortp_exit();
}
sender
#include <winsock2.h>   /* Win32: pulls in windows.h for Sleep() */
#include <stdio.h>
#include <stdlib.h>
#include <ortp/ortp.h>
#include <string.h>
extern "C"{
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
};
struct RtpSessionMgr
{
RtpSession *rtp_session;
uint32_t timestamp_inc;
uint32_t cur_timestamp;
};
RtpSessionMgr rtp_session_mgr;
const char g_ip[] = "127.0.0.1";
const int g_port = 8008;
const uint32_t timestamp_inc = 3600; // 90000 / 25
const int image_width = 704;
const int image_height = 576;
const int frame_rate = 25;
static int frame_count, wrap_size;
AVCodecContext *video_cc;
AVFrame *picture;
/** length of the frame-header magic */
#define CMD_HEADER_LEN 10
/** frame-header magic bytes */
static uint8_t CMD_HEADER_STR[CMD_HEADER_LEN] = { 0xAA,0xA1,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xFF };
/** per-frame header information */
typedef struct _sFrameHeader
{
/** command identifier (magic bytes) */
unsigned char cmdHeader[CMD_HEADER_LEN];
/** capture channel number, 0~7 */
unsigned char chId;
/** data type: audio or video */
unsigned char dataType;
/** length of the data in the buffer */
uint32_t len;
/** timestamp */
uint32_t timestamp;
}FrameHeader;
// set frame header
FrameHeader frameHeader;
void rtpInit()
{
char *m_SSRC;
ortp_init();
ortp_scheduler_init();
printf("Scheduler initialized\n");
rtp_session_mgr.rtp_session = rtp_session_new(RTP_SESSION_SENDONLY);
rtp_session_set_scheduling_mode(rtp_session_mgr.rtp_session, 1);
rtp_session_set_blocking_mode(rtp_session_mgr.rtp_session, 1);
rtp_session_set_remote_addr(rtp_session_mgr.rtp_session, g_ip, g_port);
rtp_session_set_send_payload_type(rtp_session_mgr.rtp_session, 34); // 34 is for H.263 video frame
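// Note: 34 is the static RTP payload type assigned to H.263; H.264 streams usually use
// a dynamic payload type (96-127), but here only sender and receiver have to agree.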
m_SSRC = getenv("SSRC");
if (m_SSRC != NULL)
{
rtp_session_set_ssrc(rtp_session_mgr.rtp_session, atoi(m_SSRC));
}
rtp_session_mgr.cur_timestamp = 0;
rtp_session_mgr.timestamp_inc = timestamp_inc;
printf("rtp init success!\n");
}
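/* Prepend a FrameHeader to the encoded frame and hand header + frame to oRTP as a
   single payload; this is why the receiver's recv_bufsize must be at least wrapLen. */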
int rtpSend(unsigned char *send_buffer, int frame_len)
{
FrameHeader *fHeader = (FrameHeader *)send_buffer;
fHeader->chId = 0;
fHeader->dataType = 0; // SESSION_TYPE_VIDEO
fHeader->len = frame_len;
fHeader->timestamp = 0;
printf("frame header len = %d\n", fHeader->len);
int wrapLen;
wrapLen = frame_len + sizeof(FrameHeader);
int sended_bytes;
sended_bytes = rtp_session_send_with_ts(rtp_session_mgr.rtp_session,
(uint8_t *)send_buffer,
wrapLen,
rtp_session_mgr.cur_timestamp);
rtp_session_mgr.cur_timestamp += rtp_session_mgr.timestamp_inc;
return sended_bytes;
}
void createCodecContext()
{
video_cc = avcodec_alloc_context();
if (!video_cc)
{
fprintf(stderr, "alloc avcodec context failed\n");
exit(1);
}
video_cc->codec_id = (CodecID)CODEC_ID_H264;
video_cc->codec_type = AVMEDIA_TYPE_VIDEO;
video_cc->me_range = 16;
video_cc->max_qdiff = 4;
video_cc->qmin = 10;
video_cc->qmax = 51;
video_cc->qcompress = 0.6f;
/* put sample parameters */
video_cc->bit_rate = 400000;
/* resolution must be a multiple of two */
video_cc->width = image_width;
video_cc->height = image_height;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
video_cc->time_base.den = frame_rate;
video_cc->time_base.num = 1;
video_cc->gop_size = 12; /* emit one intra frame every twelve frames at most */
video_cc->pix_fmt = PIX_FMT_YUV420P;
}
AVFrame *allocPicture(int pix_fmt, int width, int height)
{
AVFrame *picture;
uint8_t *picture_buf;
int size;
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size((PixelFormat)pix_fmt, width, height);
picture_buf = (uint8_t *)av_malloc(size);
if (!picture_buf) {
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf, (PixelFormat)pix_fmt, width, height);
return picture;
}
void openVideo()
{
AVCodec *video_codec;
/* find the video encoder */
video_codec = avcodec_find_encoder(video_cc->codec_id);
if (!video_codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
/* open the codec */
if (avcodec_open(video_cc, video_codec) < 0) {
fprintf(stderr, "could not open video codec\n");
exit(1);
}
/* allocate the encoded raw picture */
picture = allocPicture(video_cc->pix_fmt, video_cc->width, video_cc->height);
if (!picture) {
fprintf(stderr, "Could not allocate picture\n");
exit(1);
}
}
/* prepare a dummy image */
void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for(y=0;y<height/2;y++) {
for(x=0;x<width/2;x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
void ffmpegInit()
{
// initialize libavcodec, and register all codecs and formats
av_register_all();
// create a codec context
createCodecContext();
// open H.264 codec
openVideo();
}
void getEncodedFrame(unsigned char *buffer, int& len)
{
int out_size;
fill_yuv_image(picture, frame_count, video_cc->width, video_cc->height);
// encode the frame
out_size = avcodec_encode_video(video_cc, buffer, wrap_size-sizeof(FrameHeader), picture);
len = out_size;
frame_count++;
}
int main()
{
unsigned char *send_outbuf;
unsigned char *video_part;
frame_count = 0;
wrap_size = 20000;
send_outbuf = (unsigned char *)malloc(wrap_size);
// copy cmdHeader to frameInfo
memcpy(frameHeader.cmdHeader,CMD_HEADER_STR,CMD_HEADER_LEN);
memcpy(send_outbuf, &frameHeader, sizeof(FrameHeader));
video_part = send_outbuf + sizeof(FrameHeader);
ffmpegInit();
rtpInit();
while (1)
{
int frame_len;
// get encode frame
getEncodedFrame(video_part, frame_len);
printf("encodecFrame length is : %d\n", frame_len);
if (frame_len > 0)
{
rtpSend(send_outbuf, frame_len);
}
}
rtp_session_destroy(rtp_session_mgr.rtp_session);
free(send_outbuf);
// Give us some time
Sleep(250);
ortp_exit();
}