Muxing H.264 + AAC into a TS file with tstools

This example modifies the esmerge.c sample that ships with tstools, plus a few parts of the library itself, so that an H.264 elementary-stream file and an AAC file can be read and muxed into a TS file.

Function pointers are passed in as parameters, so the code that reads the source files and writes the output file is lifted out of the library to a place where it is easier to control (the main function).
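For orientation, these are the callback shapes the code below ends up using (the typedef names here are only illustrative and are not part of tstools): a video read callback that fills the library's read-ahead buffer and reports the byte count through a pointer, an audio read callback that returns one complete ADTS frame per call, and a write callback that consumes the TS output.

typedef int  (*es_read_callback_t)(char *data, int *len);   /* fill data, set *len; return >0 = bytes delivered, 0 = EOF, -1 = error */
typedef int  (*aac_read_callback_t)(char *data, int len);   /* copy one ADTS frame (at most len bytes) into data; return its length, <=0 = EOF/error */
typedef void (*ts_write_callback_t)(char *data, int len);   /* consume len bytes of muxed TS output */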

 

1. Download the tstools source code

    Official site: http://tstool.sourceforge.net/

    CSDN download: http://download.csdn.net/download/u011298831/9596746

 

2. Build

    Building the source failed for me at link time until libm was added to the Makefile:

    - LIBOPTS = -L$(LIBDIR) -ltstools $(ARCH_FLAGS)

   + LIBOPTS = -L$(LIBDIR) -ltstools $(ARCH_FLAGS) -lm

   With the Makefile patched like this, the build completes normally.

 

3. Add your own esmerge_plus.c, modeled on esmerge.c

    In the Makefile, add rules for esmerge_plus.c by copying the existing esmerge rules:

      $(OBJDIR)/esmerge.o \
    + $(OBJDIR)/esmerge_plus.o \

      $(BINDIR)/esmerge \
    + $(BINDIR)/esmerge_plus \

      $(BINDIR)/esmerge: $(OBJDIR)/esmerge.o $(LIB)
      	$(CC) $< -o $(BINDIR)/esmerge $(LDFLAGS) $(LIBOPTS)
    + $(BINDIR)/esmerge_plus: $(OBJDIR)/esmerge_plus.o $(LIB)
    + 	$(CC) $< -o $(BINDIR)/esmerge_plus $(LDFLAGS) $(LIBOPTS)

      $(OBJDIR)/esmerge.o: esmerge.c misc_fns.h $(ACCESSUNIT_H) $(AUDIO_H) $(TSWRITE_H) version.h
      	$(CC) -c $< -o $@ $(CFLAGS)
    + $(OBJDIR)/esmerge_plus.o: esmerge_plus.c misc_fns.h $(ACCESSUNIT_H) $(AUDIO_H) $(TSWRITE_H) version.h
    + 	$(CC) -c $< -o $@ $(CFLAGS)

 

Running make now produces the esmerge_plus binary under bin/.

 

4. Modify the library and esmerge_plus.c

    The original esmerge.c passes file names and file handles down into the library, and the TS output is also written fairly deep inside it. The goal now is to lift reading the source files and writing the output file up to the main function level, where the read and write operations are easy to implement.

    The main function first; the program is invoked as esmerge_plus <video.264> <audio.aac> <output.ts> (argv[1..3] below).

 

int main(int argc, char **argv)
{
	int    had_video_name = FALSE;
	int    had_audio_name = FALSE;
	int    had_output_name = FALSE;
	char  *video_name = NULL;
	char  *audio_name = NULL;
	char  *output_name = NULL;
	int    err = 0;
	ES_p   video_es = NULL;
	access_unit_context_p h264_video_context = NULL;
	avs_context_p avs_video_context = NULL;
	int    audio_file = -1;
	TS_writer_p output = NULL;
	int    quiet = FALSE;
	int    verbose = FALSE;
	int    debugging = FALSE;
	int    audio_samples_per_frame = ADTS_SAMPLES_PER_FRAME;
	int    audio_sample_rate = DAT_RATE;
	int    video_frame_rate = DEFAULT_VIDEO_FRAME_RATE;
	int    audio_type = AUDIO_ADTS;
	int    video_type = VIDEO_H264;
	int    pat_pmt_freq = 0;
	int    ii = 1;

#if TEST_PTS_DTS
	test_pts();
	return 0;
#endif

	if (argc < 4)   // we need <video.264> <audio.aac> <output.ts>
	{
		print_usage();
		return 0;
	}

	video_type = VIDEO_H264;
	read_file = video_name = argv[1];
	audio_read_file = audio_name = argv[2];
	write_file = output_name = argv[3];
	fprintf(stderr, "### esmerge: video_name:%s \n", video_name);
	fprintf(stderr, "### esmerge: audio_name:%s \n", audio_name);
	fprintf(stderr, "### esmerge: output_name:%s\n", output_name);

	//err = open_elementary_stream(video_name,&video_es);
	err = open_elementary_stream_ex(video_read_func, &video_es);
	if (err)
	{
		fprintf(stderr, "### esmerge: "
			"Problem starting to read video as ES - abandoning reading\n");
		return 1;
	}

	if (video_type == VIDEO_H264)
	{
		err = build_access_unit_context(video_es, &h264_video_context);
		if (err)
		{
			fprintf(stderr, "### esmerge: "
				"Problem starting to read video as H.264 - abandoning reading\n");
			close_elementary_stream(&video_es);
			return 1;
		}
	}
	else
	{
		fprintf(stderr, "### esmerge: Unknown video type\n");
		return 1;
	}

	//======================================================================
	//  audio_file = open_binary_file(audio_name,FALSE);
	//  if (audio_file == -1)
	//  {
	//    fprintf(stderr,"### esmerge: "
	//            "Problem opening audio file - abandoning reading\n");
	//    close_elementary_stream(&video_es);
	//    free_access_unit_context(&h264_video_context);
	//    free_avs_context(&avs_video_context);
	//    return 1;
	//  }
	//======================================================================

	//err = tswrite_open(TS_W_FILE,output_name,NULL,0,quiet,&output);
	err = tswrite_open_ex(TS_W_CALL, video_write_func, NULL, 0, quiet, &output);
	if (err)
	{
		fprintf(stderr, "### esmerge: "
			"Problem opening output file %s - abandoning reading\n",
			output_name);
		close_elementary_stream(&video_es);
		if (audio_file != -1) close_file(audio_file);
		free_access_unit_context(&h264_video_context);
		free_avs_context(&avs_video_context);
		return 1;
	}

	switch (audio_type)
	{
	case AUDIO_ADTS:
		audio_samples_per_frame = ADTS_SAMPLES_PER_FRAME;
		break;
	default:              // hmm - or we could give up...
		audio_samples_per_frame = ADTS_SAMPLES_PER_FRAME;
		break;
	}

	if (!quiet)
	{
		printf("Reading video from %s\n", video_name);
		printf("Writing output to  %s\n", output_name);
		printf("Video frame rate: %dHz\n", video_frame_rate);
	}

	if (video_type == VIDEO_H264)
		//err = merge_with_h264(h264_video_context, read_next_adts_frame, output,
		err = merge_with_h264(h264_video_context, audio_read_func, output,
		                      AUDIO_ADTS_MPEG2,
		                      audio_samples_per_frame, audio_sample_rate,
		                      video_frame_rate,
		                      pat_pmt_freq,
		                      quiet, verbose, debugging);
	else
	{
		printf("### esmerge: Unknown video type\n");
		return 1;
	}
	if (err)
	{
		printf("### esmerge: Error merging video and audio streams\n");
		close_elementary_stream(&video_es);
		free_access_unit_context(&h264_video_context);
		(void)tswrite_close(output, quiet);
		return 1;
	}

	close_elementary_stream(&video_es);
	free_access_unit_context(&h264_video_context);
	err = tswrite_close(output, quiet);
	if (err)
	{
		printf("### esmerge: Error closing output %s\n", output_name);
		return 1;
	}
	return 0;
}

This example only supports baseline/main profile H.264 and MPEG-2 ADTS AAC (-mp2adts); adapt the source yourself for other cases.

 

The two newly added functions:

err = open_elementary_stream_ex(video_read_func, &video_es);

err = tswrite_open_ex(TS_W_CALL, video_write_func, NULL, 0, quiet, &output);
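Both are additions to the library, so their prototypes also have to be visible to esmerge_plus.c. A minimal sketch of the declarations; I am assuming the usual tstools header layout (es_fns.h and tswrite_fns.h), so put them wherever your tree declares open_elementary_stream and tswrite_open:

/* es_fns.h */
extern int open_elementary_stream_ex(int  (*callreadfun)(char *data, int *len),
                                     ES_p  *es);

/* tswrite_fns.h */
extern int tswrite_open_ex(TS_WRITER_TYPE  how,
                           void          (*callfunc)(char *data, int len),
                           char           *multicast_if,
                           int             port,
                           int             quiet,
                           TS_writer_p    *tswriter);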

 

1) open_elementary_stream_ex (in es.c; find open_elementary_stream and add the following alongside it)

 

extern int open_elementary_stream_ex(int  (*callreadfun)(char*, int*),
                                     ES_p  *es)
{
  int err = build_elementary_stream_file_ex(callreadfun, es);
  if (err)
  {
    fprintf(stderr,"### Error building elementary stream from read callback\n");
    return 1;
  }
  return 0;
}

 

 

In the same way, add build_elementary_stream_file_ex at the appropriate place in es.c:

extern int build_elementary_stream_file_ex(int  (*callreadfun)(char*, int*),
                                           ES_p  *es)
{
  ES_p new = malloc(SIZEOF_ES);
  if (new == NULL)
  {
    fprintf(stderr,"### Unable to allocate elementary stream datastructure\n");
    return 1;
  }

  new->self_input = 1;
  new->callreadfun = callreadfun;
  new->reading_ES = TRUE;
  new->input = -1;
  new->reader = NULL;
  //fprintf(stderr,"===>> setup_readahead new->callreadfun:%x\n",new->callreadfun);
  setup_readahead(new);

  *es = new;
  return 0;
}

Two members appear here that the ES_p structure does not originally have: self_input and callreadfun.

struct elementary_stream
{
  int       reading_ES;  // TRUE if we're reading ES data direct, FALSE if PES

  + // self-defined read callback
  + int self_input;
  + int (*callreadfun)(char* data, int* len);

  ……
};

As shown above, self_input and callreadfun are added to struct elementary_stream (es_defns.h): self_input marks that this stream uses the caller-supplied read path, and callreadfun is the function that actually delivers the data.
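Since the library now only ever goes through callreadfun, the ES data does not have to come from a file at all. As a quick illustration only (es_buf and friends are hypothetical, not part of this example), a callback serving H.264 data that is already sitting in memory could look like this:

#include <string.h>

static const char *es_buf     = NULL;  /* H.264 ES data already held in memory */
static int         es_buf_len = 0;     /* total number of bytes in es_buf      */
static int         es_buf_pos = 0;     /* bytes handed out so far              */

int mem_read_func(char* data, int* len)
{
  int chunk = es_buf_len - es_buf_pos;
  if (chunk <= 0)
  {
    *len = 0;
    return 0;                        /* 0 means EOF (see get_more_data() below) */
  }
  if (chunk > ES_READ_AHEAD_SIZE)    /* never hand back more than the read-ahead buffer holds */
    chunk = ES_READ_AHEAD_SIZE;
  memcpy(data, es_buf + es_buf_pos, chunk);
  es_buf_pos += chunk;
  *len = chunk;
  return chunk;                      /* >0 means data was delivered */
}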

 

2) tswrite_open_ex(TS_W_CALL, video_write_func, NULL, 0, quiet, &output) (in tswrite.c; find tswrite_open and add the following alongside it)

 

extern int tswrite_open_ex(TS_WRITER_TYPE  how,
                           void          (*callfunc)(char*, int),
                           char           *multicast_if,
                           int             port,
                           int             quiet,
                           TS_writer_p    *tswriter)
{
	TS_writer_p  new;
	int err = tswrite_build(how,quiet,tswriter);
	if (err) return 1;

	new = *tswriter;
	switch (how)
	{
	case TS_W_CALL:
		if (!quiet) printf("Writing TS output via user callback (TS_W_CALL)\n");
		new->where.callfun = callfunc;
		break;
	default:
		fprintf(stderr, "### Unexpected writer type %d to tswrite_open_ex()\n", how);
		free(new);
		return 1;
	}
	return 0;
}

As in 1), this relies on a new member, where.callfun, in the TS writer structure:
union TS_writer_output
{
  FILE   *file;
  SOCKET  socket;
  + // user-supplied write callback for the TS output
  + void (*callfun)(char*, int);

};

 

As shown above, callfun is added to the union TS_writer_output (tswrite_defns.h); it is the function used to write the output data.
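As the TS_W_CALL case added to tswrite_write below shows, the callback is handed one TS_PACKET_SIZE (188-byte) packet per call, so any sink will do. Purely as an illustration (count_write_func is hypothetical, not part of this example), a callback that just sanity-checks and counts the packets instead of writing a file:

#include <stdio.h>

static long ts_packets_out = 0;

void count_write_func(char* data, int len)
{
  /* every TS packet is 188 bytes and starts with the sync byte 0x47 */
  if (len != 188 || (unsigned char)data[0] != 0x47)
    fprintf(stderr, "### unexpected TS packet (len %d)\n", len);
  ts_packets_out++;
}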

 

You should already be reasonably familiar with tstools and how it works. Three more functions need changes: tswrite_write, tswrite_close_file and static inline int get_more_data(ES_p es).

Here is my modified static inline int get_more_data(ES_p es) (in es.c):

 

static inline int get_more_data(ES_p  es)
{
  if (es->reading_ES)
  {
    if (es->self_input == 1)
    {
      // Data comes from the user-supplied read callback
      int len = 0;
      int ret = es->callreadfun((char *)es->read_ahead, &len);
      if (ret == 0)
        return EOF;
      else if (ret == -1)
      {
        fprintf(stderr,"### Error reading next bytes: %s\n",strerror(errno));
        return 1;
      }

      es->read_ahead_posn += es->read_ahead_len;  // length of the *last* buffer
      es->read_ahead_len = len;
      es->data = es->read_ahead;     // should be done in the setup function
      es->data_end = es->data + len; // one beyond the last byte
      es->data_ptr = es->data;
    }
    else
    {
      // Call `read` directly - we don't particularly mind if we get a "short"
      // read, since we'll just catch up later on
#ifdef _WIN32
      int len = _read(es->input,&es->read_ahead,ES_READ_AHEAD_SIZE);
#else
      ssize_t  len = read(es->input,&es->read_ahead,ES_READ_AHEAD_SIZE);
#endif
      if (len == 0)
        return EOF;
      else if (len == -1)
      {
        fprintf(stderr,"### Error reading next bytes: %s\n",strerror(errno));
        return 1;
      }

      es->read_ahead_posn += es->read_ahead_len;  // length of the *last* buffer
      es->read_ahead_len = len;
      es->data = es->read_ahead;     // should be done in the setup function
      es->data_end = es->data + len; // one beyond the last byte
      es->data_ptr = es->data;
    }

    return 0;
  }
  else
  {
    return get_next_pes_packet(es);
  }
}

In tswrite_write (tswrite.c), add a TS_W_CALL case to the switch so our new writer type hands each TS packet to the callback:
switch (tswriter->how)
{
  ……
  + case TS_W_CALL:
  +   tswriter->where.callfun(packet, TS_PACKET_SIZE);
  +   break;
  ……
}

In tswrite_close_file (tswrite.c), add a TS_W_CALL case as well; nothing needs to be done there, since the callback owns whatever it writes to:

switch (tswriter->how)
{
  ……
  + case TS_W_CALL:
  +   break;
  ……
}

To find every place that needs a TS_W_CALL case, just search for TS_W_FILE.

enum TS_writer_type
{
  TS_W_UNDEFINED,
  TS_W_STDOUT,  // standard output
  TS_W_FILE,    // a file
  TS_W_TCP,     // a socket, over TCP/IP
  TS_W_UDP,     // a socket, over UDP
  +TS_W_CALL,    // user-supplied write callback
};

As shown above, TS_W_CALL is added to enum TS_writer_type (tswrite_defns.h).


That completes the changes to the library code; back to esmerge_plus.c.

 

3) The merge_with_h264 function

 

static int merge_with_h264(access_unit_context_p  video_context,
	int                  (*callbackfun)(char*, int),
	TS_writer_p            output,
	int                    audio_type,
	int                    audio_samples_per_frame,
	int                    audio_sample_rate,
	int                    video_frame_rate,
	int                    pat_pmt_freq,
	int                    quiet,
	int                    verbose,
	int                    debugging)
{
	int  ii;
	int  err;
	uint32_t prog_pids[2];
	byte     prog_type[2];

	int video_frame_count = 0;
	int audio_frame_count = 0;

	uint32_t video_pts_increment = 90000 / video_frame_rate;
	uint32_t audio_pts_increment = (90000 * audio_samples_per_frame) / audio_sample_rate;
	uint64_t video_pts = 0;
	uint64_t audio_pts = 0;

	// The "actual" times are just for information, so we aren't too worried
	// about accuracy - thus floating point should be OK.
	double audio_time = 0.0;
	double video_time = 0.0;

	int got_video = TRUE;
	int got_audio = TRUE;

	if (verbose)
		printf("Video PTS increment %u\n"
		"Audio PTS increment %u\n", video_pts_increment, audio_pts_increment);

	// Start off our output with some null packets - this is in case the
	// reader needs some time to work out its byte alignment before it starts
	// looking for 0x47 bytes
	for (ii = 0; ii < 8; ii++)
	{
		err = write_TS_null_packet(output);
		if (err) return 1;
	}

	// Then write some program data
	// @@@ later on we might want to repeat this every so often
	prog_pids[0] = DEFAULT_VIDEO_PID;
	prog_pids[1] = DEFAULT_AUDIO_PID;
	prog_type[0] = AVC_VIDEO_STREAM_TYPE;

	switch (audio_type)
	{
	case AUDIO_ADTS:
	case AUDIO_ADTS_MPEG2:
	case AUDIO_ADTS_MPEG4:
		prog_type[1] = ADTS_AUDIO_STREAM_TYPE;
		break;
	case AUDIO_L2:
		prog_type[1] = MPEG2_AUDIO_STREAM_TYPE;
		break;
	case AUDIO_AC3:
		prog_type[1] = ATSC_DOLBY_AUDIO_STREAM_TYPE;
		break;
	default:              // what else can we do?
		prog_type[1] = ADTS_AUDIO_STREAM_TYPE;
		break;
	}
	err = write_TS_program_data2(output,
		1, // transport stream id
		1, // program number
		DEFAULT_PMT_PID,
		DEFAULT_VIDEO_PID,  // PCR pid
		2, prog_pids, prog_type);
	if (err)
	{
		fprintf(stderr, "### Error writing out TS program data\n");
		return 1;
	}

	while (got_video || got_audio)
	{
		access_unit_p  access_unit;

		// Start with a video frame
		if (got_video)
		{
			err = get_next_h264_frame(video_context, quiet, debugging, &access_unit);
			if (err == EOF)
			{
				if (verbose)
					fprintf(stderr, "EOF: no more video data\n");
				got_video = FALSE;
			}
			else if (err)
			{
				fprintf(stderr, "EOF: no more video data return 1;\n");
				return 1;
			}

		}

		if (got_video)
		{
			video_time = video_frame_count / (double)video_frame_rate;
			video_pts += video_pts_increment;
			video_frame_count++;
			if (verbose)
				printf("\n%s video frame %5d (@ %.2fs, " LLU_FORMAT ")\n",
				(is_I_or_IDR_frame(access_unit) ? "**" : "++"),
				video_frame_count, video_time, video_pts);

			if (pat_pmt_freq && !(video_frame_count % pat_pmt_freq))
			{
				if (verbose)
				{
					printf("\nwriting PAT and PMT (frame = %d, freq = %d).. ",
						video_frame_count, pat_pmt_freq);
				}
				err = write_TS_program_data2(output,
					1, // tsid
					1, // Program number
					DEFAULT_PMT_PID,
					DEFAULT_VIDEO_PID, // PCR pid
					2, prog_pids, prog_type);
			}


			// PCR counts frames as seen in the stream, so is easy
			// The presentation and decoding time for B frames (if we ever get any)
			// could reasonably be the same as the PCR.
			// The presentation and decoding time for I and IDR frames is unlikely to
			// be the same as the PCR (since frames come out later...), but it may
			// work to pretend the PTS is the PCR plus a delay time (for decoding)...

			// We could output the timing information every video frame,
			// but might as well only do it on index frames.
			if (is_I_or_IDR_frame(access_unit))
				err = write_access_unit_as_TS_with_pts_dts(access_unit, video_context,
				output, DEFAULT_VIDEO_PID,
				TRUE, video_pts + 45000,
				TRUE, video_pts);
			else
				err = write_access_unit_as_TS_with_PCR(access_unit, video_context,
				output, DEFAULT_VIDEO_PID,
				video_pts, 0);
			if (err)
			{
				free_access_unit(&access_unit);
				fprintf(stderr, "### Error writing access unit (frame)\n");
				return 1;
			}
			free_access_unit(&access_unit);

			// Did the logical video stream end after the last access unit?
			if (video_context->end_of_stream)
			{
				if (verbose)
					printf("Found End-of-stream NAL unit\n");
				got_video = FALSE;
			}
		}
		//continue;

		if (!got_audio || callbackfun == NULL)
			continue;

		// Then output enough audio frames to make up to a similar time
		while (audio_pts < video_pts || !got_video)
		{
			//err = read_next_audio_frame(audio_file,audio_type,&aframe);
			char aframe_buf[8192];   // big enough for any ADTS frame (frame_length is a 13-bit field)
			int ret = callbackfun(aframe_buf, sizeof(aframe_buf));
			if (debugging)
				fprintf(stderr, "callbackfun audio data ret:%d\n", ret);
			if (ret <= 0)
			{
				got_audio = FALSE;
				break;
			}

			audio_time = audio_frame_count *
				audio_samples_per_frame / (double)audio_sample_rate;
			audio_pts += audio_pts_increment;
			audio_frame_count++;
			if (verbose)
				printf("** audio frame %5d (@ %.2fs, " LLU_FORMAT ")\n",
				audio_frame_count, audio_time, audio_pts);

			err = write_ES_as_TS_PES_packet_with_pts_dts(output, (byte *)aframe_buf,
				ret,
				DEFAULT_AUDIO_PID,
				DEFAULT_AUDIO_STREAM_ID,
				TRUE, audio_pts,
				TRUE, audio_pts);
			if (err)
			{
				return 1;
			}
		}
	}

	if (!quiet)
	{
		uint32_t video_elapsed = 100 * video_frame_count / video_frame_rate;
		uint32_t audio_elapsed = 100 * audio_frame_count*
			audio_samples_per_frame / audio_sample_rate;
		printf("Read %d video frame%s, %.2fs elapsed (%dm %.2fs)\n",
			video_frame_count, (video_frame_count == 1 ? "" : "s"),
			video_elapsed / 100.0, video_elapsed / 6000, (video_elapsed % 6000) / 100.0);
		printf("Read %d audio frame%s, %.2fs elapsed (%dm %.2fs)\n",
			audio_frame_count, (audio_frame_count == 1 ? "" : "s"),
			audio_elapsed / 100.0, audio_elapsed / 6000, (audio_elapsed % 6000) / 100.0);
	}

	return 0;

}
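As a sanity check on the interleaving arithmetic: with the defaults used in this post (30 fps video, 48 kHz ADTS audio, 1024 samples per AAC frame), video_pts_increment is 90000/30 = 3000 ticks and audio_pts_increment is 90000*1024/48000 = 1920 ticks, so the inner loop emits roughly 3000/1920 ≈ 1.56 audio frames per video frame to keep audio_pts trailing just behind video_pts.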

 

 

Then come the read and write callbacks:

 

#include <fcntl.h>
int video_fd_r = -1;
int video_fd_w = -1;
int audio_fd_r = -1;

// input/output paths, set from argv[] in main()
char* read_file = NULL;        // H.264 ES input
char* write_file = NULL;       // TS output
char* audio_read_file = NULL;  // ADTS AAC input

void video_write_func(char* data, int len)
{
	if (video_fd_w == -1)
	{
		int flags = O_WRONLY | O_CREAT | O_TRUNC;
		video_fd_w = open(write_file, flags, 0777);

		if (video_fd_w == -1)
		{
			fprintf(stderr, "### Error opening file %s %s\n", write_file, strerror(errno));
			return;
		}
		fprintf(stderr, "### opened output file %s (fd %d)\n", write_file, video_fd_w);
	}

	int _len = write(video_fd_w, data, len);
	if (_len != len)
		fprintf(stderr, "### Error writing file %s %s\n", write_file, strerror(errno));
}

int audio_read_func(char* data, int len)
{
	if (audio_fd_r == -1)
	{
		audio_fd_r = open(audio_read_file, O_RDONLY);

		if (audio_fd_r == -1)
		{
			fprintf(stderr, "###audio_read_func Error opening file %s %s\n", audio_read_file, strerror(errno));
			return -1;
		}
		fprintf(stderr, "###audio_read_func opened file %s (fd %d)\n", audio_read_file, audio_fd_r);
	}

	// Read one ADTS frame: a 7-byte header (assuming protection_absent == 1,
	// i.e. no CRC) followed by the raw AAC payload.
	unsigned char aac_header[7];
	int true_size = read(audio_fd_r, aac_header, 7);
	if (true_size < 7)
		return 0;                      // EOF (or a truncated header)

	// frame_length is the 13-bit field spanning header bytes 3..5 and
	// includes the 7 header bytes themselves.
	int frame_length = ((aac_header[3] & 0x03) << 11) | (aac_header[4] << 3) |
	                   ((unsigned)(aac_header[5] & 0xE0) >> 5);

	if (frame_length < 7 || frame_length > len)
		return -1;                     // malformed header, or caller's buffer too small

	memcpy(data, aac_header, 7);       // copy the *whole* 7-byte header
	true_size = read(audio_fd_r, &(data[7]), frame_length - 7);
	if (true_size < frame_length - 7)
		return 0;                      // truncated frame at end of file

	return frame_length;
}
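To make the header parsing concrete: frame_length is the 13-bit ADTS field built from the low 2 bits of byte 3, all of byte 4 and the top 3 bits of byte 5, and it counts the 7-byte header as well as the payload. For example, header bytes aac_header[3..5] = 0x40, 0x2F, 0x20 give ((0x40 & 0x03) << 11) | (0x2F << 3) | (0x20 >> 5) = 0 + 376 + 1 = 377, i.e. a 377-byte frame including its header.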

int video_read_func(char* data, int* len)
{
	if (video_fd_r == -1)
	{
		int flags = 0;
		flags = flags | O_RDONLY;
		video_fd_r = open(read_file, flags);

		if (video_fd_r == -1)
		{
			fprintf(stderr, "### Error opening file %s %s\n", read_file, strerror(errno));
			*len = 0;
			return -1;   // report the failure to get_more_data() instead of read()ing fd -1
		}
		fprintf(stderr, "### opened file %s (fd %d)\n", read_file, video_fd_r);
	}
	*len = read(video_fd_r, data, ES_READ_AHEAD_SIZE);
	if (*len > 0)
	{
		//fprintf(stderr,"====>>> callreadfun len:%d\n",*len);
	}
	else if (*len < 0)
	{
		//fprintf(stderr,"### error read file %s %s\n","test.264",strerror(errno));	
	}
	return *len;
}

 

 

Done! Remember to adjust a couple of macros and initial values:

#define DEFAULT_VIDEO_FRAME_RATE  30

int    audio_sample_rate = DAT_RATE;   // my audio is 48000 Hz

Once more: this example only supports baseline/main profile H.264 and MPEG-2 ADTS AAC (-mp2adts); adapt the source yourself for other cases.

My source code: http://download.csdn.net/download/u011298831/9596805

 
