先看问题?
- 推流推流,流从哪里来? ---->摄像头或者视频decode
- 假设来自摄像头,且把流封装成帧了,我怎么把帧组装成RTMP的格式?----> RTMP的封装(RTMPPacket)
- 拿到了RTMPPacket怎么发送?----> 交给rtmp.c发送
摄像头信息封装成视频帧
//TODO 待补充
RTMPPacket的生成
SPS、PPS的封装
// Encoder callback: received the codec config (SPS/PPS); forward it to the pusher if one exists.
public void onSPSPPSInfo(byte[] sps, byte[] pps) {
    if (rtmpPush == null) {
        return;
    }
    rtmpPush.pushSPSPPS(sps, pps);
}
/**
 * Hands the H.264 SPS and PPS buffers to the native layer.
 * Silently ignores the call when either buffer is absent.
 *
 * @param sps sequence parameter set bytes (may be null; then nothing is pushed)
 * @param pps picture parameter set bytes (may be null; then nothing is pushed)
 */
public void pushSPSPPS(byte[] sps, byte[] pps) {
    if (sps == null || pps == null) {
        return;
    }
    pushSPSPPS(sps, sps.length, pps, pps.length);
}
// JNI entry point (implemented in C++); lengths are passed explicitly alongside the arrays.
private native void pushSPSPPS(byte[] sps, int sps_len, byte[] pps, int pps_len);
extern "C"
JNIEXPORT void JNICALL
Java_com_yxt_livepusher_network_rtmp_RtmpPush_pushSPSPPS(JNIEnv *env, jobject instance,
                                                         jbyteArray sps_, jint sps_len,
                                                         jbyteArray pps_, jint pps_len) {
    // JNI bridge: pin the Java byte[] buffers and hand the SPS/PPS data to the native pusher.
    jbyte *sps = env->GetByteArrayElements(sps_, NULL);
    jbyte *pps = env->GetByteArrayElements(pps_, NULL);
    // GetByteArrayElements may return NULL (e.g. on OOM) — never dereference unchecked.
    if (sps != NULL && pps != NULL && rtmpPush != NULL && !exit) {
        rtmpPush->pushSPSPPS(reinterpret_cast<char *>(sps), sps_len,
                             reinterpret_cast<char *>(pps), pps_len);
    }
    // The buffers are only read, so use JNI_ABORT: free the copy without
    // writing anything back into the Java arrays (mode 0 would copy back).
    if (sps != NULL) {
        env->ReleaseByteArrayElements(sps_, sps, JNI_ABORT);
    }
    if (pps != NULL) {
        env->ReleaseByteArrayElements(pps_, pps, JNI_ABORT);
    }
}
void RtmpPush::pushSPSPPS(char *sps, int sps_len, char *pps, int pps_len) {
int bodysize = sps_len + pps_len + 16;// TODO 为什么要加16?
RTMPPacket *packet = static_cast<RTMPPacket *>(malloc(sizeof(RTMPPacket)));
RTMPPacket_Alloc(packet, bodysize);
RTMPPacket_Reset(packet);
char *body = packet->m_body;
int i = 0