在源码中,Player 有对应的基类 PlayerBase,作者采用了模板委托的设计模式,这种模式我是第一次遇到。
MediaPlayer是干什么的?
我们以 RtmpPlayer 为例,它其实是一个 rtmp 客户端,
class RtmpPlayer : public PlayerBase, public TcpClient, public RtmpProtocol{}
从它的继承我们可以看到有PlayerBase,TcpClient,RtmpProtocol,直白点就是一个tcp客户端,上层握手协议为rtmp,连接rtmp后可以从play()处获取到demuxer的h264数据。
ctx->player = mk_player_create();
//回复play的时候触发
mk_player_set_on_result(ctx->player, on_mk_play_event_func, ctx);
mk_player_set_on_shutdown(ctx->player, on_mk_play_event_func, ctx);
mk_player_set_on_data(ctx->player, on_mk_play_data_func, ctx);
mk_player_play(ctx->player, url_pull);
下面分析一下mk_player_create():
//首先
// Create the opaque C handle: a heap-allocated shared_ptr holder that keeps
// the MediaPlayerForC instance alive until the holder itself is deleted
// (by the matching release call).
API_EXPORT mk_player API_CALL mk_player_create() {
    auto holder = new MediaPlayerForC::Ptr(new MediaPlayerForC());
    // setup() must run only after shared ownership exists,
    // because it calls shared_from_this().
    (*holder)->setup();
    return holder;
}
//这里创建了一个MediaPlayerForC对象,里面有一个成员变量MediaPlayer::Ptr _player;
//然后执行setup()
void setup(){
weak_ptr<MediaPlayerForC> weak_self = shared_from_this();
_player->setOnPlayResult([weak_self](const SockException &ex){
auto strong_self = weak_self.lock();
if(strong_self){
strong_self->onEvent(false,ex);
}
});
_player->setOnShutdown([weak_self](const SockException &ex){
auto strong_self = weak_self.lock();
if(strong_self){
strong_self->onEvent(true,ex);
}
});
}
//这里设置两个回调函数
void onEvent(bool is_shutdown, const SockException &ex){
lock_guard<recursive_mutex> lck(_mtx);
if(is_shutdown){
//播放中断
if(_on_shutdown){
_on_shutdown(_on_shutdown_data,ex.getErrCode(),ex.what());
}
return;
}
//播放结果
if(_on_play){
//mk_player_set_on_event设置的回调,创建的时候调一次
_on_play(_on_play_data,ex.getErrCode(),ex.what());
}
if(ex){
//播放失败
return;
}
//播放成功,添加事件回调
weak_ptr<MediaPlayerForC> weak_self = shared_from_this();
auto delegate = std::make_shared<FrameWriterInterfaceHelper>([weak_self](const Frame::Ptr &frame) {
auto strong_self = weak_self.lock();
if (strong_self) {
strong_self->onData(frame);
}
});
//这里有track通道,可以看通道的抽象,这里为false说明通道还没有准备好
for (auto &track : _player->getTracks(false)) {
track->addDelegate(delegate);
}
}
// Forward a single demuxed frame to the user's data callback, if one
// was registered via mk_player_set_on_data.
void onData(const Frame::Ptr &frame){
    lock_guard<recursive_mutex> lck(_mtx);
    if (!_on_data) {
        return;
    }
    _on_data(_on_data_data, frame->getTrackType(), frame->getCodecId(),
             frame->data(), frame->size(), frame->dts(), frame->pts());
}
//在这里强调一下就是关于track是否准备好,什么是准备好呢,我们拿h264track来举例,它包含的主要成员变量是:
bool _is_idr = false;   // NOTE(review): presumably flags whether an IDR frame was seen — confirm in inputFrame
int _width = 0;         // video width; 0 until known
int _height = 0;        // video height; 0 until known
float _fps = 0;         // frame rate; 0 until known
string _sps;            // cached SPS data; ready() requires it to be non-empty
string _pps;            // cached PPS data; ready() requires it to be non-empty
再来看ready()
// The H264 track becomes usable only once both parameter sets
// (SPS and PPS) have been captured.
bool H264Track::ready() {
    return !(_sps.empty() || _pps.empty());
}
//看到没,就是sps和pps都不为空了,说明这个通道就准备好了,一般我们创建一个通道的时候有两种方式,一种是
/**
* 不指定sps pps构造h264类型的媒体
* 在随后的inputFrame中获取sps pps
*/
H264Track() = default;
另外一种是:
/**
* 构造h264类型的媒体
* @param sps sps帧数据
* @param pps pps帧数据
* @param sps_prefix_len 264头长度,可以为3个或4个字节,一般为0x00 00 00 01
* @param pps_prefix_len 264头长度,可以为3个或4个字节,一般为0x00 00 00 01
*/
H264Track(const string &sps,const string &pps,int sps_prefix_len = 4,int pps_prefix_len = 4);
/**
* 构造h264类型的媒体
* @param sps sps帧
* @param pps pps帧
*/
H264Track(const Frame::Ptr &sps,const Frame::Ptr &pps);
但是我们常用的还是第一种,在后续收到数据时解析出 sps 和 pps,直接为 track 赋值。
那么 player 是在什么时候创建 track 的呢?通过代码找到:
// Base demuxer: owns at most one audio and one video track and reports
// newly created tracks to an optional listener.
class Demuxer : public PlayerBase{
public:
// Observer notified every time the demuxer instantiates a new Track.
class Listener{
public:
Listener() = default;
virtual ~Listener() = default;
virtual void onAddTrack(const Track::Ptr &track) = 0;
};
Demuxer(){};
virtual ~Demuxer(){};
/**
 * Report whether initialization has completed.
 * When constructing a RtspDemuxer, some rtsp sdp lacks sps/pps, so we
 * must wait for the rtp packet carrying the sps before init can finish.
 *
 * When constructing a RtmpDemuxer, sps/pps/aac_cfg cannot be known at
 * construction time; they only become available after inputRtmp() has
 * been fed data, and only then is initialization considered done.
 * @param analysisMs maximum stream analysis time, in milliseconds
 * @return true once initialization is complete
 */
bool isInited(int analysisMs) override;
/**
 * Get all tracks.
 * @param trackReady when true, only tracks that are ready are returned
 * @return all tracks
 */
vector<Track::Ptr> getTracks(bool trackReady = true) const override;
/**
 * Get the total program duration.
 * @return duration in seconds
 */
float getDuration() const override;
/**
 * Set the track listener.
 */
void setTrackListener(Listener *listener);
protected:
void onAddTrack(const Track::Ptr &track);
protected:
Listener *_listener = nullptr;
AudioTrack::Ptr _audioTrack; // the original audio track lives here
VideoTrack::Ptr _videoTrack; // the original video track lives here
Ticker _ticker;
float _fDuration = 0; // cached program duration, in seconds
};
class PlayerBase : public DemuxerBase, public mINI
继续:
class RtmpPlayerImp: public PlayerImp<RtmpPlayer,RtmpDemuxer>
在下面函数中找到创建track的代码
/**
 * Feed one rtmp packet into the demuxer.
 * The first audio/video packet triggers lazy creation of the matching
 * track; afterwards each packet is handed to the per-codec rtmp decoder.
 */
void RtmpDemuxer::inputRtmp(const RtmpPacket::Ptr &pkt) {
    switch (pkt->type_id) {
        case MSG_VIDEO: {
            if (!_try_get_video_track) {
                // Attempt video track creation exactly once, on the first video packet.
                _try_get_video_track = true;
                makeVideoTrack(AMFValue(pkt->getMediaType()), 0);
            }
            if (_video_rtmp_decoder) {
                _video_rtmp_decoder->inputRtmp(pkt);
            }
            break;
        }
        case MSG_AUDIO: {
            if (!_try_get_audio_track) {
                // Attempt audio track creation exactly once, on the first audio packet.
                _try_get_audio_track = true;
                makeAudioTrack(AMFValue(pkt->getMediaType()), pkt->getAudioSampleRate(),
                               pkt->getAudioChannel(), pkt->getAudioSampleBit(), 0);
            }
            if (_audio_rtmp_decoder) {
                _audio_rtmp_decoder->inputRtmp(pkt);
            }
            break;
        }
        default: break;
    }
}
//其实真正创建 RtmpPlayer 的流程是从这里触发的(play 时才按 url 创建具体 player)
// Start playback of the given url; the actual play() call is marshalled
// onto the player's own poller thread.
API_EXPORT void API_CALL mk_player_play(mk_player ctx, const char *url) {
    assert(ctx && url);
    auto &wrapper = **((MediaPlayerForC::Ptr *) ctx);
    auto player = wrapper.getPlayer();
    string url_copy(url);
    player->getPoller()->async([url_copy, player]() {
        // Now on the poller thread; safe to operate on the player.
        player->play(url_copy);
    });
}
/**
 * Create the protocol-specific player for this url and start playback.
 * Every callback/option previously registered on this facade is
 * transferred onto the newly created delegate before play() is issued.
 */
void MediaPlayer::play(const string &url) {
//pick the concrete player implementation according to the url scheme
_delegate = PlayerBase::createPlayer(_poller, url);
assert(_delegate);
setOnCreateSocket_l(_delegate, _on_create_socket);
//forward the callbacks the user registered on this facade
_delegate->setOnShutdown(_shutdownCB);
_delegate->setOnPlayResult(_playResultCB);
_delegate->setOnResume(_resumeCB);
_delegate->setMediaSource(_pMediaSrc);
//copy all configuration options (mINI key/values) into the delegate
_delegate->mINI::operator=(*this);
//the protocol-level play is finally executed here
_delegate->play(url);
}
//下面是rtmpplayer的player
/**
 * Parse the rtmp url into host/app/stream id, arm the play-timeout timer
 * and start the tcp connection. Results (success, failure, timeout) are
 * all reported through onPlayResult_l().
 */
void RtmpPlayer::play(const string &strUrl) {
teardown();
//url layout: rtmp://host[:port]/app/stream_id
string host_url = FindField(strUrl.data(), "://", "/");
_app = FindField(strUrl.data(), (host_url + "/").data(), "/");
_stream_id = FindField(strUrl.data(), (host_url + "/" + _app + "/").data(), NULL);
_tc_url = string("rtmp://") + host_url + "/" + _app;
if (!_app.size() || !_stream_id.size()) {
onPlayResult_l(SockException(Err_other, "rtmp url非法"), false);
return;
}
DebugL << host_url << " " << _app << " " << _stream_id;
auto iPort = atoi(FindField(host_url.data(), ":", NULL).data());
if (iPort <= 0) {
//no explicit port in the url: rtmp defaults to 1935
iPort = 1935;
} else {
//strip the ":port" suffix, keeping only the server host name
host_url = FindField(host_url.data(), NULL, ":");
}
if (!(*this)[kNetAdapter].empty()) {
setNetAdapter((*this)[kNetAdapter]);
}
weak_ptr<RtmpPlayer> weak_self = dynamic_pointer_cast<RtmpPlayer>(shared_from_this());
float play_timeout_sec = (*this)[kTimeoutMS].as<int>() / 1000.0f;
//one-shot timer: if play has not succeeded before the timeout, fail it
_play_timer.reset(new Timer(play_timeout_sec, [weak_self]() {
auto strong_self = weak_self.lock();
if (!strong_self) {
return false;
}
//report the timeout through the common result path
strong_self->onPlayResult_l(SockException(Err_timeout, "play rtmp timeout"), false);
//returning false stops the timer (one shot)
return false;
}, getPoller()));
_metadata_got = false;
//the key step: connect to the server as a tcp client
startConnect(host_url, iPort, play_timeout_sec);
}
//注意这里有tcpclient的回调
onConnect
onRecv
void RtmpPlayer::onConnect(const SockException &err){
if (err.getErrCode() != Err_success) {
onPlayResult_l(err, false);
return;
}
weak_ptr<RtmpPlayer> weakSelf = dynamic_pointer_cast<RtmpPlayer>(shared_from_this());
//在这里完成了握手协议
startClientSession([weakSelf]() {
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
return;
}
//发送连接请求
strongSelf->send_connect();
});
}
void RtmpProtocol::startClientSession(const function<void()> &func) {
//发送 C0C1
char handshake_head = HANDSHAKE_PLAINTEXT;
onSendRawData(obtainBuffer(&handshake_head, 1));
RtmpHandshake c1(0);
onSendRawData(obtainBuffer((char *) (&c1), sizeof(c1)));
_next_step_func = [this, func](const char *data, size_t len) {
//等待 S0+S1+S2
return handle_S0S1S2(data, len, func);
};
}
/**
 * Consume S0+S1+S2 from the server, answer with C2, and switch the parser
 * into normal rtmp chunk mode; finally run func (which sends "connect").
 * @return pointer past the consumed bytes, or nullptr if more data is needed
 */
const char* RtmpProtocol::handle_S0S1S2(const char *data, size_t len, const function<void()> &func) {
    const size_t expected = 1 + 2 * C1_HANDSHARK_SIZE;
    if (len < expected) {
        // not enough data yet
        return nullptr;
    }
    if (data[0] != HANDSHAKE_PLAINTEXT) {
        throw std::runtime_error("only plaintext[0x03] handshake supported");
    }
    // C2 echoes S1, which starts right after the S0 version byte.
    onSendRawData(obtainBuffer(data + 1, C1_HANDSHARK_SIZE));
    // Handshake is over: subsequent data is parsed as rtmp chunks/commands.
    _next_step_func = [this](const char *data, size_t len) {
        return handle_rtmp(data, len);
    };
    // Fire the post-handshake action (sends the connect command).
    func();
    return data + expected;
}
/**
 * Parse rtmp chunk-stream data: reassemble chunks (keyed by chunk stream
 * id) into complete RtmpPacket messages and pass them to handle_chunk().
 * @return pointer to the first unconsumed byte (caller keeps it buffered)
 */
const char* RtmpProtocol::handle_rtmp(const char *data, size_t len) {
auto ptr = data;
while (len) {
int offset = 0;
uint8_t flags = ptr[0];
//top two bits select the header format => 12/8/4/1-byte message header
size_t header_len = HEADER_LENGTH[flags >> 6];
_now_chunk_id = flags & 0x3f;
switch (_now_chunk_id) {
case 0: {
//id 0 means the two-byte basic header, chunk ids 64 - 319
//(second byte + 64)
if (len < 2) {
//need more data
return ptr;
}
_now_chunk_id = 64 + (uint8_t) (ptr[1]);
offset = 1;
break;
}
case 1: {
//id 1 means the three-byte basic header, chunk ids 64 - 65599
//((third byte) * 256 + second byte + 64)
if (len < 3) {
//need more data
return ptr;
}
_now_chunk_id = 64 + ((uint8_t) (ptr[2]) << 8) + (uint8_t) (ptr[1]);
offset = 2;
break;
}
//chunk stream id 2 is reserved for low-level protocol control messages
default : break;
}
if (len < header_len + offset) {
//need more data
return ptr;
}
RtmpHeader &header = *((RtmpHeader *) (ptr + offset));
//per-chunk-stream context: first = packet being assembled, second = last completed packet
auto &pr = _map_chunk_data[_now_chunk_id];
auto &now_packet = pr.first;
auto &last_packet = pr.second;
if (!now_packet) {
now_packet = RtmpPacket::create();
if (last_packet) {
//restore context fields omitted by compressed chunk headers
*now_packet = *last_packet;
}
//reset the absolute-timestamp flag
now_packet->is_abs_stamp = false;
}
auto &chunk_data = *now_packet;
chunk_data.chunk_id = _now_chunk_id;
//intentional fallthrough: longer headers contain all shorter headers' fields
switch (header_len) {
case 12:
chunk_data.is_abs_stamp = true;
chunk_data.stream_index = load_le32(header.stream_index);
case 8:
chunk_data.body_size = load_be24(header.body_size);
chunk_data.type_id = header.type_id;
case 4:
chunk_data.ts_field = load_be24(header.time_stamp);
}
auto time_stamp = chunk_data.ts_field;
if (chunk_data.ts_field == 0xFFFFFF) {
//0xFFFFFF marks an extended 32-bit timestamp placed right after the header
if (len < header_len + offset + 4) {
//need more data
return ptr;
}
time_stamp = load_be32(ptr + offset + header_len);
offset += 4;
}
if (chunk_data.body_size < chunk_data.buffer.size()) {
throw std::runtime_error("非法的bodySize");
}
//consume at most one chunk-size worth of payload per iteration
auto more = min(_chunk_size_in, (size_t) (chunk_data.body_size - chunk_data.buffer.size()));
if (len < header_len + offset + more) {
//need more data
return ptr;
}
if (more) {
chunk_data.buffer.append(ptr + header_len + offset, more);
}
ptr += header_len + offset + more;
len -= header_len + offset + more;
if (chunk_data.buffer.size() == chunk_data.body_size) {
//frame is ready
_now_stream_index = chunk_data.stream_index;
//absolute timestamps replace; delta timestamps accumulate
chunk_data.time_stamp = time_stamp + (chunk_data.is_abs_stamp ? 0 : chunk_data.time_stamp);
//save the chunk context for subsequent compressed headers
last_packet = now_packet;
if (chunk_data.body_size) {
handle_chunk(std::move(now_packet));
} else {
now_packet = nullptr;
}
}
}
return ptr;
}
//然后发送connect
inline void RtmpPlayer::send_connect() {
AMFValue obj(AMF_OBJECT);
obj.set("app", _app);
obj.set("tcUrl", _tc_url);
//未使用代理
obj.set("fpad", false);
//参考librtmp,什么作用?
obj.set("capabilities", 15);
//SUPPORT_VID_CLIENT_SEEK 支持seek
obj.set("videoFunction", 1);
//只支持aac
obj.set("audioCodecs", (double) (0x0400));
//只支持H264
obj.set("videoCodecs", (double) (0x0080));
sendInvoke("connect", obj);
addOnResultCB([this](AMFDecoder &dec) {
//TraceL << "connect result";
dec.load<AMFValue>();
auto val = dec.load<AMFValue>();
auto level = val["level"].as_string();
auto code = val["code"].as_string();
if (level != "status") {
throw std::runtime_error(StrPrinter << "connect 失败:" << level << " " << code << endl);
}
send_createStream();
});
}
接着创建流
inline void RtmpPlayer::send_createStream() {
AMFValue obj(AMF_NULL);
sendInvoke("createStream", obj);
addOnResultCB([this](AMFDecoder &dec) {
//TraceL << "createStream result";
dec.load<AMFValue>();
_stream_index = dec.load<int>();
send_play();
});
}
然后发送play命令
/**
 * Send the "play" command for _stream_id. Two status callbacks are
 * registered on purpose: the server is expected to answer with two
 * onStatus messages (presumably Play.Reset then Play.Start — confirm
 * against the server implementation), and each must carry level "status".
 */
inline void RtmpPlayer::send_play() {
    AMFEncoder enc;
    enc << "play" << ++_send_req_id << nullptr << _stream_id << (double) _stream_index;
    sendRequest(MSG_CMD, enc.data());
    auto check_status = [](AMFValue &val) {
        auto level = val["level"].as_string();
        auto code = val["code"].as_string();
        if (level != "status") {
            throw std::runtime_error(StrPrinter << "play 失败:" << level << " " << code << endl);
        }
    };
    addOnStatusCB(check_status);
    addOnStatusCB(check_status);
}
//这里就是解析rtmp 包
/**
 * TcpClient receive callback: feed the bytes into the rtmp parser.
 * Any parse exception is converted into a play-failure or shutdown event.
 */
void RtmpPlayer::onRecv(const Buffer::Ptr &buf){
    try {
        if (_benchmark_mode && !_play_timer) {
            // Benchmark mode: once the rtmp handshake is done, stop parsing
            // packets entirely; just keep the receive-timeout ticker fresh.
            _rtmp_recv_ticker.resetTime();
            return;
        }
        onParseRtmp(buf->data(), buf->size());
    } catch (exception &e) {
        // An empty _play_timer means the handshake phase already ended.
        onPlayResult_l(SockException(Err_other, e.what()), !_play_timer);
    }
}
/**
 * Common play-result / shutdown dispatcher.
 * @param ex             outcome; evaluates to false on success
 * @param handshake_done false while still in the initial play phase,
 *                       true once playback had already been established
 */
void RtmpPlayer::onPlayResult_l(const SockException &ex, bool handshake_done) {
if (ex.getErrCode() == Err_shutdown) {
//shutdown was initiated locally: do not fire any callback
return;
}
WarnL << ex.getErrCode() << " " << ex.what();
if (!handshake_done) {
//initial play phase: stop the play-timeout timer and report the result
_play_timer.reset();
//check whether benchmark (performance-test) mode is enabled
_benchmark_mode = (*this)[Client::kBenchmarkMode].as<int>();
onPlayResult(ex);
} else if (ex) {
//abnormal disconnect after playback had started
onShutdown(ex);
} else {
//playback resumed
onResume();
}
if (!ex) {
//success: (re)start the rtmp receive-timeout watchdog
_rtmp_recv_ticker.resetTime();
auto timeout_ms = (*this)[kMediaTimeoutMS].as<uint64_t>();
weak_ptr<RtmpPlayer> weakSelf = dynamic_pointer_cast<RtmpPlayer>(shared_from_this());
auto lam = [weakSelf, timeout_ms]() {
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
//player destroyed: stop the timer
return false;
}
if (strongSelf->_rtmp_recv_ticker.elapsedTime() > timeout_ms) {
//no rtmp media data received within the timeout window
SockException ex(Err_timeout, "receive rtmp timeout");
strongSelf->onPlayResult_l(ex, true);
return false;
}
//keep the watchdog running
return true;
};
//the watchdog fires at half the timeout interval
_rtmp_recv_timer = std::make_shared<Timer>(timeout_ms / 2000.0f, lam, getPoller());
} else {
//failure: tear the connection down
shutdown(SockException(Err_shutdown,"teardown"));
}
}