Android13 NuPlayer::start流程分析

首先从NuPlayer的start方法开始:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
void NuPlayer::start() {
    (new AMessage(kWhatStart, this))->post();
}

发送kWhatStart消息,消息会在onMessageReceived中处理:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// Looper callback (excerpt): handles kWhatStart posted by start().
// If playback was already started, resume it (unless the source is
// still buffering); otherwise run the initial start sequence.
void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatStart:
        {
            ALOGV("kWhatStart");
            if (mStarted) {
                // do not resume yet if the source is still buffering
                if (!mPausedForBuffering) {
                    onResume(); // already started: resume playback
                }
            } else {
                onStart(); // first start: set up source/renderer/decoders
            }
            mPausedByClient = false; // a client-initiated start clears the client-pause flag
            break;
        }
        default:
            TRESPASS();
            break;
    }
}

NuPlayer onResume

 NuPlayer的onResume方法:

sp<Source> mSource;
sp<Renderer> mRenderer;
//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// Resumes playback after a pause: resumes the Source and the Renderer,
// and re-creates the audio decoder if it was released while paused.
// A resume is discarded while a reset is in progress or when the player
// is not actually paused.
void NuPlayer::onResume() {
    if (!mPaused || mResetting) {
        ALOGD_IF(mResetting, "resetting, onResume discarded");
        return;
    }
    mPaused = false;
    if (mSource != NULL) {
        mSource->resume(); // resume the Source (e.g. GenericSource)
    } else {
        ALOGW("resume called when source is gone or not set");
    }
    // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
    // needed.
    if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
        instantiateDecoder(true /* audio */, &mAudioDecoder);
    }
    if (mRenderer != NULL) {
        mRenderer->resume(); // resume the Renderer (posts kWhatResume to its looper)
    } else {
        ALOGW("resume called when renderer is gone or not set");
    }


    startPlaybackTimer("onresume");
}

这个方法的主要处理如下:

调用Source的resume方法

调用Renderer的resume方法

下面我们分别进行分析:

NuPlayer::GenericSource::resume

调用Source的resume方法,GenericSource继承于Source,因此会调用GenericSource的resume方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Resumes the source. For GenericSource this only sets the started flag
// under the source lock.
void NuPlayer::GenericSource::resume() {
    Mutex::Autolock _l(mLock);
    mStarted = true;
}

NuPlayer::Renderer::resume

调用Renderer的resume方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
void NuPlayer::Renderer::resume() {
    (new AMessage(kWhatResume, this))->post();
}

创建一个消息为kWhatResume的AMessage并发送,该消息会在NuPlayer::Renderer::onMessageReceived中接收:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
// Looper callback (excerpt): dispatches kWhatResume to onResume().
// NOTE(review): the mSyncFlag/mSyncCount block at the end presumably
// wakes a thread waiting on mSyncCondition for message processing to
// complete — confirm against the full NuPlayerRenderer source.
void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatResume:
        {
            onResume(); // perform the actual resume work
            break;
        }
        default:
            TRESPASS();
            break;
    }
    if (!mSyncFlag.test_and_set()) {
        Mutex::Autolock syncLock(mSyncLock);
        ++mSyncCount;
        mSyncCondition.broadcast();
    }
}

调用onResume方法:

    sp<MediaPlayerBase::AudioSink> mAudioSink;
    const sp<MediaClock> mMediaClock;
//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
// Resumes audio/video rendering after a pause: restarts the AudioSink,
// restores the playback rate on the sink and the MediaClock, and
// re-posts the audio/video drain handlers so queued buffers continue
// to be rendered.
void NuPlayer::Renderer::onResume() {
    if (!mPaused) {
        return;
    }


    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    cancelAudioOffloadPauseTimeout();
    if (mAudioSink->ready()) {
        status_t err = mAudioSink->start(); // restart the AudioSink
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            notifyAudioTearDown(kDueToError);
        }
    }


    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // rendering started message may have been delayed if we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l(); // tell NuPlayer that rendering (re)started
        }
        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings); // restore the playback rate on the AudioSink
        }


        mMediaClock->setPlaybackRate(mPlaybackRate); // restore the playback rate on the MediaClock


        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }


    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}

上述方法的主要处理如下:

1、调用AudioSink的start方法

2、调用notifyIfMediaRenderingStarted_l方法,通知NuPlayer,Render开始了

3、调用AudioSink的setPlaybackRate,设置播放速率

4、调用MediaClock的setPlaybackRate,设置播放速率

下面分别进行分析:

MediaPlayerService::AudioOutput::start

MediaPlayerService的内部类AudioOutput继承于MediaPlayerBase::AudioSink,因此调用MediaPlayerBase::AudioSink的start方法会调用到MediaPlayerService::AudioOutput::start方法:

sp<AudioTrack>          mTrack;
//frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
// Starts (or restarts) audio output on the underlying AudioTrack.
// Re-applies the cached volume and aux-effect send level first, so
// state set while the track was stopped takes effect. Returns NO_INIT
// if no AudioTrack has been opened yet.
status_t MediaPlayerService::AudioOutput::start()
{
    ALOGV("start");
    Mutex::Autolock lock(mLock);
    if (mCallbackData != NULL) {
        mCallbackData->endTrackSwitch();
    }
    if (mTrack != 0) {
        mTrack->setVolume(mLeftVolume, mRightVolume); // apply cached L/R volume to the AudioTrack
        mTrack->setAuxEffectSendLevel(mSendLevel);
        status_t status = mTrack->start(); // start the AudioTrack itself
        if (status == NO_ERROR) {
            mVolumeHandler->setStarted();
        }
        return status;
    }
    return NO_INIT;
}

在上面方法中会调用AudioTrack的setVolume方法,设置AudioTrack音量,然后调用AudioTrack的start方法启动AudioTrack。这部分内容属于AudioTrack的start流程,与AudioTrack的Play流程一致,后续单独分析:

待更新

notifyIfMediaRenderingStarted_l

调用notifyIfMediaRenderingStarted_l方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
// Called with mLock held. Once both the audio and the video stream have
// reached the current drain generation, posts kWhatMediaRenderingStart
// to NuPlayer. While paused, only mRenderingDataDelivered is recorded
// and the notification is deferred until onResume().
void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        // Reset so the notification fires only once per generation.
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;


        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post(); // delivered to NuPlayer as kWhatRendererNotify / kWhatMediaRenderingStart
    }
}

发送kWhatRendererNotify消息(消息的创建是在onStart中),what参数为kWhatMediaRenderingStart,通知NuPlayer,Render开始了,在NuPlayer的onMessageReceived中处理这个消息:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// Looper callback (excerpt — the function's closing brace is trimmed in
// this quote): handles kWhatRendererNotify messages posted by the
// Renderer (EOS, flush completion, rendering start, audio teardown).
void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatRendererNotify:
        {
            // Drop messages coming from a stale renderer (generation mismatch).
            int32_t requesterGeneration = mRendererGeneration - 1;
            CHECK(msg->findInt32("generation", &requesterGeneration));
            if (requesterGeneration != mRendererGeneration) {
                ALOGV("got message from old renderer, generation(%d:%d)",
                        requesterGeneration, mRendererGeneration);
                return;
            }


            int32_t what;
            CHECK(msg->findInt32("what", &what));


            if (what == Renderer::kWhatEOS) {
                // One stream reached end-of-stream (or errored out).
                int32_t audio;
                CHECK(msg->findInt32("audio", &audio));


                int32_t finalResult;
                CHECK(msg->findInt32("finalResult", &finalResult));


                if (audio) {
                    mAudioEOS = true;
                } else {
                    mVideoEOS = true;
                }


                if (finalResult == ERROR_END_OF_STREAM) {
                    ALOGV("reached %s EOS", audio ? "audio" : "video");
                } else {
                    ALOGE("%s track encountered an error (%d)",
                         audio ? "audio" : "video", finalResult);


                    notifyListener(
                            MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, finalResult);
                }


                // Playback is complete only when every existing stream hit EOS.
                if ((mAudioEOS || mAudioDecoder == NULL)
                        && (mVideoEOS || mVideoDecoder == NULL)) {
                    notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
                }
            } else if (what == Renderer::kWhatFlushComplete) {
                int32_t audio;
                CHECK(msg->findInt32("audio", &audio));


                if (audio) {
                    mAudioEOS = false;
                } else {
                    mVideoEOS = false;
                }


                ALOGV("renderer %s flush completed.", audio ? "audio" : "video");
                if (audio && (mFlushingAudio == NONE || mFlushingAudio == FLUSHED
                        || mFlushingAudio == SHUT_DOWN)) {
                    // Flush has been handled by tear down.
                    break;
                }
                handleFlushComplete(audio, false /* isDecoder */);
                finishFlushIfPossible();
            } else if (what == Renderer::kWhatVideoRenderingStart) {
                notifyListener(MEDIA_INFO, MEDIA_INFO_RENDERING_START, 0);
            } else if (what == Renderer::kWhatMediaRenderingStart) {
                ALOGV("media rendering started");
                notifyListener(MEDIA_STARTED, 0, 0); // notify the client that playback has started
            } else if (what == Renderer::kWhatAudioTearDown) {
                int32_t reason;
                CHECK(msg->findInt32("reason", &reason));
                ALOGV("Tear down audio with reason %d.", reason);
                if (reason == Renderer::kDueToTimeout && !(mPaused && mOffloadAudio)) {
                    // TimeoutWhenPaused is only for offload mode.
                    ALOGW("Received a stale message for teardown, mPaused(%d), mOffloadAudio(%d)",
                          mPaused, mOffloadAudio);
                    break;
                }
                int64_t positionUs;
                if (!msg->findInt64("positionUs", &positionUs)) {
                    positionUs = mPreviousSeekTimeUs; // fall back to the last seek position
                }


                restartAudio(
                        positionUs, reason == Renderer::kForceNonOffload /* forceNonOffload */,
                        reason != Renderer::kDueToTimeout /* needsToCreateAudioDecoder */);
            }
            break;
        }
}

调用notifyListener方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// Forwards a player event to the NuPlayerDriver, if it is still alive.
// mDriver is held weakly, so promote() may fail after the driver is gone.
void NuPlayer::notifyListener(int msg, int ext1, int ext2, const Parcel *in) {
    if (mDriver == NULL) {
        return;
    }


    sp<NuPlayerDriver> driver = mDriver.promote();


    if (driver == NULL) {
        return;
    }


    driver->notifyListener(msg, ext1, ext2, in); // e.g. MEDIA_STARTED when rendering begins
}

调用NuPlayerDriver的notifyListener方法,发送MEDIA_STARTED消息:

// Thread-safe wrapper: acquires mLock and delegates to notifyListener_l().
void NuPlayerDriver::notifyListener(
        int msg, int ext1, int ext2, const Parcel *in) {
    Mutex::Autolock autoLock(mLock);
    notifyListener_l(msg, ext1, ext2, in);
}

调用notifyListener_l方法:

// Called with mLock held. Pre-processes selected events before
// forwarding them to the client via sendEvent():
//  - MEDIA_PLAYBACK_COMPLETE: implements looping (seek back to 0 and
//    restart the sink, suppressing the completion event) or pauses the
//    player, then deliberately falls through into MEDIA_ERROR's block.
//  - MEDIA_ERROR: records the error in playback metrics and marks EOS.
// mLock is dropped around sendEvent() so listener callbacks that
// re-enter the driver do not deadlock.
void NuPlayerDriver::notifyListener_l(
        int msg, int ext1, int ext2, const Parcel *in) {
    ALOGV("notifyListener_l(%p), (%d, %d, %d, %d), loop setting(%d, %d)",
            this, msg, ext1, ext2, (in == NULL ? -1 : (int)in->dataSize()), mAutoLoop, mLooping);
    switch (msg) {
        case MEDIA_PLAYBACK_COMPLETE:
        {
            if (mState != STATE_RESET_IN_PROGRESS) {
                if (mAutoLoop) {
                    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
                    if (mAudioSink != NULL) {
                        streamType = mAudioSink->getAudioStreamType();
                    }
                    if (streamType == AUDIO_STREAM_NOTIFICATION) {
                        ALOGW("disabling auto-loop for notification");
                        mAutoLoop = false;
                    }
                }
                if (mLooping || mAutoLoop) {
                    mPlayer->seekToAsync(0);
                    if (mAudioSink != NULL) {
                        // The renderer has stopped the sink at the end in order to play out
                        // the last little bit of audio. If we're looping, we need to restart it.
                        mAudioSink->start();
                    }
                    // don't send completion event when looping
                    return;
                }
                if (property_get_bool("persist.debug.sf.stats", false)) {
                    Vector<String16> args;
                    dump(-1, args);
                }
                mPlayer->pause();
                mState = STATE_PAUSED;
            }
            FALLTHROUGH_INTENDED;
        }


        case MEDIA_ERROR:
        {
            // when we have an error, add it to the analytics for this playback.
            // ext1 is our primary 'error type' value. Only add ext2 when non-zero.
            // [test against msg is due to fall through from previous switch value]
            if (msg == MEDIA_ERROR) {
                Mutex::Autolock autoLock(mMetricsLock);
                if (mMetricsItem != NULL) {
                    mMetricsItem->setInt32(kPlayerError, ext1);
                    if (ext2 != 0) {
                        mMetricsItem->setInt32(kPlayerErrorCode, ext2);
                    }
                    mMetricsItem->setCString(kPlayerErrorState, stateString(mState).c_str());
                }
            }
            mAtEOS = true;
            break;
        }


        default:
            break;
    }
    mLock.unlock();
    sendEvent(msg, ext1, ext2, in); // deliver the event to the client listener (lock dropped)
    mLock.lock();
}

最后消息会在MediaPlayer的内部类EventHandler的handleMessage中处理:

//frameworks/base/media/java/android/media/MediaPlayer.java
// Excerpt from MediaPlayer (outer class not closed in this quote):
// EventHandler runs on the application-supplied looper and receives the
// events sent up from the native player layer (e.g. MEDIA_STARTED).
public class MediaPlayer extends PlayerBase implements SubtitleController.Listener, VolumeAutomation, AudioRouting {
    private class EventHandler extends Handler
    {
        private MediaPlayer mMediaPlayer;


        public EventHandler(MediaPlayer mp, Looper looper) {
            super(looper);
            mMediaPlayer = mp;
        }


        @Override
        public void handleMessage(Message msg) {
            // A native context of 0 means the native player has been released.
            if (mMediaPlayer.mNativeContext == 0) {
                Log.w(TAG, "mediaplayer went away with unhandled events");
                return;
            }
            switch(msg.what) {
            case MEDIA_STARTED:
                // fall through
            case MEDIA_PAUSED:
                {
                    // Keep the TimeProvider in sync with the started/paused state.
                    TimeProvider timeProvider = mTimeProvider;
                    if (timeProvider != null) {
                        timeProvider.onPaused(msg.what == MEDIA_PAUSED);
                    }
                }
                break;
            default:
                Log.e(TAG, "Unknown message type " + msg.what);
                return;
            }
        }
    }

MediaPlayerService::AudioOutput::setPlaybackRate

MediaPlayerService的内部类AudioOutput继承于MediaPlayerBase::AudioSink,因此调用MediaPlayerBase::AudioSink的setPlaybackRate方法会调用到MediaPlayerService::AudioOutput::setPlaybackRate方法:

sp<AudioTrack>          mTrack;
//frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
// Applies a new playback rate (speed/pitch/fallback/stretch) to the
// AudioTrack. If the track is not opened yet, the rate is only cached
// and will be applied when the track is created.
status_t MediaPlayerService::AudioOutput::setPlaybackRate(const AudioPlaybackRate &rate)
{
    ALOGV("setPlaybackRate(%f %f %d %d)",
                rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
    Mutex::Autolock lock(mLock);
    if (mTrack == 0) {
        // remember rate so that we can set it when the track is opened
        mPlaybackRate = rate;
        return OK;
    }
    status_t res = mTrack->setPlaybackRate(rate); // forward to the AudioTrack
    if (res != NO_ERROR) {
        return res;
    }
    // rate.mSpeed is always greater than 0 if setPlaybackRate succeeded
    CHECK_GT(rate.mSpeed, 0.f);
    mPlaybackRate = rate;
    if (mSampleRateHz != 0) {
        // milliseconds per frame shrinks as the playback speed grows
        mMsecsPerFrame = 1E3f / (rate.mSpeed * mSampleRateHz);
    }
    return res;
}

调用AudioTrack的setPlaybackRate方法,这部分属于AudioTrack的内容,后续单独分析:

待更新

MediaClock::setPlaybackRate

调用MediaClock的setPlaybackRate,设置播放速率:

//frameworks/av/media/libstagefright/MediaClock.cpp
// Changes the clock's playback rate. If an anchor exists, the anchor is
// first moved to "now" (computed with the OLD rate) so media time stays
// continuous across the rate change; pending timers are then
// re-evaluated when the new rate is non-zero.
void MediaClock::setPlaybackRate(float rate) {
    CHECK_GE(rate, 0.0);
    Mutex::Autolock autoLock(mLock);
    if (mAnchorTimeRealUs == -1) {
        // No anchor yet: just remember the rate.
        mPlaybackRate = rate;
        return;
    }


    int64_t nowUs = ALooper::GetNowUs();
    // Current media position, extrapolated with the old rate.
    int64_t nowMediaUs = mAnchorTimeMediaUs + (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
    if (nowMediaUs < 0) {
        ALOGW("setRate: anchor time should not be negative, set to 0.");
        nowMediaUs = 0;
    }
    updateAnchorTimesAndPlaybackRate_l(nowMediaUs, nowUs, rate); // update anchor times and playback rate


    if (rate > 0.0) {
        ++mGeneration; // invalidate any pending kWhatTimeIsUp messages
        processTimers_l();
    }
}

调用updateAnchorTimesAndPlaybackRate_l,更新锚点时间和播放速率:

//frameworks/av/media/libstagefright/MediaClock.cpp
// Called with mLock held. Stores the new (media time, real time) anchor
// pair and playback rate; notifies discontinuity listeners only when
// something actually changed.
void MediaClock::updateAnchorTimesAndPlaybackRate_l(int64_t anchorTimeMediaUs,
        int64_t anchorTimeRealUs, float playbackRate) {
    if (mAnchorTimeMediaUs != anchorTimeMediaUs
            || mAnchorTimeRealUs != anchorTimeRealUs
            || mPlaybackRate != playbackRate) {
        mAnchorTimeMediaUs = anchorTimeMediaUs;
        mAnchorTimeRealUs = anchorTimeRealUs;
        mPlaybackRate = playbackRate;
        notifyDiscontinuity_l();
    }
}

调用processTimers_l方法:

//frameworks/av/media/libstagefright/MediaClock.cpp
// Called with mLock held. Fires every timer whose media time has been
// reached (posting its notify message with TIMER_REASON_REACHED),
// computes the real-time delay to the earliest remaining timer, and
// schedules a kWhatTimeIsUp message for that moment.
void MediaClock::processTimers_l() {
    int64_t nowMediaTimeUs;
    status_t status = getMediaTime_l(
            ALooper::GetNowUs(), &nowMediaTimeUs, false /* allowPastMaxTime */);


    if (status != OK) {
        return;
    }


    int64_t nextLapseRealUs = INT64_MAX;
    std::multimap<int64_t, Timer> notifyList;
    auto it = mTimers.begin();
    while (it != mTimers.end()) {
        // Remaining media time until this timer fires, clamped to int64 range.
        double diff = it->mAdjustRealUs * (double)mPlaybackRate
            + it->mMediaTimeUs - nowMediaTimeUs;
        int64_t diffMediaUs;
        if (diff > (double)INT64_MAX) {
            diffMediaUs = INT64_MAX;
        } else if (diff < (double)INT64_MIN) {
            diffMediaUs = INT64_MIN;
        } else {
            diffMediaUs = diff;
        }


        if (diffMediaUs <= 0) {
            // Due (or overdue): move it to the notify list.
            notifyList.emplace(diffMediaUs, *it);
            it = mTimers.erase(it);
        } else {
            if (mPlaybackRate != 0.0
                && (double)diffMediaUs < (double)INT64_MAX * (double)mPlaybackRate) {
                int64_t targetRealUs = diffMediaUs / (double)mPlaybackRate;
                if (targetRealUs < nextLapseRealUs) {
                    nextLapseRealUs = targetRealUs; // track the earliest upcoming timer
                }
            }
            ++it;
        }
    }


    // Fire all due timers.
    auto itNotify = notifyList.begin();
    while (itNotify != notifyList.end()) {
        itNotify->second.mNotify->setInt32("reason", TIMER_REASON_REACHED);
        itNotify->second.mNotify->post();
        itNotify = notifyList.erase(itNotify);
    }


    // Nothing left to schedule (no timers, clock stopped, or no anchor).
    if (mTimers.empty() || mPlaybackRate == 0.0 || mAnchorTimeMediaUs < 0
        || nextLapseRealUs == INT64_MAX) {
        return;
    }


    sp<AMessage> msg = new AMessage(kWhatTimeIsUp, this);
    msg->setInt32("generation", mGeneration);
    msg->post(nextLapseRealUs); // post kWhatTimeIsUp after the computed delay
}

发送kWhatTimeIsUp消息,消息会在onMessageReceived中处理:

//frameworks/av/media/libstagefright/MediaClock.cpp
// Looper callback: kWhatTimeIsUp re-runs processTimers_l(), which may
// schedule the next kWhatTimeIsUp — forming a self-rescheduling loop.
// Stale messages (generation mismatch, e.g. after a rate change) are
// ignored.
void MediaClock::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatTimeIsUp:
        {
            int32_t generation;
            CHECK(msg->findInt32("generation", &generation));


            Mutex::Autolock autoLock(mLock);
            if (generation != mGeneration) {
                break; // stale timer message
            }
            processTimers_l(); // process timers again, forming a loop
            break;
        }


        default:
            TRESPASS();
            break;
    }
}

NuPlayer onStart

NuPlayer的onStart方法:

sp<Source> mSource;
sp<DecoderBase> mVideoDecoder;
sp<DecoderBase> mAudioDecoder;
sp<MediaPlayerBase::AudioSink> mAudioSink;
sp<Renderer> mRenderer;
sp<ALooper> mRendererLooper;
//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// Initial start sequence: starts the Source, performs the optional
// initial seek, decides whether audio can be offloaded, creates the
// Renderer on its own looper, attaches it to the decoders, and finally
// posts a source scan to instantiate the audio/video decoders.
void NuPlayer::onStart(int64_t startPositionUs, MediaPlayerSeekMode mode) {
    ALOGV("onStart: mCrypto: %p (%d)", mCrypto.get(),
            (mCrypto != NULL ? mCrypto->getStrongCount() : 0));


    if (!mSourceStarted) {
        mSourceStarted = true;
        mSource->start(); // start the Source (e.g. GenericSource)
    }
    if (startPositionUs > 0) {
        performSeek(startPositionUs, mode); // seek to the requested start position
        if (mSource->getFormat(false /* audio */) == NULL) {
            return;
        }
    }


    mOffloadAudio = false;
    mAudioEOS = false;
    mVideoEOS = false;
    mStarted = true;
    mPaused = false;


    uint32_t flags = 0;


    if (mSource->isRealTime()) {
        flags |= Renderer::FLAG_REAL_TIME;
    }


    bool hasAudio = (mSource->getFormat(true /* audio */) != NULL); // query the audio format
    bool hasVideo = (mSource->getFormat(false /* audio */) != NULL); // query the video format
    if (!hasAudio && !hasVideo) {
        ALOGE("no metadata for either audio or video source");
        mSource->stop(); // nothing to play: stop the Source again
        mSourceStarted = false;
        notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_MALFORMED);
        return;
    }
    ALOGV_IF(!hasAudio, "no metadata for audio source");  // video only stream


    sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */); // audio metadata for the offload decision


    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
    if (mAudioSink != NULL) {
        streamType = mAudioSink->getAudioStreamType(); // stream type of the audio sink
    }


    mOffloadAudio =
        canOffloadStream(audioMeta, hasVideo, mSource->isStreaming(), streamType)
                && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f); // offload only at normal speed/pitch


    // Modular DRM: Disabling audio offload if the source is protected
    if (mOffloadAudio && mIsDrmProtected) {
        mOffloadAudio = false;
        ALOGV("onStart: Disabling mOffloadAudio now that the source is protected.");
    }


    if (mOffloadAudio) {
        flags |= Renderer::FLAG_OFFLOAD_AUDIO;
    }


    sp<AMessage> notify = new AMessage(kWhatRendererNotify, this); // renderer -> NuPlayer notification message
    ++mRendererGeneration;
    notify->setInt32("generation", mRendererGeneration);
    mRenderer = new Renderer(mAudioSink, mMediaClock, notify, flags); // create the Renderer
    mRendererLooper = new ALooper; // dedicated looper thread for the renderer
    mRendererLooper->setName("NuPlayerRenderer"); // name the looper thread
    mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO); // start the looper at audio priority
    mRendererLooper->registerHandler(mRenderer); // register the renderer as its handler


    status_t err = mRenderer->setPlaybackSettings(mPlaybackSettings); // apply the playback rate settings
    if (err != OK) {
        mSource->stop(); // roll back: stop the Source
        mSourceStarted = false;
        notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
        return;
    }


    float rate = getFrameRate();
    if (rate > 0) {
        mRenderer->setVideoFrameRate(rate); // tell the renderer the video frame rate
    }


    if (mVideoDecoder != NULL) {
        mVideoDecoder->setRenderer(mRenderer); // attach the renderer to the video decoder
    }
    if (mAudioDecoder != NULL) {
        mAudioDecoder->setRenderer(mRenderer); // attach the renderer to the audio decoder
    }


    startPlaybackTimer("onstart");


    postScanSources(); // instantiate the audio/video decoders asynchronously
}

这个方法的主要处理如下:

1、调用Source的start方法,开始NuPlayer Source

2、调用performSeek方法

3、创建Renderer对象

4、创建一个RendererLooper,并开始这个Looper

5、调用Renderer的setPlaybackSettings方法,配置Playback速率等内容

6、调用Renderer的setVideoFrameRate方法,设置视频帧率

7、调用VideoDecoder的setRenderer方法,设置视频渲染器

8、调用AudioDecoder的setRenderer方法,设置音频渲染器

9、调用postScanSources方法,初始化audio/video的解码器

下面我们分别进行分析:

NuPlayer::GenericSource::start

调用Source的start方法,GenericSource继承于Source,因此会调用GenericSource的start方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Starts the source: posts asynchronous read-buffer requests for the
// audio and video tracks (if present) and marks the source as started.
void NuPlayer::GenericSource::start() {
    Mutex::Autolock _l(mLock);
    ALOGI("start");


    if (mAudioTrack.mSource != NULL) {
        postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);  // post a read-buffer request for the audio track
    }


    if (mVideoTrack.mSource != NULL) {
        postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);  // post a read-buffer request for the video track
    }


    mStarted = true;
}

调用postReadBuffer方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Posts a kWhatReadBuffer message for the given track type, unless one
// is already pending for it — mPendingReadBufferTypes is a bitmask used
// to coalesce duplicate requests.
void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
    if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
        mPendingReadBufferTypes |= (1 << trackType);
        sp<AMessage> msg = new AMessage(kWhatReadBuffer, this); // create the kWhatReadBuffer message
        msg->setInt32("trackType", trackType);
        msg->post(); // handled in onMessageReceived -> onReadBuffer
    }
}

发送的kWhatReadBuffer消息会在onMessageReceived中处理:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Looper callback (excerpt): dispatches kWhatReadBuffer to onReadBuffer().
// mLock is taken here, so message handlers run under the source lock.
void NuPlayer::GenericSource::onMessageReceived(const sp<AMessage> &msg) {
    Mutex::Autolock _l(mLock);
    switch (msg->what()) {
      case kWhatReadBuffer:
      {
          onReadBuffer(msg);
          break;
      }
    }
}

调用onReadBuffer方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Handles kWhatReadBuffer: clears the pending bit for this track type
// (so further postReadBuffer() calls can be queued) and does the read.
void NuPlayer::GenericSource::onReadBuffer(const sp<AMessage>& msg) {
    int32_t tmpType;
    CHECK(msg->findInt32("trackType", &tmpType));
    media_track_type trackType = (media_track_type)tmpType;
    mPendingReadBufferTypes &= ~(1 << trackType);
    readBuffer(trackType);
}

调用readBuffer方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Reads up to maxBuffers access units for the given track from the
// underlying IMediaSource and queues them into the track's packet
// source. Serves both normal prefetch (seekTimeUs < 0) and seeks.
// For streaming sources it re-posts itself until the buffered duration
// reaches the initial/resume watermark, then notifies prepared or
// resume-on-buffering-end.
// NOTE: mLock is dropped around the blocking IMediaSource::read* calls;
// the generation check afterwards detects track changes that happened
// while the lock was released.
void NuPlayer::GenericSource::readBuffer(
        media_track_type trackType, int64_t seekTimeUs, MediaPlayerSeekMode mode,
        int64_t *actualTimeUs, bool formatChange) {
    Track *track;
    size_t maxBuffers = 1;
    switch (trackType) {
        case MEDIA_TRACK_TYPE_VIDEO:
            track = &mVideoTrack;
            maxBuffers = 8;  // too large of a number may influence seeks
            break;
        case MEDIA_TRACK_TYPE_AUDIO:
            track = &mAudioTrack;
            maxBuffers = 64;
            break;
        case MEDIA_TRACK_TYPE_SUBTITLE:
            track = &mSubtitleTrack;
            break;
        case MEDIA_TRACK_TYPE_TIMEDTEXT:
            track = &mTimedTextTrack;
            break;
        default:
            TRESPASS();
    }


    if (track->mSource == NULL) {
        return;
    }


    if (actualTimeUs) {
        *actualTimeUs = seekTimeUs;
    }


    MediaSource::ReadOptions options;


    bool seeking = false;
    if (seekTimeUs >= 0) {
        options.setSeekTo(seekTimeUs, mode);
        seeking = true;
    }


    const bool couldReadMultiple = (track->mSource->supportReadMultiple());


    if (couldReadMultiple) {
        options.setNonBlocking();
    }


    int32_t generation = getDataGeneration(trackType);
    for (size_t numBuffers = 0; numBuffers < maxBuffers; ) {
        Vector<MediaBufferBase *> mediaBuffers;
        status_t err = NO_ERROR;


        // Drop mLock around the (potentially blocking) extractor read.
        sp<IMediaSource> source = track->mSource;
        mLock.unlock();
        if (couldReadMultiple) {
            err = source->readMultiple(
                    &mediaBuffers, maxBuffers - numBuffers, &options);
        } else {
            MediaBufferBase *mbuf = NULL;
            err = source->read(&mbuf, &options);
            if (err == OK && mbuf != NULL) {
                mediaBuffers.push_back(mbuf);
            }
        }
        mLock.lock();


        options.clearNonPersistent();


        size_t id = 0;
        size_t count = mediaBuffers.size();


        // in case track has been changed since we don't have lock for some time.
        if (generation != getDataGeneration(trackType)) {
            for (; id < count; ++id) {
                mediaBuffers[id]->release();
            }
            break;
        }


        for (; id < count; ++id) {
            int64_t timeUs;
            MediaBufferBase *mbuf = mediaBuffers[id];
            if (!mbuf->meta_data().findInt64(kKeyTime, &timeUs)) {
                // A buffer without a timestamp is malformed; signal EOS with error.
                mbuf->meta_data().dumpToLog();
                track->mPackets->signalEOS(ERROR_MALFORMED);
                break;
            }
            if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
                mAudioTimeUs = timeUs;
            } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
                mVideoTimeUs = timeUs;
            }


            queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);


            sp<ABuffer> buffer = mediaBufferToABuffer(mbuf, trackType);
            if (numBuffers == 0 && actualTimeUs != nullptr) {
                *actualTimeUs = timeUs; // report where the read actually landed
            }
            if (seeking && buffer != nullptr) {
                sp<AMessage> meta = buffer->meta();
                if (meta != nullptr && mode == MediaPlayerSeekMode::SEEK_CLOSEST
                        && seekTimeUs > timeUs) {
                    // SEEK_CLOSEST may land before the target; tell the decoder
                    // where decoded output should actually resume.
                    sp<AMessage> extra = new AMessage;
                    extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
                    meta->setMessage("extra", extra);
                }
            }


            track->mPackets->queueAccessUnit(buffer);
            formatChange = false;
            seeking = false;
            ++numBuffers;
        }
        if (id < count) {
            // Error, some mediaBuffer doesn't have kKeyTime.
            for (; id < count; ++id) {
                mediaBuffers[id]->release();
            }
            break;
        }


        if (err == WOULD_BLOCK) {
            break;
        } else if (err == INFO_FORMAT_CHANGED) {
#if 0
            track->mPackets->queueDiscontinuity(
                    ATSParser::DISCONTINUITY_FORMATCHANGE,
                    NULL,
                    false /* discard */);
#endif
        } else if (err != OK) {
            queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
            track->mPackets->signalEOS(err);
            break;
        }
    }


    if (mIsStreaming
        && (trackType == MEDIA_TRACK_TYPE_VIDEO || trackType == MEDIA_TRACK_TYPE_AUDIO)) {
        status_t finalResult;
        int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);


        // TODO: maxRebufferingMarkMs could be larger than
        // mBufferingSettings.mResumePlaybackMarkMs
        int64_t markUs = (mPreparing ? mBufferingSettings.mInitialMarkMs
            : mBufferingSettings.mResumePlaybackMarkMs) * 1000LL;
        if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
            if (mPreparing || mSentPauseOnBuffering) {
                // Also require the counterpart track to have reached the mark.
                Track *counterTrack =
                    (trackType == MEDIA_TRACK_TYPE_VIDEO ? &mAudioTrack : &mVideoTrack);
                if (counterTrack->mSource != NULL) {
                    durationUs = counterTrack->mPackets->getBufferedDurationUs(&finalResult);
                }
                if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
                    if (mPreparing) {
                        notifyPrepared();
                        mPreparing = false;
                    } else {
                        sendCacheStats();
                        mSentPauseOnBuffering = false;
                        sp<AMessage> notify = dupNotify();
                        notify->setInt32("what", kWhatResumeOnBufferingEnd);
                        notify->post();
                    }
                }
            }
            return;
        }


        postReadBuffer(trackType); // watermark not reached yet: keep reading
    }
}

NuPlayer::performSeek

接下来分析performSeek方法:

sp<Source> mSource;
//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// Seeks the Source to seekTimeUs. mSource may legitimately be NULL when
// a reset races with the asynchronous loop-mode seek back to 0; in that
// case the decoders must already be gone, which is asserted.
void NuPlayer::performSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
    ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), mode=%d",
          (long long)seekTimeUs, seekTimeUs / 1E6, mode);


    if (mSource == NULL) {
        // This happens when reset occurs right before the loop mode
        // asynchronously seeks to the start of the stream.
        LOG_ALWAYS_FATAL_IF(mAudioDecoder != NULL || mVideoDecoder != NULL,
                "mSource is NULL and decoders not NULL audio(%p) video(%p)",
                mAudioDecoder.get(), mVideoDecoder.get());
        return;
    }
    mPreviousSeekTimeUs = seekTimeUs;
    mSource->seekTo(seekTimeUs, mode); // delegate to the Source (e.g. GenericSource)
    ++mTimedTextGeneration; // invalidate in-flight timed-text updates


    // everything's flushed, continue playback.
}

调用Source的seekTo方法,NuPlayer::GenericSource继承Source,因此会调用NuPlayer::GenericSource的seekTo方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Synchronous seek entry point: posts kWhatSeek to the source's looper and
// blocks until the seek has been carried out, returning its status.
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
    ALOGV("seekTo: %lld, %d", (long long)seekTimeUs, mode);
    sp<AMessage> msg = new AMessage(kWhatSeek, this); // build a kWhatSeek message carrying time and mode
    msg->setInt64("seekTimeUs", seekTimeUs);
    msg->setInt32("mode", mode);


    // Need to call readBuffer on |mLooper| to ensure the calls to
    // IMediaSource::read* are serialized. Note that IMediaSource::read*
    // is called without |mLock| acquired and MediaSource is not thread safe.
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response); // post kWhatSeek and wait for the reply
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }


    return err;
}

发送kWhatSeek消息,会在NuPlayer::GenericSource::onMessageReceived中收到这个消息:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Message handler running on the source's looper.
// (Excerpt: only the kWhatSeek case is shown here.)
void NuPlayer::GenericSource::onMessageReceived(const sp<AMessage> &msg) {
    Mutex::Autolock _l(mLock);
    switch (msg->what()) {
      case kWhatSeek:
      {
          onSeek(msg); // handle the seek on the looper thread
          break;
      }
    }
}

调用onSeek方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Unpacks the kWhatSeek message, runs doSeek(), and replies to the caller
// blocked in seekTo() with the resulting status.
void NuPlayer::GenericSource::onSeek(const sp<AMessage>& msg) {
    int64_t seekTimeUs;
    int32_t mode;
    CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
    CHECK(msg->findInt32("mode", &mode));


    sp<AMessage> response = new AMessage;
    status_t err = doSeek(seekTimeUs, (MediaPlayerSeekMode)mode);
    response->setInt32("err", err);


    sp<AReplyToken> replyID;
    CHECK(msg->senderAwaitsResponse(&replyID));
    response->postReply(replyID); // unblock the seekTo() caller with the result
}

调用doSeek方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
// Seeks video first (to a sync frame per |mode|), then audio to the resulting
// time, clears subtitle/timed-text queues, and restarts buffer polling.
status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
    if (mVideoTrack.mSource != NULL) {
        // bump the generation so in-flight reads for the old position are discarded
        ++mVideoDataGeneration;


        int64_t actualTimeUs;
        readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);


        if (mode != MediaPlayerSeekMode::SEEK_CLOSEST) {
            // snap the audio seek target to the video sync frame actually reached
            seekTimeUs = std::max<int64_t>(0, actualTimeUs);
        }
        mVideoLastDequeueTimeUs = actualTimeUs;
    }


    if (mAudioTrack.mSource != NULL) {
        ++mAudioDataGeneration;
        readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs, MediaPlayerSeekMode::SEEK_CLOSEST);
        mAudioLastDequeueTimeUs = seekTimeUs;
    }


    if (mSubtitleTrack.mSource != NULL) {
        mSubtitleTrack.mPackets->clear();
        mFetchSubtitleDataGeneration++;
    }


    if (mTimedTextTrack.mSource != NULL) {
        mTimedTextTrack.mPackets->clear();
        mFetchTimedTextDataGeneration++;
    }


    ++mPollBufferingGeneration;
    schedulePollBuffering(); // resume periodic buffering checks at the new position
    return OK;
}

NuPlayer::Renderer

创建Renderer对象,Renderer的构造方法如下:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
// Renderer constructor: stores the audio sink, media clock and notify message,
// zero-initializes all queue/drain/anchor state, and pushes the default
// playback rate into the shared MediaClock.
NuPlayer::Renderer::Renderer(
        const sp<MediaPlayerBase::AudioSink> &sink,
        const sp<MediaClock> &mediaClock,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mMediaClock(mediaClock),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0LL),
      mNextVideoTimeMediaUs(-1),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new AWakeLock()) {
    CHECK(mediaClock != NULL);
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate); // propagate the initial playback rate to the media clock
    (void)mSyncFlag.test_and_set();
}

NuPlayer::Renderer::setPlaybackSettings

调用Renderer的setPlaybackSettings方法,配置Renderer的Playback:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
// Synchronously applies playback settings (speed/pitch) on the renderer's
// looper thread via a kWhatConfigPlayback round trip.
status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this); // build a kWhatConfigPlayback message
    writeToAMessage(msg, rate);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response); // post and block for the result
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

发送kWhatConfigPlayback消息,会在NuPlayer::Renderer::onMessageReceived中接收:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
// Renderer message handler.
// (Excerpt: only the kWhatConfigPlayback case is shown here.)
void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate); // apply the playback settings
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID); // unblock setPlaybackSettings() with the status
            break;
        }
    }
}

调用onConfigPlayback方法:

sp<MediaPlayerBase::AudioSink> mAudioSink;
const sp<MediaClock> mMediaClock;
//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
// Applies a (pre-sanitized) playback rate: speed 0 means pause; otherwise the
// rate is pushed to both the AudioSink and the MediaClock.
status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
        // have to correspond to the any non-0 speed (e.g old speed). Keep
        // settings nonetheless, using the old speed, in case audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }


    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate); // forward the playback rate to the AudioSink
        if (err != OK) {
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate); // keep the media clock in sync with the new speed
    return OK;
}

这个方法的主要处理如下:

调用MediaPlayerBase::AudioSink的setPlaybackRate方法

调用MediaClock的setPlaybackRate方法

下面我们分别分析:

MediaPlayerService::AudioOutput::setPlaybackRate

MediaPlayerService的内部类AudioOutput继承于MediaPlayerBase::AudioSink,因此调用MediaPlayerBase::AudioSink的setPlaybackRate方法会调用到MediaPlayerService::AudioOutput::setPlaybackRate方法:

sp<AudioTrack>          mTrack;
//frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
// Sets the playback rate on the underlying AudioTrack; if the track is not
// open yet, the rate is remembered and applied when the track is created.
status_t MediaPlayerService::AudioOutput::setPlaybackRate(const AudioPlaybackRate &rate)
{
    ALOGV("setPlaybackRate(%f %f %d %d)",
                rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
    Mutex::Autolock lock(mLock);
    if (mTrack == 0) {
        // remember rate so that we can set it when the track is opened
        mPlaybackRate = rate;
        return OK;
    }
    status_t res = mTrack->setPlaybackRate(rate);  // delegate to AudioTrack::setPlaybackRate
    if (res != NO_ERROR) {
        return res;
    }
    // rate.mSpeed is always greater than 0 if setPlaybackRate succeeded
    CHECK_GT(rate.mSpeed, 0.f);
    mPlaybackRate = rate;
    if (mSampleRateHz != 0) {
        // refresh the per-frame duration used for latency/position math
        mMsecsPerFrame = 1E3f / (rate.mSpeed * mSampleRateHz);
    }
    return res;
}

调用AudioTrack的setPlaybackRate方法,之后就是AudioTrack的处理了,这里就不再介绍了。

MediaClock::setPlaybackRate

调用MediaClock的setPlaybackRate方法:

//frameworks/av/media/libstagefright/MediaClock.cpp
// Updates the clock's playback rate. If an anchor exists, the current media
// time is recomputed under the old rate first so the clock stays continuous,
// then pending timers are re-evaluated for the new rate.
void MediaClock::setPlaybackRate(float rate) {
    CHECK_GE(rate, 0.0);
    Mutex::Autolock autoLock(mLock);
    if (mAnchorTimeRealUs == -1) {
        // no anchor yet: just store the rate for later
        mPlaybackRate = rate;
        return;
    }


    int64_t nowUs = ALooper::GetNowUs();
    // project the current media time using the OLD rate before re-anchoring
    int64_t nowMediaUs = mAnchorTimeMediaUs + (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
    if (nowMediaUs < 0) {
        ALOGW("setRate: anchor time should not be negative, set to 0.");
        nowMediaUs = 0;
    }
    updateAnchorTimesAndPlaybackRate_l(nowMediaUs, nowUs, rate); 


    if (rate > 0.0) {
        ++mGeneration; // invalidate previously scheduled kWhatTimeIsUp messages
        processTimers_l();
    }
}

调用processTimers_l方法:

//frameworks/av/media/libstagefright/MediaClock.cpp
// Fires all timers whose media time has been reached and schedules a
// kWhatTimeIsUp message for the earliest remaining timer. Caller holds mLock.
void MediaClock::processTimers_l() {
    int64_t nowMediaTimeUs;
    status_t status = getMediaTime_l(
            ALooper::GetNowUs(), &nowMediaTimeUs, false /* allowPastMaxTime */);


    if (status != OK) {
        return;
    }


    int64_t nextLapseRealUs = INT64_MAX;
    std::multimap<int64_t, Timer> notifyList; // expired timers, ordered by how late they are
    auto it = mTimers.begin();
    while (it != mTimers.end()) {
        // remaining media-time distance to this timer's target (clamped to int64 range)
        double diff = it->mAdjustRealUs * (double)mPlaybackRate
            + it->mMediaTimeUs - nowMediaTimeUs;
        int64_t diffMediaUs;
        if (diff > (double)INT64_MAX) {
            diffMediaUs = INT64_MAX;
        } else if (diff < (double)INT64_MIN) {
            diffMediaUs = INT64_MIN;
        } else {
            diffMediaUs = diff;
        }


        if (diffMediaUs <= 0) {
            // timer has lapsed: move it to the notify list
            notifyList.emplace(diffMediaUs, *it);
            it = mTimers.erase(it);
        } else {
            if (mPlaybackRate != 0.0
                && (double)diffMediaUs < (double)INT64_MAX * (double)mPlaybackRate) {
                // convert the media-time distance to real time at the current rate
                int64_t targetRealUs = diffMediaUs / (double)mPlaybackRate;
                if (targetRealUs < nextLapseRealUs) {
                    nextLapseRealUs = targetRealUs;
                }
            }
            ++it;
        }
    }


    auto itNotify = notifyList.begin();
    while (itNotify != notifyList.end()) {
        itNotify->second.mNotify->setInt32("reason", TIMER_REASON_REACHED);
        itNotify->second.mNotify->post(); // deliver the timer callback
        itNotify = notifyList.erase(itNotify);
    }


    if (mTimers.empty() || mPlaybackRate == 0.0 || mAnchorTimeMediaUs < 0
        || nextLapseRealUs == INT64_MAX) {
        return; // nothing further to schedule
    }


    sp<AMessage> msg = new AMessage(kWhatTimeIsUp, this);
    msg->setInt32("generation", mGeneration);
    msg->post(nextLapseRealUs); // wake up when the next timer is due
}

NuPlayer::Renderer::setVideoFrameRate

待更新

NuPlayer::DecoderBase::setRenderer

DecoderBase的setRenderer方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
// Asynchronously hands the renderer to the decoder via kWhatSetRenderer.
void NuPlayer::DecoderBase::setRenderer(const sp<Renderer> &renderer) {
    sp<AMessage> msg = new AMessage(kWhatSetRenderer, this); // build a kWhatSetRenderer message
    msg->setObject("renderer", renderer);
    msg->post(); // post kWhatSetRenderer (handled on the decoder's looper)
}

发送kWhatSetRenderer消息,发送的消息会在NuPlayer::DecoderBase::onMessageReceived中处理:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
// Decoder-base message handler.
// (Excerpt: only the kWhatSetRenderer case is shown here.)
void NuPlayer::DecoderBase::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatSetRenderer:
        {
            sp<RefBase> obj;
            CHECK(msg->findObject("renderer", &obj));
            onSetRenderer(static_cast<Renderer *>(obj.get())); // dispatch to the subclass hook
            break;
        }
    }
}

调用onSetRenderer方法,由其子类NuPlayer::Decoder实现:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
// Subclass hook: simply records the renderer for later buffer hand-off.
void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
    mRenderer = renderer;
}

postScanSources

调用postScanSources方法,初始化audio/video的解码器:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// Schedules a kWhatScanSources pass (which instantiates the audio/video
// decoders); coalesces duplicate requests via mScanSourcesPending.
void NuPlayer::postScanSources() {
    if (mScanSourcesPending) {
        return;
    }


    sp<AMessage> msg = new AMessage(kWhatScanSources, this);
    msg->setInt32("generation", mScanSourcesGeneration);
    msg->post(); // post kWhatScanSources to NuPlayer's looper


    mScanSourcesPending = true;
}

发送kWhatScanSources消息,在onMessageReceived方法中处理这个消息:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// NuPlayer message handler.
// (Excerpt: only the kWhatScanSources case is shown here.) Instantiates the
// video and audio decoders, and re-posts itself while either is not ready yet.
void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatScanSources:
        {
            int32_t generation;
            CHECK(msg->findInt32("generation", &generation));
            if (generation != mScanSourcesGeneration) {
                // Drop obsolete msg.
                break;
            }


            mScanSourcesPending = false;


            ALOGV("scanning sources haveAudio=%d, haveVideo=%d",
                 mAudioDecoder != NULL, mVideoDecoder != NULL);


            bool mHadAnySourcesBefore =
                (mAudioDecoder != NULL) || (mVideoDecoder != NULL);
            bool rescan = false;


            // initialize video before audio because successful initialization of
            // video may change deep buffer mode of audio.
            if (mSurface != NULL) {
                if (instantiateDecoder(false, &mVideoDecoder) == -EWOULDBLOCK) { // create the video decoder
                    rescan = true;
                }
            }


            // Don't try to re-open audio sink if there's an existing decoder.
            if (mAudioSink != NULL && mAudioDecoder == NULL) {
                if (instantiateDecoder(true, &mAudioDecoder) == -EWOULDBLOCK) { // create the audio decoder
                    rescan = true;
                }
            }


            if (!mHadAnySourcesBefore
                    && (mAudioDecoder != NULL || mVideoDecoder != NULL)) {
                // This is the first time we've found anything playable.


                if (mSourceFlags & Source::FLAG_DYNAMIC_DURATION) {
                    schedulePollDuration();
                }
            }


            status_t err;
            if ((err = mSource->feedMoreTSData()) != OK) {
                if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
                    // We're not currently decoding anything (no audio or
                    // video tracks found) and we just ran out of input data.


                    if (err == ERROR_END_OF_STREAM) {
                        notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0); // report playback complete to the client
                    } else {
                        notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
                    }
                }
                break;
            }


            if (rescan) {
                // a decoder wasn't ready: retry the scan in 100ms
                msg->post(100000LL);
                mScanSourcesPending = true;
            }
            break;
        }


        default:
            TRESPASS();
            break;
    }
}

调用instantiateDecoder初始化AudioDecoder和VideoDecoder:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
// Creates and configures the audio or video decoder for the current source
// format: picks Decoder vs DecoderPassThrough (audio offload), then runs
// init() -> configure() -> setParameters() on the new DecoderBase.
status_t NuPlayer::instantiateDecoder(
        bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange) {
    // The audio decoder could be cleared by tear down. If still in shut down
    // process, no need to create a new audio decoder.
    if (*decoder != NULL || (audio && mFlushingAudio == SHUT_DOWN)) {
        return OK;
    }


    sp<AMessage> format = mSource->getFormat(audio);


    if (format == NULL) {
        return UNKNOWN_ERROR;
    } else {
        status_t err;
        if (format->findInt32("err", &err) && err) {
            return err;
        }
    }


    format->setInt32("priority", 0 /* realtime */);


    if (mDataSourceType == DATA_SOURCE_TYPE_RTP) {
        ALOGV("instantiateDecoder: set decoder error free on stream corrupt.");
        format->setInt32("corrupt-free", true);
    }


    if (!audio) {
        AString mime;
        CHECK(format->findString("mime", &mime));


        // closed-caption decoder rides along with the video decoder
        sp<AMessage> ccNotify = new AMessage(kWhatClosedCaptionNotify, this); 
        if (mCCDecoder == NULL) {
            mCCDecoder = new CCDecoder(ccNotify);
        }


        if (mSourceFlags & Source::FLAG_SECURE) {
            format->setInt32("secure", true);
        }


        if (mSourceFlags & Source::FLAG_PROTECTED) {
            format->setInt32("protected", true);
        }


        float rate = getFrameRate();
        if (rate > 0) {
            // tell the codec the effective decode rate (frame rate x playback speed)
            format->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
        }
    }


    Mutex::Autolock autoLock(mDecoderLock);


    if (audio) {
        sp<AMessage> notify = new AMessage(kWhatAudioNotify, this);
        ++mAudioDecoderGeneration;
        notify->setInt32("generation", mAudioDecoderGeneration);


        if (checkAudioModeChange) {
            determineAudioModeChange(format);
        }
        if (mOffloadAudio) {
            mSource->setOffloadAudio(true /* offload */);


            const bool hasVideo = (mSource->getFormat(false /*audio */) != NULL);
            format->setInt32("has-video", hasVideo);
            *decoder = new DecoderPassThrough(notify, mSource, mRenderer); // offload path: pass-through decoder
            ALOGV("instantiateDecoder audio DecoderPassThrough  hasVideo: %d", hasVideo);
        } else {
            mSource->setOffloadAudio(false /* offload */);


            *decoder = new Decoder(notify, mSource, mPID, mUID, mRenderer); // regular audio decoder
            ALOGV("instantiateDecoder audio Decoder");
        }
        mAudioDecoderError = false;
    } else {
        sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
        ++mVideoDecoderGeneration;
        notify->setInt32("generation", mVideoDecoderGeneration);


        *decoder = new Decoder(
                notify, mSource, mPID, mUID, mRenderer, mSurface, mCCDecoder); // video decoder with surface + CC
        mVideoDecoderError = false;


        // enable FRC if high-quality AV sync is requested, even if not
        // directly queuing to display, as this will even improve textureview
        // playback.
        {
            if (property_get_bool("persist.sys.media.avsync", false)) {
                format->setInt32("auto-frc", 1);
            }
        }
    }
    (*decoder)->init(); // register the decoder with its looper


    // Modular DRM
    if (mIsDrmProtected) {
        format->setPointer("crypto", mCrypto.get());
        ALOGV("instantiateDecoder: mCrypto: %p (%d) isSecure: %d", mCrypto.get(),
                (mCrypto != NULL ? mCrypto->getStrongCount() : 0),
                (mSourceFlags & Source::FLAG_SECURE) != 0);
    }


    (*decoder)->configure(format); // asynchronously configure (creates MediaCodec)


    if (!audio) {
        sp<AMessage> params = new AMessage();
        float rate = getFrameRate();
        if (rate > 0) {
            params->setFloat("frame-rate-total", rate);
        }


        sp<MetaData> fileMeta = getFileMeta();
        if (fileMeta != NULL) {
            int32_t videoTemporalLayerCount;
            if (fileMeta->findInt32(kKeyTemporalLayerCount, &videoTemporalLayerCount)
                    && videoTemporalLayerCount > 0) {
                params->setInt32("temporal-layer-count", videoTemporalLayerCount);
            }
        }


        if (params->countEntries() > 0) {
            (*decoder)->setParameters(params); // pass frame-rate/layer hints to the decoder
        }
    }
    return OK;
}

上面方法主要处理如下:

1、通过new的方式创建一个DecoderBase

2、调用DecoderBase的init方法,初始化DecoderBase

3、调用DecoderBase的configure方法,配置DecoderBase

4、调用DecoderBase的setParameters方法,设置DecoderBase参数

下面分别进行分析:

NuPlayer::DecoderBase

DecoderBase的构造方法如下:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
// DecoderBase constructor: sets up per-decoder state and starts a dedicated
// looper, since MediaCodec calls block but NuPlayer must stay asynchronous.
NuPlayer::DecoderBase::DecoderBase(const sp<AMessage> &notify)
    :  mNotify(notify),
       mBufferGeneration(0),
       mPaused(false),
       mStats(new AMessage),
       mRequestInputBuffersPending(false) {
    // Every decoder has its own looper because MediaCodec operations
    // are blocking, but NuPlayer needs asynchronous operations.
    mDecoderLooper = new ALooper; // per-decoder looper
    mDecoderLooper->setName("NPDecoder");
    mDecoderLooper->start(false, false, ANDROID_PRIORITY_AUDIO); // start the looper thread
}

NuPlayer::DecoderBase::init

调用Decoder的init方法,在Decoder类中没有找到init方法,Decoder类继承于DecoderBase,DecoderBase有init方法,因此会调用父类的init方法:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
// Registers this decoder as a handler on its own looper so posted
// messages (configure, setRenderer, ...) are delivered to it.
void NuPlayer::DecoderBase::init() {
    mDecoderLooper->registerHandler(this); // attach this AHandler to the decoder looper
}

NuPlayer::DecoderBase::configure

调用DecoderBase的configure方法,配置DecoderBase:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
// Asynchronously configures the decoder with the given format message.
void NuPlayer::DecoderBase::configure(const sp<AMessage> &format) {
    sp<AMessage> msg = new AMessage(kWhatConfigure, this);
    msg->setMessage("format", format);
    msg->post(); // post kWhatConfigure to the decoder looper
}

发送kWhatConfigure消息,消息在onMessageReceived中处理:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
// Decoder-base message handler.
// (Excerpt: only the kWhatConfigure case is shown here.)
void NuPlayer::DecoderBase::onMessageReceived(const sp<AMessage> &msg) {


    switch (msg->what()) {
        case kWhatConfigure:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));
            onConfigure(format); // subclass hook (NuPlayer::Decoder::onConfigure)
            break;
        }
        default:
            TRESPASS();
            break;
    }
}

调用onConfigure方法,这个方法由子类实现:

sp<MediaCodec> mCodec;
//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
// Creates the MediaCodec for the given format (re-creating a ".secure"
// variant if needed), configures it with surface/crypto, installs the async
// callback, and starts it.
void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
    CHECK(mCodec == NULL);


    mFormatChangePending = false;
    mTimeChangePending = false;


    ++mBufferGeneration; // invalidate buffers belonging to any previous codec


    AString mime;
    CHECK(format->findString("mime", &mime));


    mIsAudio = !strncasecmp("audio/", mime.c_str(), 6);
    mIsVideoAVC = !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str());


    mComponentName = mime;
    mComponentName.append(" decoder");
    ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());


    mCodec = MediaCodec::CreateByType(
            mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid, mUid, format); // create MediaCodec by mime type
    int32_t secure = 0;
    if (format->findInt32("secure", &secure) && secure != 0) {
        if (mCodec != NULL) {
            // for secure content, release the plain codec and re-create the ".secure" component
            mCodec->getName(&mComponentName);
            mComponentName.append(".secure");
            mCodec->release();
            ALOGI("[%s] creating", mComponentName.c_str());
            mCodec = MediaCodec::CreateByComponentName(
                    mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid, mUid); // create the secure MediaCodec
        }
    }
    if (mCodec == NULL) {
        ALOGE("Failed to create %s%s decoder",
                (secure ? "secure " : ""), mime.c_str());
        handleError(UNKNOWN_ERROR);
        return;
    }
    mIsSecure = secure;


    mCodec->getName(&mComponentName);


    status_t err;
    if (mSurface != NULL) {
        // disconnect from surface as MediaCodec will reconnect
        err = nativeWindowDisconnect(mSurface.get(), "onConfigure");
        // We treat this as a warning, as this is a preparatory step.
        // Codec will try to connect to the surface, which is where
        // any error signaling will occur.
        ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err);
    }


    // Modular DRM
    void *pCrypto;
    if (!format->findPointer("crypto", &pCrypto)) {
        pCrypto = NULL;
    }
    sp<ICrypto> crypto = (ICrypto*)pCrypto;
    // non-encrypted source won't have a crypto
    mIsEncrypted = (crypto != NULL);
    // configure is called once; still using OR in case the behavior changes.
    mIsEncryptedObservedEarlier = mIsEncryptedObservedEarlier || mIsEncrypted;
    ALOGV("onConfigure mCrypto: %p (%d)  mIsSecure: %d",
            crypto.get(), (crypto != NULL ? crypto->getStrongCount() : 0), mIsSecure);


    err = mCodec->configure(
            format, mSurface, crypto, 0 /* flags */); // configure the MediaCodec


    if (err != OK) {
        ALOGE("Failed to configure [%s] decoder (err=%d)", mComponentName.c_str(), err);
        mCodec->release();
        mCodec.clear();
        handleError(err);
        return;
    }
    rememberCodecSpecificData(format);


    // the following should work in configured state
    CHECK_EQ((status_t)OK, mCodec->getOutputFormat(&mOutputFormat));
    CHECK_EQ((status_t)OK, mCodec->getInputFormat(&mInputFormat));


    {
        Mutex::Autolock autolock(mStatsLock);
        mStats->setString("mime", mime.c_str());
        mStats->setString("component-name", mComponentName.c_str());
    }


    if (!mIsAudio) {
        int32_t width, height;
        if (mOutputFormat->findInt32("width", &width)
                && mOutputFormat->findInt32("height", &height)) {
            Mutex::Autolock autolock(mStatsLock);
            mStats->setInt32("width", width);
            mStats->setInt32("height", height);
        }
    }


    // use MediaCodec in asynchronous (callback) mode
    sp<AMessage> reply = new AMessage(kWhatCodecNotify, this);
    mCodec->setCallback(reply);


    err = mCodec->start(); // start the codec (begin decoding)
    if (err != OK) {
        ALOGE("Failed to start [%s] decoder (err=%d)", mComponentName.c_str(), err);
        mCodec->release();
        mCodec.clear();
        handleError(err);
        return;
    }


    releaseAndResetMediaBuffers();


    mPaused = false;
    mResumePending = false;
}

上面方法主要处理如下:

1、通过MediaCodec::CreateByType或MediaCodec::CreateByComponentName创建MediaCodec

2、调用MediaCodec的configure方法,配置MediaCodec

3、调用MediaCodec的start方法,开始解码

这部分内容我们在MediaCodec中继续分析。

NuPlayer::DecoderBase::setParameters

调用DecoderBase的setParameters方法,设置DecoderBase参数:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
// Asynchronously passes runtime parameters to the decoder.
void NuPlayer::DecoderBase::setParameters(const sp<AMessage> &params) {
    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
    msg->setMessage("params", params);
    msg->post(); // post kWhatSetParameters to the decoder looper
}

发送kWhatSetParameters消息,消息会在onMessageReceived中处理:

//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
// Decoder-base message handler.
// (Excerpt: only the kWhatSetParameters case is shown here.)
void NuPlayer::DecoderBase::onMessageReceived(const sp<AMessage> &msg) {


    switch (msg->what()) {
        case kWhatSetParameters:
        {
            sp<AMessage> params;
            CHECK(msg->findMessage("params", &params));
            onSetParameters(params); // subclass hook
            break;
        }
        default:
            TRESPASS();
            break;
    }
}

调用onSetParameters方法,这个方法由子类实现:

sp<MediaCodec> mCodec;
//frameworks/av/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
// Handles frame-rate / temporal-layer / playback-speed parameter updates and,
// when they change, recomputes how many temporal layers to decode and pushes
// a new "operating-rate" to the MediaCodec.
void NuPlayer::Decoder::onSetParameters(const sp<AMessage> &params) {
    bool needAdjustLayers = false;
    float frameRateTotal;
    if (params->findFloat("frame-rate-total", &frameRateTotal)
            && mFrameRateTotal != frameRateTotal) {
        needAdjustLayers = true;
        mFrameRateTotal = frameRateTotal;
    }


    int32_t numVideoTemporalLayerTotal;
    if (params->findInt32("temporal-layer-count", &numVideoTemporalLayerTotal)
            && numVideoTemporalLayerTotal >= 0
            && numVideoTemporalLayerTotal <= kMaxNumVideoTemporalLayers
            && mNumVideoTemporalLayerTotal != numVideoTemporalLayerTotal) {
        needAdjustLayers = true;
        mNumVideoTemporalLayerTotal = std::max(numVideoTemporalLayerTotal, 1);
    }


    if (needAdjustLayers && mNumVideoTemporalLayerTotal > 1) {
        // TODO: For now, layer fps is calculated for some specific architectures.
        // But it really should be extracted from the stream.
        // cumulative fps of layers 0..i, assuming each layer doubles the rate
        mVideoTemporalLayerAggregateFps[0] =
            mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - 1));
        for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
            mVideoTemporalLayerAggregateFps[i] =
                mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - i))
                + mVideoTemporalLayerAggregateFps[i - 1];
        }
    }


    float playbackSpeed;
    if (params->findFloat("playback-speed", &playbackSpeed)
            && mPlaybackSpeed != playbackSpeed) {
        needAdjustLayers = true;
        mPlaybackSpeed = playbackSpeed;
    }


    if (needAdjustLayers) {
        float decodeFrameRate = mFrameRateTotal;
        // enable temporal layering optimization only if we know the layering depth
        if (mNumVideoTemporalLayerTotal > 1) {
            int32_t layerId;
            // keep adding layers until the effective fps saturates the display refresh rate
            for (layerId = 0; layerId < mNumVideoTemporalLayerTotal - 1; ++layerId) {
                if (mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed
                        >= kDisplayRefreshingRate * 0.9) {
                    break;
                }
            }
            mNumVideoTemporalLayerAllowed = layerId + 1;
            decodeFrameRate = mVideoTemporalLayerAggregateFps[layerId];
        }
        ALOGV("onSetParameters: allowed layers=%d, decodeFps=%g",
                mNumVideoTemporalLayerAllowed, decodeFrameRate);


        if (mCodec == NULL) {
            ALOGW("onSetParameters called before codec is created.");
            return;
        }


        sp<AMessage> codecParams = new AMessage();
        codecParams->setFloat("operating-rate", decodeFrameRate * mPlaybackSpeed);
        mCodec->setParameters(codecParams); // forward the new operating rate to MediaCodec
    }
}

调用MediaCodec的setParameters方法,这部分我们在MediaCodec中分析。

到这里我们就分析完NuPlayer的start流程了,主要处理是:start NuPlayerSource,通过MediaClock设置播放速率,然后设置Renderer,最后初始化Decoder并开始解码,完整的流程图如下:

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值