This article analyzes prepare and prepareAsync.
prepare* at the JNI layer:
static void
android_media_MediaPlayer_prepare(JNIEnv *env, jobject thiz)
{
    sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
    if (mp == NULL ) {
        jniThrowException(env, "java/lang/IllegalStateException", NULL);
        return;
    }

    // Handle the case where the display surface was set before the mp was
    // initialized. We try again to make it stick.
    sp<IGraphicBufferProducer> st = getVideoSurfaceTexture(env, thiz);
    mp->setVideoSurfaceTexture(st);

    process_media_player_call( env, thiz, mp->prepare(), "java/io/IOException", "Prepare failed." );
}

static void
android_media_MediaPlayer_prepareAsync(JNIEnv *env, jobject thiz)
{
    sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
    if (mp == NULL ) {
        jniThrowException(env, "java/lang/IllegalStateException", NULL);
        return;
    }

    // Handle the case where the display surface was set before the mp was
    // initialized. We try again to make it stick.
    sp<IGraphicBufferProducer> st = getVideoSurfaceTexture(env, thiz);
    mp->setVideoSurfaceTexture(st);

    process_media_player_call( env, thiz, mp->prepareAsync(), "java/io/IOException", "Prepare Async failed." );
}
Both functions call getVideoSurfaceTexture():
static sp<IGraphicBufferProducer>
getVideoSurfaceTexture(JNIEnv* env, jobject thiz) {
    IGraphicBufferProducer * const p = (IGraphicBufferProducer*)env->GetIntField(thiz, fields.surface_texture);
    return sp<IGraphicBufferProducer>(p);
}
which reads the Java-layer field mNativeSurfaceTexture:
private int mNativeSurfaceTexture; // accessed by native methods
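For context, the native side writes this field when the application sets the surface: the raw address of the IGraphicBufferProducer is stored into the Java int field, and getVideoSurfaceTexture() simply casts it back. A hypothetical, simplified version of that write (the real logic lives in setVideoSurface() in android_media_MediaPlayer.cpp and also manages the reference counts) might look like this:

static void storeVideoSurfaceTexture(JNIEnv *env, jobject thiz,
                                     const sp<IGraphicBufferProducer> &producer) {
    // Hypothetical helper, not the actual AOSP function: stash the strong
    // pointer's raw address in mNativeSurfaceTexture so that
    // getVideoSurfaceTexture() can recover it later.
    env->SetIntField(thiz, fields.surface_texture, (jint)producer.get());
}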
Next, mp->prepare() or mp->prepareAsync() is called. Here we only analyze mp->prepareAsync(); its result is delivered back to the Java layer through a callback.
status_t MediaPlayer::prepareAsync()
{
    ALOGV("prepareAsync");
    Mutex::Autolock _l(mLock);
    return prepareAsync_l();
}
It takes the mutex mLock and then calls prepareAsync_l():
// must call with lock held
status_t MediaPlayer::prepareAsync_l()
{
    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
        mPlayer->setAudioStreamType(mStreamType);
        mCurrentState = MEDIA_PLAYER_PREPARING;
        return mPlayer->prepareAsync();
    }
    ALOGE("prepareAsync called in state %d", mCurrentState);
    return INVALID_OPERATION;
}
The first call is mPlayer->setAudioStreamType(mStreamType). In IMediaPlayer.cpp the corresponding Binder transaction is SET_AUDIO_STREAM_TYPE:
case SET_AUDIO_STREAM_TYPE: {
    CHECK_INTERFACE(IMediaPlayer, data, reply);
    reply->writeInt32(setAudioStreamType((audio_stream_type_t) data.readInt32()));
    return NO_ERROR;
} break;
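For reference, the proxy (BpMediaPlayer) side that issues this transaction follows the standard Binder pattern; a sketch of it (see IMediaPlayer.cpp for the exact code) looks roughly like this:

status_t setAudioStreamType(audio_stream_type_t stream)
{
    // Pack the stream type into a Parcel, send the SET_AUDIO_STREAM_TYPE
    // transaction to the remote BnMediaPlayer, and return the status that
    // the server wrote into the reply Parcel.
    Parcel data, reply;
    data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
    data.writeInt32((int32_t) stream);
    remote()->transact(SET_AUDIO_STREAM_TYPE, data, &reply);
    return reply.readInt32();
}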
On the BnMediaPlayer side this ends up in MediaPlayerService::Client::setAudioStreamType:
status_t MediaPlayerService::Client::setAudioStreamType(audio_stream_type_t type)
{
    ALOGV("[%d] setAudioStreamType(%d)", mConnId, type);
    // TODO: for hardware output, call player instead
    Mutex::Autolock l(mLock);
    if (mAudioOutput != 0) mAudioOutput->setAudioStreamType(type);
    return NO_ERROR;
}
AudioOutput::setAudioStreamType simply records the stream type:
void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; }
A word about AudioOutput: it is declared as class AudioOutput : public MediaPlayerBase::AudioSink. This AudioSink is what AwesomePlayer uses later on, so those calls actually land in AudioOutput.
In setDataSource_pre we already handed mAudioOutput to AwesomePlayer:
sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
        player_type playerType)
{
    ALOGV("player type = %d", playerType);

    // create the right type of player
    sp<MediaPlayerBase> p = createPlayer(playerType);
    if (p == NULL) {
        return p;
    }

    if (!p->hardwareOutput()) {
        mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid());
        static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
    }

    return p;
}
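The setAudioSink() call at the end does nothing more than cache the sink inside the player. Roughly, MediaPlayerInterface implements it like this (a sketch; the exact header may differ slightly between versions):

    // MediaPlayerInterface (sketch): the sink is simply remembered here;
    // concrete players such as AwesomePlayer later read it back as mAudioSink.
    virtual void setAudioSink(const sp<AudioSink> &audioSink) { mAudioSink = audioSink; }

So when AwesomePlayer later writes decoded audio to mAudioSink, it is really talking to this AudioOutput, which in turn wraps an AudioTrack.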
Back to the prepareAsync path. mPlayer->prepareAsync() maps to MediaPlayerService::Client::prepareAsync on the BnMediaPlayer side:
case PREPARE_ASYNC: {
    CHECK_INTERFACE(IMediaPlayer, data, reply);
    reply->writeInt32(prepareAsync());
    return NO_ERROR;
} break;
MediaPlayerService::Client::prepareAsync
status_t MediaPlayerService::Client::prepareAsync()
{
    ALOGV("[%d] prepareAsync", mConnId);
    sp<MediaPlayerBase> p = getPlayer();
    if (p == 0) return UNKNOWN_ERROR;

    status_t ret = p->prepareAsync();

#if CALLBACK_ANTAGONIZER
    ALOGD("start Antagonizer");
    if (ret == NO_ERROR) mAntagonizer->start();
#endif

    return ret;
}
This calls AwesomePlayer::prepareAsync:
status_t AwesomePlayer::prepareAsync() {
    ATRACE_CALL();
    Mutex::Autolock autoLock(mLock);

    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;  // async prepare already pending
    }

    mIsAsyncPrepare = true;
    return prepareAsync_l();
}
It first checks whether PREPARING is already set in mFlags. mFlags is initialized to 0 when AwesomePlayer is constructed, so this check is false on the first pass. Continue with the next step:
AwesomePlayer::prepareAsync_l()
status_t AwesomePlayer::prepareAsync_l() {
    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;  // async prepare already pending
    }

    if (!mQueueStarted) {
        mQueue.start();
        mQueueStarted = true;
    }

    modifyFlags(PREPARING, SET);
    mAsyncPrepareEvent = new AwesomeEvent(
            this, &AwesomePlayer::onPrepareAsyncEvent);

    mQueue.postEvent(mAsyncPrepareEvent);

    return OK;
}
Again the PREPARING flag is checked; it is still not set. Then mQueue (a TimedEventQueue) is started if it is not running yet.
After that, mFlags is switched to PREPARING, indicating that the file's audio and video streams are now being prepared.
Here an AwesomeEvent shows up. What is it for? Looking at the rest of the code, it is simply an event object that gets posted to the mQueue we just started.
As described in the earlier TimedEventQueue article, when the queue processes this event it ends up invoking AwesomePlayer::onPrepareAsyncEvent.
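For reference, AwesomeEvent is just a thin wrapper that stores a member-function pointer and invokes it when the queue fires the event; it looks roughly like this in AwesomePlayer.cpp:

struct AwesomeEvent : public TimedEventQueue::Event {
    AwesomeEvent(AwesomePlayer *player, void (AwesomePlayer::*method)())
        : mPlayer(player),
          mMethod(method) {
    }

protected:
    virtual ~AwesomeEvent() {}

    // Called on the TimedEventQueue thread when the event is due:
    // invokes the stored member function, here onPrepareAsyncEvent().
    virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
        (mPlayer->*mMethod)();
    }

private:
    AwesomePlayer *mPlayer;
    void (AwesomePlayer::*mMethod)();
};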
Now look at AwesomePlayer::onPrepareAsyncEvent:
void AwesomePlayer::onPrepareAsyncEvent() {
    Mutex::Autolock autoLock(mLock);
    beginPrepareAsync_l();
}
AwesomePlayer::beginPrepareAsync_l()
void AwesomePlayer::beginPrepareAsync_l() {
    if (mFlags & PREPARE_CANCELLED) {
        ALOGI("prepare was cancelled before doing anything");
        abortPrepare(UNKNOWN_ERROR);
        return;
    }

    if (mUri.size() > 0) {
        status_t err = finishSetDataSource_l();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mVideoTrack != NULL && mVideoSource == NULL) {
        status_t err = initVideoDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mAudioTrack != NULL && mAudioSource == NULL) {
        status_t err = initAudioDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    modifyFlags(PREPARING_CONNECTED, SET);

    if (isStreamingHTTP()) {
        postBufferingEvent_l();
    } else {
        finishAsyncPrepare_l();
    }
}
Here we analyze the local-file case; streaming from a URI will be covered later. The function initializes the video and audio decoders in turn.
Initializing the video decoder:
AwesomePlayer::initVideoDecoder
status_t AwesomePlayer::initVideoDecoder(uint32_t flags) {
    ATRACE_CALL();

    // Either the application or the DRM system can independently say
    // that there must be a hardware-protected path to an external video sink.
    // For now we always require a hardware-protected path to external video sink
    // if content is DRMed, but eventually this could be optional per DRM agent.
    // When the application wants protection, then
    //   (USE_SURFACE_ALLOC && (mSurface != 0) &&
    //   (mSurface->getFlags() & ISurfaceComposer::eProtectedByApp))
    // will be true, but that part is already handled by SurfaceFlinger.

#ifdef DEBUG_HDCP
    // For debugging, we allow a system property to control the protected usage.
    // In case of uninitialized or unexpected property, we default to "DRM only".
    bool setProtectionBit = false;
    char value[PROPERTY_VALUE_MAX];
    if (property_get("persist.sys.hdcp_checking", value, NULL)) {
        if (!strcmp(value, "never")) {
            // nop
        } else if (!strcmp(value, "always")) {
            setProtectionBit = true;
        } else if (!strcmp(value, "drm-only")) {
            if (mDecryptHandle != NULL) {
                setProtectionBit = true;
            }
        // property value is empty, or unexpected value
        } else {
            if (mDecryptHandle != NULL) {
                setProtectionBit = true;
            }
        }
    // can' read property value
    } else {
        if (mDecryptHandle != NULL) {
            setProtectionBit = true;
        }
    }
    // note that usage bit is already cleared, so no need to clear it in the "else" case
    if (setProtectionBit) {
        flags |= OMXCodec::kEnableGrallocUsageProtected;
    }
#else
    if (mDecryptHandle != NULL) {
        flags |= OMXCodec::kEnableGrallocUsageProtected;
    }
#endif
    ALOGV("initVideoDecoder flags=0x%x", flags);
    mVideoSource = OMXCodec::Create(
            mClient.interface(), mVideoTrack->getFormat(),
            false, // createEncoder
            mVideoTrack,
            NULL, flags, USE_SURFACE_ALLOC ? mNativeWindow : NULL);

    if (mVideoSource != NULL) {
        int64_t durationUs;
        if (mVideoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
            Mutex::Autolock autoLock(mMiscStateLock);
            if (mDurationUs < 0 || durationUs > mDurationUs) {
                mDurationUs = durationUs;
            }
        }

        status_t err = mVideoSource->start();

        if (err != OK) {
            ALOGE("failed to start video source");
            mVideoSource.clear();
            return err;
        }
    }

    if (mVideoSource != NULL) {
        const char *componentName;
        CHECK(mVideoSource->getFormat()
                ->findCString(kKeyDecoderComponent, &componentName));

        {
            Mutex::Autolock autoLock(mStatsLock);
            TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mVideoTrackIndex);

            stat->mDecoderName = componentName;
        }

        static const char *kPrefix = "OMX.Nvidia.";
        static const char *kSuffix = ".decode";
        static const size_t kSuffixLength = strlen(kSuffix);

        size_t componentNameLength = strlen(componentName);

        if (!strncmp(componentName, kPrefix, strlen(kPrefix))
                && componentNameLength >= kSuffixLength
                && !strcmp(&componentName[
                    componentNameLength - kSuffixLength], kSuffix)) {
            modifyFlags(SLOW_DECODER_HACK, SET);
        }
    }

    return mVideoSource != NULL ? OK : UNKNOWN_ERROR;
}
The core of initVideoDecoder is OMXCodec::Create.
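For reference, its declaration is roughly the following (a sketch based on OMXCodec.h; the trailing parameters have defaults and the names are illustrative):

// Given the IOMX interface, the track's format metadata and the raw track as
// a MediaSource, Create() returns a new MediaSource whose read() produces
// decoded buffers.
static sp<MediaSource> Create(
        const sp<IOMX> &omx,
        const sp<MetaData> &meta, bool createEncoder,
        const sp<MediaSource> &source,
        const char *matchComponentName = NULL,
        uint32_t flags = 0,
        const sp<ANativeWindow> &nativeWindow = NULL);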
initAudioDecoder()
status_t AwesomePlayer::initAudioDecoder() {
    ATRACE_CALL();

    sp<MetaData> meta = mAudioTrack->getFormat();

    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    // Check whether there is a hardware codec for this stream
    // This doesn't guarantee that the hardware has a free stream
    // but it avoids us attempting to open (and re-open) an offload
    // stream to hardware that doesn't have the necessary codec
    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
    if (mAudioSink != NULL) {
        streamType = mAudioSink->getAudioStreamType();
    }

    mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL),
                                     isStreamingHTTP(), streamType);

    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
        ALOGV("createAudioPlayer: bypass OMX (raw)");
        mAudioSource = mAudioTrack;
    } else {
        // If offloading we still create a OMX decoder as a fall-back
        // but we don't start it
        mOmxSource = OMXCodec::Create(
                mClient.interface(), mAudioTrack->getFormat(),
                false, // createEncoder
                mAudioTrack);

        if (mOffloadAudio) {
            ALOGV("createAudioPlayer: bypass OMX (offload)");
            mAudioSource = mAudioTrack;
        } else {
            mAudioSource = mOmxSource;
        }
    }

    if (mAudioSource != NULL) {
        int64_t durationUs;
        if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
            Mutex::Autolock autoLock(mMiscStateLock);
            if (mDurationUs < 0 || durationUs > mDurationUs) {
                mDurationUs = durationUs;
            }
        }

        status_t err = mAudioSource->start();

        if (err != OK) {
            mAudioSource.clear();
            mOmxSource.clear();
            return err;
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
        // For legacy reasons we're simply going to ignore the absence
        // of an audio decoder for QCELP instead of aborting playback
        // altogether.
        return OK;
    }

    if (mAudioSource != NULL) {
        Mutex::Autolock autoLock(mStatsLock);
        TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
        const char *component;
        if (!mAudioSource->getFormat()
                ->findCString(kKeyDecoderComponent, &component)) {
            component = "none";
        }

        stat->mDecoderName = component;
    }

    return mAudioSource != NULL ? OK : UNKNOWN_ERROR;
}
The core of initAudioDecoder is likewise OMXCodec::Create.
Because AwesomePlayer relies on OMX codecs underneath, the OMX module comes into play as soon as the audio and video streams are set up.
The OMX part will be explained in the next article.
Back to beginPrepareAsync_l (shown in full above). Once both decoders are initialized, the tail of the function runs:
    modifyFlags(PREPARING_CONNECTED, SET);

    if (isStreamingHTTP()) {
        postBufferingEvent_l();
    } else {
        finishAsyncPrepare_l();
    }
Since we are playing a local file rather than streaming HTTP, execution continues into finishAsyncPrepare_l:
finishAsyncPrepare_l
void AwesomePlayer::finishAsyncPrepare_l() {
    if (mIsAsyncPrepare) {
        if (mVideoSource == NULL) {
            notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
        } else {
            notifyVideoSize_l();
        }

        notifyListener_l(MEDIA_PREPARED);
    }

    mPrepareResult = OK;
    modifyFlags((PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED), CLEAR);
    modifyFlags(PREPARED, SET);
    mAsyncPrepareEvent = NULL;
    mPreparedCondition.broadcast();

    if (mAudioTearDown) {
        if (mPrepareResult == OK) {
            if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
                seekTo_l(mAudioTearDownPosition);
            }

            if (mAudioTearDownWasPlaying) {
                modifyFlags(CACHE_UNDERRUN, CLEAR);
                play_l();
            }
        }
        mAudioTearDown = false;
    }
}
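Both notifications (MEDIA_SET_VIDEO_SIZE and MEDIA_PREPARED) go through notifyListener_l, which roughly does the following (a sketch; the member types vary slightly between Android versions): it promotes the weak listener reference and forwards the event, which then travels through MediaPlayerService::Client::notify and across Binder to MediaPlayer::notify in the application process.

void AwesomePlayer::notifyListener_l(int msg, int ext1, int ext2) {
    if ((mListener != NULL) && !mAudioTearDown) {
        // mListener is a weak reference to the layer above AwesomePlayer;
        // sendEvent() ultimately reaches MediaPlayer::notify in the app process.
        sp<MediaPlayerBase> listener = mListener.promote();
        if (listener != NULL) {
            listener->sendEvent(msg, ext1, ext2);
        }
    }
}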
First the upper layer is notified with a MEDIA_SET_VIDEO_SIZE message.
MediaPlayer.cpp
case MEDIA_SET_VIDEO_SIZE:
    ALOGV("New video size %d x %d", ext1, ext2);
    mVideoWidth = ext1;
    mVideoHeight = ext2;
MediaPlayer.java
case MEDIA_SET_VIDEO_SIZE:
    if (mOnVideoSizeChangedListener != null)
        mOnVideoSizeChangedListener.onVideoSizeChanged(mMediaPlayer, msg.arg1, msg.arg2);
After that, MEDIA_PREPARED is delivered.
MediaPlayer.cpp
case MEDIA_PREPARED:
    ALOGV("prepared");
    mCurrentState = MEDIA_PLAYER_PREPARED;
    if (mPrepareSync) {
        ALOGV("signal application thread");
        mPrepareSync = false;
        mPrepareStatus = NO_ERROR;
        mSignal.signal();
    }
    break;
MediaPlayer.java
case MEDIA_PREPARED:
    scanInternalSubtitleTracks();
    if (mOnPreparedListener != null)
        mOnPreparedListener.onPrepared(mMediaPlayer);
    return;
Next the preparing-related flags of AwesomePlayer are cleared and PREPARED is set.
Then mPreparedCondition.broadcast() is called to wake up any thread waiting on this condition:
status_t AwesomePlayer::prepare_l() {
    if (mFlags & PREPARED) {
        return OK;
    }

    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;
    }

    mIsAsyncPrepare = false;
    status_t err = prepareAsync_l();

    if (err != OK) {
        return err;
    }

    while (mFlags & PREPARING) {
        mPreparedCondition.wait(mLock);
    }

    return mPrepareResult;
}
It turns out that prepare_l is the function that blocks on this condition. The mQueue event queue started in prepareAsync_l moves execution onto a separate pthread, and a condition variable is the mechanism used to synchronize the two threads.
prepare_l finally returns mPrepareResult, which was set to OK in finishAsyncPrepare_l.
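The pattern is the classic mutex-plus-condition handshake. A minimal generic sketch of it (not AOSP code; all names here are made up) would be:

#include <utils/Mutex.h>
#include <utils/Condition.h>

using android::Condition;
using android::Mutex;

struct PrepareGate {
    Mutex mLock;
    Condition mPreparedCondition;
    bool mPreparing = true;

    // Runs on the caller's thread, analogous to AwesomePlayer::prepare_l().
    void waitUntilPrepared() {
        Mutex::Autolock autoLock(mLock);
        while (mPreparing) {
            mPreparedCondition.wait(mLock);   // releases mLock while blocked
        }
    }

    // Runs on the event-queue thread, analogous to finishAsyncPrepare_l().
    void finish() {
        Mutex::Autolock autoLock(mLock);
        mPreparing = false;
        mPreparedCondition.broadcast();       // wake every waiting thread
    }
};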
This completes the analysis of the prepare flow. The next article will look at the OMX module.