首先从MediaCodec的configure方法开始:
//frameworks/base/media/java/android/media/MediaCodec.java
final public class MediaCodec {
// Public configure(): convenience overload without a descrambler. Simply
// delegates to the private five-argument configure with a null
// descramblerBinder.
public void configure(
@Nullable MediaFormat format,
@Nullable Surface surface, @Nullable MediaCrypto crypto,
@ConfigureFlag int flags) {
configure(format, surface, crypto, null, flags);
}
}
调用重载方法:
//frameworks/base/media/java/android/media/MediaCodec.java
final public class MediaCodec {
// Internal configure overload. Flattens the MediaFormat map into parallel
// key/value arrays for JNI, rewriting KEY_AUDIO_SESSION_ID into the
// "audio-hw-sync" key the native layer expects, then records surface/crypto
// state and the buffer mode before calling into native code.
private void configure(
@Nullable MediaFormat format, @Nullable Surface surface,
@Nullable MediaCrypto crypto, @Nullable IHwBinder descramblerBinder,
@ConfigureFlag int flags) {
// A codec may use either MediaCrypto or a CAS descrambler, never both.
if (crypto != null && descramblerBinder != null) {
throw new IllegalArgumentException("Can't use crypto and descrambler together!");
}
String[] keys = null;
Object[] values = null;
if (format != null) {
Map<String, Object> formatMap = format.getMap();
keys = new String[formatMap.size()];
values = new Object[formatMap.size()];
int i = 0;
for (Map.Entry<String, Object> entry: formatMap.entrySet()) {
if (entry.getKey().equals(MediaFormat.KEY_AUDIO_SESSION_ID)) {
int sessionId = 0;
try {
sessionId = (Integer)entry.getValue();
}
catch (Exception e) {
throw new IllegalArgumentException("Wrong Session ID Parameter!");
}
// Translate the audio session id into the HW sync id the codec needs.
keys[i] = "audio-hw-sync";
values[i] = AudioSystem.getAudioHwSyncForSession(sessionId);
} else {
keys[i] = entry.getKey();
values[i] = entry.getValue();
}
++i;
}
}
mHasSurface = surface != null;
mCrypto = crypto;
synchronized (mBufferLock) {
// CONFIGURE_FLAG_USE_BLOCK_MODEL selects the block (LinearBlock) API.
if ((flags & CONFIGURE_FLAG_USE_BLOCK_MODEL) != 0) {
mBufferMode = BUFFER_MODE_BLOCK;
} else {
mBufferMode = BUFFER_MODE_LEGACY;
}
}
native_configure(keys, values, surface, crypto, descramblerBinder, flags); // calls into native code
}
}
通过查询调用的是android_media_MediaCodec_native_configure方法:
//frameworks/base/media/jni/android_media_MediaCodec.cpp
// JNI entry for MediaCodec.native_configure(): converts the Java key/value
// arrays into an AMessage, resolves the Surface's IGraphicBufferProducer and
// the crypto/descrambler objects, then calls JMediaCodec::configure().
static void android_media_MediaCodec_native_configure(
JNIEnv *env,
jobject thiz,
jobjectArray keys, jobjectArray values,
jobject jsurface,
jobject jcrypto,
jobject descramblerBinderObj,
jint flags) {
sp<JMediaCodec> codec = getMediaCodec(env, thiz);
if (codec == NULL || codec->initCheck() != OK) {
throwExceptionAsNecessary(env, INVALID_OPERATION);
return;
}
sp<AMessage> format;
status_t err = ConvertKeyValueArraysToMessage(env, keys, values, &format);
if (err != OK) {
jniThrowException(env, "java/lang/IllegalArgumentException", NULL);
return;
}
sp<IGraphicBufferProducer> bufferProducer;
if (jsurface != NULL) {
sp<Surface> surface(android_view_Surface_getSurface(env, jsurface));
if (surface != NULL) {
bufferProducer = surface->getIGraphicBufferProducer();
} else {
jniThrowException(
env,
"java/lang/IllegalArgumentException",
"The surface has been released");
return;
}
}
sp<ICrypto> crypto;
if (jcrypto != NULL) {
crypto = JCrypto::GetCrypto(env, jcrypto);
}
sp<IDescrambler> descrambler;
if (descramblerBinderObj != NULL) {
descrambler = GetDescrambler(env, descramblerBinderObj);
}
err = codec->configure(format, bufferProducer, crypto, descrambler, flags); // forward to JMediaCodec::configure
throwExceptionAsNecessary(env, err);
}
调用JMediaCodec的configure方法:
sp<MediaCodec> mCodec;
//frameworks/base/media/jni/android_media_MediaCodec.cpp
// Wraps the buffer producer in an app-controlled Surface (or clears the old
// one), derives whether the output is graphic, records crypto/descrambler
// presence, and forwards everything to the framework MediaCodec.
// (review: dropped the unused local `sp<Surface> client;` the excerpt carried.)
status_t JMediaCodec::configure(
        const sp<AMessage> &format,
        const sp<IGraphicBufferProducer> &bufferProducer,
        const sp<ICrypto> &crypto,
        const sp<IDescrambler> &descrambler,
        int flags) {
    if (bufferProducer != NULL) {
        // controlledByApp == true: the application manages this surface's connection.
        mSurfaceTextureClient =
            new Surface(bufferProducer, true /* controlledByApp */);
    } else {
        mSurfaceTextureClient.clear();
    }
    constexpr int32_t CONFIGURE_FLAG_ENCODE = 1;
    AString mime;
    CHECK(format->findString("mime", &mime));  // "mime" is mandatory in the format
    // Only video/image *decoders* produce renderable (graphic) output.
    mGraphicOutput = (mime.startsWithIgnoreCase("video/") || mime.startsWithIgnoreCase("image/"))
            && !(flags & CONFIGURE_FLAG_ENCODE);
    mHasCryptoOrDescrambler = (crypto != nullptr) || (descrambler != nullptr);
    mCrypto = crypto;
    return mCodec->configure(
            format, mSurfaceTextureClient, crypto, descrambler, flags); // calls MediaCodec::configure
}
调用MediaCodec的configure方法:
//frameworks/av/media/libstagefright/MediaCodec.cpp
// Records format-derived state (size, rotation, mediametrics entries),
// optionally shapes the encoder format, then posts kWhatConfigure to the
// looper thread and waits for the reply, retrying after resource
// reclamation when the codec hits a resource error.
status_t MediaCodec::configure(
const sp<AMessage> &format,
const sp<Surface> &surface,
const sp<ICrypto> &crypto,
const sp<IDescrambler> &descrambler,
uint32_t flags) {
sp<AMessage> msg = new AMessage(kWhatConfigure, this);
// TODO: validity check log-session-id: it should be a 32-hex-digit.
format->findString("log-session-id", &mLogSessionId);
if (mMetricsHandle != 0) {
int32_t profile = 0;
if (format->findInt32("profile", &profile)) {
mediametrics_setInt32(mMetricsHandle, kCodecProfile, profile);
}
int32_t level = 0;
if (format->findInt32("level", &level)) {
mediametrics_setInt32(mMetricsHandle, kCodecLevel, level);
}
mediametrics_setInt32(mMetricsHandle, kCodecEncoder,
(flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
}
// Video/image codecs: cache geometry and publish it to mediametrics.
if (mDomain == DOMAIN_VIDEO || mDomain == DOMAIN_IMAGE) {
format->findInt32("width", &mWidth);
format->findInt32("height", &mHeight);
if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
mRotationDegrees = 0;
}
if (mMetricsHandle != 0) {
mediametrics_setInt32(mMetricsHandle, kCodecWidth, mWidth);
mediametrics_setInt32(mMetricsHandle, kCodecHeight, mHeight);
mediametrics_setInt32(mMetricsHandle, kCodecRotation, mRotationDegrees);
int32_t maxWidth = 0;
if (format->findInt32("max-width", &maxWidth)) {
mediametrics_setInt32(mMetricsHandle, kCodecMaxWidth, maxWidth);
}
int32_t maxHeight = 0;
if (format->findInt32("max-height", &maxHeight)) {
mediametrics_setInt32(mMetricsHandle, kCodecMaxHeight, maxHeight);
}
int32_t colorFormat = -1;
if (format->findInt32("color-format", &colorFormat)) {
mediametrics_setInt32(mMetricsHandle, kCodecColorFormat, colorFormat);
}
if (mDomain == DOMAIN_VIDEO) {
float frameRate = -1.0;
if (format->findFloat("frame-rate", &frameRate)) {
mediametrics_setDouble(mMetricsHandle, kCodecFrameRate, frameRate);
}
float captureRate = -1.0;
if (format->findFloat("capture-rate", &captureRate)) {
mediametrics_setDouble(mMetricsHandle, kCodecCaptureRate, captureRate);
}
float operatingRate = -1.0;
if (format->findFloat("operating-rate", &operatingRate)) {
mediametrics_setDouble(mMetricsHandle, kCodecOperatingRate, operatingRate);
}
int32_t priority = -1;
if (format->findInt32("priority", &priority)) {
mediametrics_setInt32(mMetricsHandle, kCodecPriority, priority);
}
}
int32_t colorStandard = -1;
if (format->findInt32(KEY_COLOR_STANDARD, &colorStandard)) {
mediametrics_setInt32(mMetricsHandle, kCodecConfigColorStandard, colorStandard);
}
int32_t colorRange = -1;
if (format->findInt32(KEY_COLOR_RANGE, &colorRange)) {
mediametrics_setInt32(mMetricsHandle, kCodecConfigColorRange, colorRange);
}
int32_t colorTransfer = -1;
if (format->findInt32(KEY_COLOR_TRANSFER, &colorTransfer)) {
mConfigColorTransfer = colorTransfer;
mediametrics_setInt32(mMetricsHandle, kCodecConfigColorTransfer, colorTransfer);
}
HDRStaticInfo info;
if (ColorUtils::getHDRStaticInfoFromFormat(format, &info)
&& ColorUtils::isHDRStaticInfoValid(&info)) {
mHDRStaticInfo = true;
}
}
// Prevent possible integer overflow in downstream code.
if (mWidth < 0 || mHeight < 0 ||
(uint64_t)mWidth * mHeight > (uint64_t)INT32_MAX / 4) {
ALOGE("Invalid size(s), width=%d, height=%d", mWidth, mHeight);
return BAD_VALUE;
}
} else {
// Audio codecs: report channel count / sample rate to mediametrics.
if (mMetricsHandle != 0) {
int32_t channelCount;
if (format->findInt32(KEY_CHANNEL_COUNT, &channelCount)) {
mediametrics_setInt32(mMetricsHandle, kCodecChannelCount, channelCount);
}
int32_t sampleRate;
if (format->findInt32(KEY_SAMPLE_RATE, &sampleRate)) {
mediametrics_setInt32(mMetricsHandle, kCodecSampleRate, sampleRate);
}
}
}
// Encoders only: optionally run media-format shaping (sysprop-gated).
if (flags & CONFIGURE_FLAG_ENCODE) {
int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty,
enableMediaFormatShapingDefault);
if (!enableShaping) {
ALOGI("format shaping disabled, property '%s'", enableMediaFormatShapingProperty);
if (mMetricsHandle != 0) {
mediametrics_setInt32(mMetricsHandle, kCodecShapingEnhanced, -1);
}
} else {
(void) shapeMediaFormat(format, flags);
// XXX: do we want to do this regardless of shaping enablement?
mapFormat(mComponentName, format, nullptr, false);
}
}
// push min/max QP to MediaMetrics after shaping
if (mDomain == DOMAIN_VIDEO && mMetricsHandle != 0) {
int32_t qpIMin = -1;
if (format->findInt32("video-qp-i-min", &qpIMin)) {
mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPIMin, qpIMin);
}
int32_t qpIMax = -1;
if (format->findInt32("video-qp-i-max", &qpIMax)) {
mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPIMax, qpIMax);
}
int32_t qpPMin = -1;
if (format->findInt32("video-qp-p-min", &qpPMin)) {
mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPPMin, qpPMin);
}
int32_t qpPMax = -1;
if (format->findInt32("video-qp-p-max", &qpPMax)) {
mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPPMax, qpPMax);
}
int32_t qpBMin = -1;
if (format->findInt32("video-qp-b-min", &qpBMin)) {
mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPBMin, qpBMin);
}
int32_t qpBMax = -1;
if (format->findInt32("video-qp-b-max", &qpBMax)) {
mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPBMax, qpBMax);
}
}
updateLowLatency(format);
msg->setMessage("format", format);
msg->setInt32("flags", flags);
msg->setObject("surface", surface);
// At most one of crypto / descrambler is attached to the message.
if (crypto != NULL || descrambler != NULL) {
if (crypto != NULL) {
msg->setPointer("crypto", crypto.get());
} else {
msg->setPointer("descrambler", descrambler.get());
}
if (mMetricsHandle != 0) {
mediametrics_setInt32(mMetricsHandle, kCodecCrypto, 1);
}
} else if (mFlags & kFlagIsSecure) {
ALOGW("Crypto or descrambler should be given for secure codec");
}
// save msg for reset
mConfigureMsg = msg;
sp<AMessage> callback = mCallback;
status_t err;
std::vector<MediaResourceParcel> resources;
resources.push_back(MediaResource::CodecResource(mFlags & kFlagIsSecure,
toMediaResourceSubType(mDomain)));
// Don't know the buffer size at this point, but it's fine to use 1 because
// the reclaimResource call doesn't consider the requester's buffer size for now.
resources.push_back(MediaResource::GraphicMemoryResource(1));
// Retry loop: on resource errors, ask the resource manager to reclaim and retry.
for (int i = 0; i <= kMaxRetry; ++i) {
sp<AMessage> response;
err = PostAndAwaitResponse(msg, &response); // post kWhatConfigure and wait for the reply
if (err != OK && err != INVALID_OPERATION) {
if (isResourceError(err) && !mResourceManagerProxy->reclaimResource(resources)) {
break;
}
// MediaCodec now set state to UNINITIALIZED upon any fatal error.
// To maintain backward-compatibility, do a reset() to put codec
// back into INITIALIZED state.
// But don't reset if the err is INVALID_OPERATION, which means
// the configure failure is due to wrong state.
ALOGE("configure failed with err 0x%08x, resetting...", err);
status_t err2 = reset();
if (err2 != OK) {
ALOGE("retrying configure: failed to reset codec (%08x)", err2);
break;
}
if (callback != nullptr) {
err2 = setCallback(callback);
if (err2 != OK) {
ALOGE("retrying configure: failed to set callback (%08x)", err2);
break;
}
}
}
if (!isResourceError(err)) {
break;
}
}
return err;
}
发送kWhatConfigure消息并等待回复,发送的消息onMessageReceived中处理:
sp<CodecBase> mCodec;  // (member shown by the article for context)
//frameworks/av/media/libstagefright/MediaCodec.cpp
// Looper-thread handler for kWhatConfigure: validates state, attaches the
// surface, stores crypto/descrambler on the buffer channel, extracts CSD,
// and finally asks the CodecBase (e.g. ACodec) to configure the component.
void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatConfigure:
{
// configure() is only legal from the INITIALIZED state.
if (mState != INITIALIZED) {
PostReplyWithError(msg, INVALID_OPERATION);
break;
}
// Another reply is still pending: defer this message until it completes.
if (mReplyID) {
mDeferredMessages.push_back(msg);
break;
}
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
sp<RefBase> obj;
CHECK(msg->findObject("surface", &obj));
sp<AMessage> format;
CHECK(msg->findMessage("format", &format));
int32_t push;
if (msg->findInt32("push-blank-buffers-on-shutdown", &push) && push != 0) {
mFlags |= kFlagPushBlankBuffersOnShutdown;
}
if (obj != NULL) {
if (!format->findInt32(KEY_ALLOW_FRAME_DROP, &mAllowFrameDroppingBySurface)) {
// allow frame dropping by surface by default
mAllowFrameDroppingBySurface = true;
}
format->setObject("native-window", obj);
status_t err = handleSetSurface(static_cast<Surface *>(obj.get())); // attach the output surface
if (err != OK) {
PostReplyWithError(replyID, err);
break;
}
} else {
// we are not using surface so this variable is not used, but initialize sensibly anyway
mAllowFrameDroppingBySurface = false;
handleSetSurface(NULL); // detach any previously set surface
}
uint32_t flags;
CHECK(msg->findInt32("flags", (int32_t *)&flags));
if (flags & CONFIGURE_FLAG_USE_BLOCK_MODEL) {
// Block model is only supported in async (callback) mode.
if (!(mFlags & kFlagIsAsync)) {
PostReplyWithError(replyID, INVALID_OPERATION);
break;
}
mFlags |= kFlagUseBlockModel;
}
mReplyID = replyID;
setState(CONFIGURING); // enter the CONFIGURING state
void *crypto;
if (!msg->findPointer("crypto", &crypto)) {
crypto = NULL;
}
ALOGV("kWhatConfigure: Old mCrypto: %p (%d)",
mCrypto.get(), (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
mCrypto = static_cast<ICrypto *>(crypto);
mBufferChannel->setCrypto(mCrypto);
ALOGV("kWhatConfigure: New mCrypto: %p (%d)",
mCrypto.get(), (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
void *descrambler;
if (!msg->findPointer("descrambler", &descrambler)) {
descrambler = NULL;
}
mDescrambler = static_cast<IDescrambler *>(descrambler);
mBufferChannel->setDescrambler(mDescrambler); // hand the descrambler to the buffer channel
format->setInt32("flags", flags);
if (flags & CONFIGURE_FLAG_ENCODE) {
format->setInt32("encoder", true);
mFlags |= kFlagIsEncoder;
}
extractCSD(format);
int32_t tunneled;
if (format->findInt32("feature-tunneled-playback", &tunneled) && tunneled != 0) {
ALOGI("Configuring TUNNELED video playback.");
mTunneled = true;
} else {
mTunneled = false;
}
// If mTunnelPeekState is still in kLegacyMode at this point,
// configure the codec in legacy mode
if (mTunneled && (mTunnelPeekState == TunnelPeekState::kLegacyMode)) {
sp<AMessage> params = new AMessage;
params->setInt32("android._tunnel-peek-set-legacy", 1);
onSetParameters(params);
}
int32_t background = 0;
if (format->findInt32("android._background-mode", &background) && background) {
androidSetThreadPriority(gettid(), ANDROID_PRIORITY_BACKGROUND);
}
mCodec->initiateConfigureComponent(format); // kick off component configuration in CodecBase
break;
}
}
// (excerpt: remaining message cases and the function's closing brace are omitted by the article)
上述方法的主要处理如下:
1、调用MediaCodec的handleSetSurface方法,设置Surface
2、调用ACodecBufferChannel的setCrypto方法,设置加密
3、调用ACodecBufferChannel的setDescrambler方法,设置解扰器
4、调用MediaCodec的onSetParameters方法,设置参数
5、调用ACodec的initiateConfigureComponent方法,启动配置组件
下面分别进行分析:
MediaCodec::handleSetSurface
调用MediaCodec的handleSetSurface方法,设置Surface:
//frameworks/av/media/libstagefright/MediaCodec.cpp
// Replaces the codec's output surface: always detach the current one first,
// then (when a new surface was supplied) connect it and remember it only if
// the connection succeeded.
status_t MediaCodec::handleSetSurface(const sp<Surface> &surface) {
    if (mSurface != NULL) {
        (void)disconnectFromSurface();  // drop the previous connection; result ignored
    }
    if (surface == NULL) {
        return OK;  // running surface-less from now on
    }
    const status_t err = connectToSurface(surface);
    if (err == OK) {
        mSurface = surface;  // keep the surface only once it is connected
    }
    return err;
}
MediaCodec::connectToSurface
//frameworks/av/media/libstagefright/MediaCodec.cpp
// Connects the codec to a surface. Re-connecting the same surface is a no-op
// (reported as ALREADY_EXISTS, mapped to BAD_VALUE for callers); otherwise it
// connects, stamps a unique buffer generation number, and reconnects once so
// the consumer's stale free buffers are discarded.
status_t MediaCodec::connectToSurface(const sp<Surface> &surface) {
status_t err = OK;
if (surface != NULL) {
uint64_t oldId, newId;
if (mSurface != NULL
&& surface->getUniqueId(&newId) == NO_ERROR
&& mSurface->getUniqueId(&oldId) == NO_ERROR
&& newId == oldId) {
ALOGI("[%s] connecting to the same surface. Nothing to do.", mComponentName.c_str());
return ALREADY_EXISTS;
}
// in case we don't connect, ensure that we don't signal the surface is
// connected to the screen
mIsSurfaceToScreen = false;
err = nativeWindowConnect(surface.get(), "connectToSurface");
if (err == OK) {
// Require a fresh set of buffers after each connect by using a unique generation
// number. Rely on the fact that max supported process id by Linux is 2^22.
// PID is never 0 so we don't have to worry that we use the default generation of 0.
// TODO: come up with a unique scheme if other producers also set the generation number.
static uint32_t mSurfaceGeneration = 0;
uint32_t generation = (getpid() << 10) | (++mSurfaceGeneration & ((1 << 10) - 1));
surface->setGenerationNumber(generation);
ALOGI("[%s] setting surface generation to %u", mComponentName.c_str(), generation);
// HACK: clear any free buffers. Remove when connect will automatically do this.
// This is needed as the consumer may be holding onto stale frames that it can reattach
// to this surface after disconnect/connect, and those free frames would inherit the new
// generation number. Disconnecting after setting a unique generation prevents this.
nativeWindowDisconnect(surface.get(), "connectToSurface(reconnect)");
err = nativeWindowConnect(surface.get(), "connectToSurface(reconnect)");
}
if (err != OK) {
ALOGE("nativeWindowConnect returned an error: %s (%d)", strerror(-err), err);
} else {
if (!mAllowFrameDroppingBySurface) {
disableLegacyBufferDropPostQ(surface);
}
// keep track whether or not the buffers of the connected surface go to the screen
int result = 0;
surface->query(NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER, &result);
mIsSurfaceToScreen = result != 0;
}
}
// do not return ALREADY_EXISTS unless surfaces are the same
return err == ALREADY_EXISTS ? BAD_VALUE : err;
}
MediaCodec::disconnectFromSurface
//frameworks/av/media/libstagefright/MediaCodec.cpp
// Detaches the current surface, if any: reset its generation number,
// disconnect the native window (warning on failure), and forget the surface
// regardless of the outcome.
status_t MediaCodec::disconnectFromSurface() {
    if (mSurface == NULL) {
        return OK;  // nothing attached
    }
    // Resetting generation is not technically needed, but there is no need to keep it either
    mSurface->setGenerationNumber(0);
    const status_t err = nativeWindowDisconnect(mSurface.get(), "disconnectFromSurface");
    if (err != OK) {
        ALOGW("nativeWindowDisconnect returned an error: %s (%d)", strerror(-err), err);
    }
    // assume disconnected even on error
    mSurface.clear();
    mIsSurfaceToScreen = false;
    return err;
}
nativeWindowConnect
上面两个方法会调用nativeWindowConnect和nativeWindowDisconnect方法:
//frameworks/av/media/libstagefright/SurfaceUtils.cpp
// Connects the window to the media API (NATIVE_WINDOW_API_MEDIA); logs and
// returns any error. `reason` is only used for logging.
status_t nativeWindowConnect(ANativeWindow *surface, const char *reason) {
ALOGD("connecting to surface %p, reason %s", surface, reason);
status_t err = native_window_api_connect(surface, NATIVE_WINDOW_API_MEDIA);
ALOGE_IF(err != OK, "Failed to connect to surface %p, err %d", surface, err);
return err;
}
nativeWindowDisconnect
调用native_window_api_connect方法,该方法在/frameworks/native/libs/nativewindow/include/system/window.h中定义,这部分属于window相关内容,我们不在这里分析。
//frameworks/av/media/libstagefright/SurfaceUtils.cpp
// Disconnects the window from the media API; logs and returns any error.
// `reason` is only used for logging.
status_t nativeWindowDisconnect(ANativeWindow *surface, const char *reason) {
ALOGD("disconnecting from surface %p, reason %s", surface, reason);
status_t err = native_window_api_disconnect(surface, NATIVE_WINDOW_API_MEDIA);
ALOGE_IF(err != OK, "Failed to disconnect from surface %p, err %d", surface, err);
return err;
}
调用native_window_api_disconnect方法,该方法在/frameworks/native/libs/nativewindow/include/system/window.h中定义,这部分属于window相关内容,我们不在这里分析。
ACodecBufferChannel::setCrypto
调用ACodecBufferChannel的setCrypto方法,设置加密:
//frameworks/av/media/libstagefright/ACodecBufferChannel.cpp
// Installs a new crypto object. Any heap registrations made against the old
// crypto are released first so its heap sequence numbers are not leaked.
void ACodecBufferChannel::setCrypto(const sp<ICrypto> &crypto) {
    if (mCrypto != nullptr) {
        // Iterate by const reference: the original spelled out a by-value
        // std::pair that didn't match the map's const-key value type, so every
        // iteration copied the entry (and refcount-churned the wp<HidlMemory>).
        for (const auto &entry : mHeapSeqNumMap) {
            mCrypto->unsetHeap(entry.second);
        }
        mHeapSeqNumMap.clear();
        if (mHeapSeqNum >= 0) {
            mCrypto->unsetHeap(mHeapSeqNum);
            mHeapSeqNum = -1;
        }
    }
    mCrypto = crypto;
}
ACodecBufferChannel::setDescrambler
调用ACodecBufferChannel的setDescrambler方法,设置解扰器:
//frameworks/av/media/libstagefright/ACodecBufferChannel.cpp
// Stores the descrambler used for scrambled (CAS) content; no other state
// is touched here.
void ACodecBufferChannel::setDescrambler(const sp<IDescrambler> &descrambler) {
mDescrambler = descrambler;
}
MediaCodec::onSetParameters
调用MediaCodec的onSetParameters方法,设置参数:
sp<CodecBase> mCodec;  // (member shown by the article for context)
//frameworks/av/media/libstagefright/MediaCodec.cpp
// Applies runtime parameters on the looper thread: refreshes low-latency and
// tunnel-peek state, maps vendor-specific keys, then forwards to the
// CodecBase. (review: the excerpt's "&params" was mis-encoded as "¶ms".)
status_t MediaCodec::onSetParameters(const sp<AMessage> &params) {
    updateLowLatency(params);
    mapFormat(mComponentName, params, nullptr, false);
    updateTunnelPeek(params);
    mCodec->signalSetParameters(params); // calls CodecBase::signalSetParameters
    return OK;
}
调用CodecBase的signalSetParameters方法:
//frameworks/av/media/libstagefright/ACodec.cpp
// Packages the parameters into a kWhatSetParameters message and posts it to
// ACodec's looper. (review: the excerpt's "&params" was mis-encoded as "¶ms".)
void ACodec::signalSetParameters(const sp<AMessage> &params) {
    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
    msg->setMessage("params", params);
    msg->post(); // deliver kWhatSetParameters asynchronously
}
发送kWhatSetParameters消息,消息会在onMessageReceived中处理:
//frameworks/av/media/libstagefright/ACodec.cpp
// While transitioning Loaded->Idle the component cannot take parameters yet,
// so kWhatSetParameters is deferred and replayed in a later state.
bool ACodec::LoadedToIdleState::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatSetParameters:
{
mCodec->deferMessage(msg);  // replay once the state machine settles
return true;
}
}
// (excerpt: the remaining cases / fallthrough handling are omitted by the article)
}
//frameworks/av/media/libstagefright/ACodec.cpp
// In the Executing state kWhatSetParameters is applied immediately via
// ACodec::setParameters; an optional "reply" message carries the status back.
// (review: the excerpt's "&params" was mis-encoded as "¶ms"; `handled` is
// declared and returned in the full function, which the article trimmed.)
bool ACodec::ExecutingState::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatSetParameters:
{
sp<AMessage> params;
CHECK(msg->findMessage("params", &params));
status_t err = mCodec->setParameters(params);
sp<AMessage> reply;
if (msg->findMessage("reply", &reply)) {
reply->setInt32("err", err);
reply->post();
}
handled = true;
break;
}
}
}
//frameworks/av/media/libstagefright/ACodec.cpp
// During an output-port settings change, hdr10-plus info is applied right
// away; any remaining parameters are deferred until the transition finishes.
// (review: the excerpt dropped the `switch (msg->what()) {` line — restored
// here so the braces balance — and mis-encoded "&params" as "¶ms".
// `handled` is declared and returned in the full function.)
bool ACodec::OutputPortSettingsChangedState::onMessageReceived(
const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatSetParameters:
{
sp<AMessage> params;
CHECK(msg->findMessage("params", &params));
sp<ABuffer> hdr10PlusInfo;
if (params->findBuffer("hdr10-plus-info", &hdr10PlusInfo)) {
if (hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
(void)mCodec->setHdr10PlusInfo(hdr10PlusInfo);
}
params->removeEntryAt(params->findEntryByName("hdr10-plus-info"));
if (params->countEntries() == 0) {
msg->removeEntryAt(msg->findEntryByName("params"));
}
}
if (msg->countEntries() > 0) {
mCodec->deferMessage(msg);  // defer whatever is left
}
handled = true;
break;
}
}
}
ACodec::initiateConfigureComponent
调用ACodec的initiateConfigureComponent方法,启动配置组件:
//frameworks/av/media/libstagefright/ACodec.cpp
// Re-targets the configure message at ACodec itself and posts it, so the
// actual configuration runs on ACodec's looper thread.
void ACodec::initiateConfigureComponent(const sp<AMessage> &msg) {
msg->setWhat(kWhatConfigureComponent);
msg->setTarget(this);
msg->post(); // deliver kWhatConfigureComponent to ACodec's looper
}
发送kWhatConfigureComponent消息,消息会在onMessageReceived中处理:
//frameworks/av/media/libstagefright/ACodec.cpp
// In the Loaded state kWhatConfigureComponent is handled synchronously by
// onConfigureComponent.
bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) {
bool handled = false;
switch (msg->what()) {
case ACodec::kWhatConfigureComponent:
{
onConfigureComponent(msg);
handled = true;
break;
}
}
// (excerpt: other cases and the `return handled;` are omitted by the article)
}
调用onConfigureComponent方法:
//frameworks/av/media/libstagefright/ACodec.cpp
ACodec *mCodec;  // (member shown by the article for context)
// Extracts the mandatory "mime" entry and runs ACodec::configureCodec(). On
// failure signals OMX_ErrorUndefined to the client; on success reports the
// negotiated input/output formats via onComponentConfigured.
bool ACodec::LoadedState::onConfigureComponent(
const sp<AMessage> &msg) {
ALOGV("onConfigureComponent");
CHECK(mCodec->mOMXNode != NULL);  // the OMX component must already be allocated
status_t err = OK;
AString mime;
if (!msg->findString("mime", &mime)) {
err = BAD_VALUE;
} else {
err = mCodec->configureCodec(mime.c_str(), msg);
}
if (err != OK) {
ALOGE("[%s] configureCodec returning error %d",
mCodec->mComponentName.c_str(), err);
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
return false;
}
mCodec->mCallback->onComponentConfigured(mCodec->mInputFormat, mCodec->mOutputFormat); // notify the upper layer that configuration is done
return true;
}
调用ACodec的configureCodec方法:
sp<DataConverter> mConverter[2];
//frameworks/av/media/libstagefright/ACodec.cpp
status_t ACodec::configureCodec(
const char *mime, const sp<AMessage> &msg) {
int32_t encoder;
if (!msg->findInt32("encoder", &encoder)) {
encoder = false;
}
sp<AMessage> inputFormat = new AMessage;
sp<AMessage> outputFormat = new AMessage;
mConfigFormat = msg;
mIsEncoder = encoder;
mIsVideo = !strncasecmp(mime, "video/", 6);
mIsImage = !strncasecmp(mime, "image/", 6);
mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
status_t err = setComponentRole(encoder /* isEncoder */, mime); //设置组件角色
if (err != OK) {
return err;
}
OMX_VIDEO_CONTROLRATETYPE bitrateMode;
int32_t bitrate = 0, quality;
// FLAC encoder or video encoder in constant quality mode doesn't need a
// bitrate, other encoders do.
if (encoder) {
if (mIsVideo || mIsImage) {
if (!findVideoBitrateControlInfo(msg, &bitrateMode, &bitrate, &quality)) {
return INVALID_OPERATION;
}
} else if (strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)
&& !msg->findInt32("bitrate", &bitrate)) {
return INVALID_OPERATION;
}
}
// propagate bitrate to the output so that the muxer has it
if (encoder && msg->findInt32("bitrate", &bitrate)) {
// Technically ISO spec says that 'bitrate' should be 0 for VBR even though it is the
// average bitrate. We've been setting both bitrate and max-bitrate to this same value.
outputFormat->setInt32("bitrate", bitrate);
outputFormat->setInt32("max-bitrate", bitrate);
}
int32_t storeMeta;
if (encoder) {
IOMX::PortMode mode = IOMX::kPortModePresetByteBuffer;
if (msg->findInt32("android._input-metadata-buffer-type", &storeMeta)
&& storeMeta != kMetadataBufferTypeInvalid) {
if (storeMeta == kMetadataBufferTypeNativeHandleSource) {
mode = IOMX::kPortModeDynamicNativeHandle;
} else if (storeMeta == kMetadataBufferTypeANWBuffer ||
storeMeta == kMetadataBufferTypeGrallocSource) {
mode = IOMX::kPortModeDynamicANWBuffer;
} else {
return BAD_VALUE;
}
}
err = setPortMode(kPortIndexInput, mode); //设置Port模式
if (err != OK) {
return err;
}
if (mode != IOMX::kPortModePresetByteBuffer) {
uint32_t usageBits;
if (mOMXNode->getParameter(
(OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
&usageBits, sizeof(usageBits)) == OK) {
inputFormat->setInt32(
"using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
}
}
}
int32_t lowLatency = 0;
if (msg->findInt32("low-latency", &lowLatency)) {
err = setLowLatency(lowLatency);
if (err != OK) {
return err;
}
}
int32_t prependSPSPPS = 0;
if (encoder && mIsVideo
&& msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
&& prependSPSPPS != 0) {
OMX_INDEXTYPE index;
err = mOMXNode->getExtensionIndex(
"OMX.google.android.index.prependSPSPPSToIDRFrames", &index);
if (err == OK) {
PrependSPSPPSToIDRFramesParams params;
InitOMXParams(&params);
params.bEnable = OMX_TRUE;
err = mOMXNode->setParameter(index, &params, sizeof(params));
}
if (err != OK) {
ALOGE("Encoder could not be configured to emit SPS/PPS before "
"IDR frames. (err %d)", err);
return err;
}
}
// Only enable metadata mode on encoder output if encoder can prepend
// sps/pps to idr frames, since in metadata mode the bitstream is in an
// opaque handle, to which we don't have access.
if (encoder && mIsVideo) {
OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
&& msg->findInt32("android._store-metadata-in-buffers-output", &storeMeta)
&& storeMeta != 0);
if (mFlags & kFlagIsSecure) {
enable = OMX_TRUE;
}
err = setPortMode(kPortIndexOutput, enable ?
IOMX::kPortModePresetSecureBuffer : IOMX::kPortModePresetByteBuffer);
if (err != OK) {
return err;
}
if (!msg->findInt64(
KEY_REPEAT_PREVIOUS_FRAME_AFTER, &mRepeatFrameDelayUs)) {
mRepeatFrameDelayUs = -1LL;
}
if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
float captureRate;
if (msg->findAsFloat(KEY_CAPTURE_RATE, &captureRate)) {
mCaptureFps = captureRate;
} else {
mCaptureFps = -1.0;
}
}
if (!msg->findInt32(
KEY_CREATE_INPUT_SURFACE_SUSPENDED,
(int32_t*)&mCreateInputBuffersSuspended)) {
mCreateInputBuffersSuspended = false;
}
}
if (encoder && (mIsVideo || mIsImage)) {
// only allow 32-bit value, since we pass it as U32 to OMX.
if (!msg->findInt64(KEY_MAX_PTS_GAP_TO_ENCODER, &mMaxPtsGapUs)) {
mMaxPtsGapUs = 0LL;
} else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < INT32_MIN) {
ALOGW("Unsupported value for max pts gap %lld", (long long) mMaxPtsGapUs);
mMaxPtsGapUs = 0LL;
}
if (!msg->findFloat(KEY_MAX_FPS_TO_ENCODER, &mMaxFps)) {
mMaxFps = -1;
}
// notify GraphicBufferSource to allow backward frames
if (mMaxPtsGapUs < 0LL) {
mMaxFps = -1;
}
}
// NOTE: we only use native window for video decoders
sp<RefBase> obj;
bool haveNativeWindow = msg->findObject("native-window", &obj)
&& obj != NULL && mIsVideo && !encoder;
mUsingNativeWindow = haveNativeWindow;
if (mIsVideo && !encoder) {
inputFormat->setInt32("adaptive-playback", false);
int32_t usageProtected;
if (msg->findInt32("protected", &usageProtected) && usageProtected) {
if (!haveNativeWindow) {
ALOGE("protected output buffers must be sent to an ANativeWindow");
return PERMISSION_DENIED;
}
mFlags |= kFlagIsGrallocUsageProtected;
mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
}
}
if (mFlags & kFlagIsSecure) {
// use native_handles for secure input buffers
err = setPortMode(kPortIndexInput, IOMX::kPortModePresetSecureBuffer);
if (err != OK) {
ALOGI("falling back to non-native_handles");
setPortMode(kPortIndexInput, IOMX::kPortModePresetByteBuffer);
err = OK; // ignore error for now
}
OMX_INDEXTYPE index;
if (mOMXNode->getExtensionIndex(
"OMX.google.android.index.preregisterMetadataBuffers", &index) == OK) {
OMX_CONFIG_BOOLEANTYPE param;
InitOMXParams(&param);
param.bEnabled = OMX_FALSE;
if (mOMXNode->getParameter(index, &param, sizeof(param)) == OK) {
if (param.bEnabled == OMX_TRUE) {
mFlags |= kFlagPreregisterMetadataBuffers;
}
}
}
}
if (haveNativeWindow) {
sp<ANativeWindow> nativeWindow =
static_cast<ANativeWindow *>(static_cast<Surface *>(obj.get()));
// START of temporary support for automatic FRC - THIS WILL BE REMOVED
int32_t autoFrc;
if (msg->findInt32("auto-frc", &autoFrc)) {
bool enabled = autoFrc;
OMX_CONFIG_BOOLEANTYPE config;
InitOMXParams(&config);
config.bEnabled = (OMX_BOOL)enabled;
status_t temp = mOMXNode->setConfig(
(OMX_INDEXTYPE)OMX_IndexConfigAutoFramerateConversion,
&config, sizeof(config));
if (temp == OK) {
outputFormat->setInt32("auto-frc", enabled);
} else if (enabled) {
ALOGI("codec does not support requested auto-frc (err %d)", temp);
}
}
// END of temporary support for automatic FRC
int32_t tunneled;
if (msg->findInt32("feature-tunneled-playback", &tunneled) &&
tunneled != 0) {
ALOGI("Configuring TUNNELED video playback.");
mTunneled = true;
int32_t audioHwSync = 0;
if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
ALOGW("No Audio HW Sync provided for video tunnel");
}
err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
if (err != OK) {
ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
audioHwSync, nativeWindow.get());
return err;
}
int32_t maxWidth = 0, maxHeight = 0;
if (msg->findInt32("max-width", &maxWidth) &&
msg->findInt32("max-height", &maxHeight)) {
err = mOMXNode->prepareForAdaptivePlayback(
kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
if (err != OK) {
ALOGW("[%s] prepareForAdaptivePlayback failed w/ err %d",
mComponentName.c_str(), err);
// allow failure
err = OK;
} else {
inputFormat->setInt32("max-width", maxWidth);
inputFormat->setInt32("max-height", maxHeight);
inputFormat->setInt32("adaptive-playback", true);
}
}
} else {
ALOGV("Configuring CPU controlled video playback.");
mTunneled = false;
// Explicity reset the sideband handle of the window for
// non-tunneled video in case the window was previously used
// for a tunneled video playback.
err = native_window_set_sideband_stream(nativeWindow.get(), NULL);
if (err != OK) {
ALOGE("set_sideband_stream(NULL) failed! (err %d).", err);
return err;
}
err = setPortMode(kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer);
if (err != OK) {
// if adaptive playback has been requested, try JB fallback
// NOTE: THIS FALLBACK MECHANISM WILL BE REMOVED DUE TO ITS
// LARGE MEMORY REQUIREMENT
// we will not do adaptive playback on software accessed
// surfaces as they never had to respond to changes in the
// crop window, and we don't trust that they will be able to.
int usageBits = 0;
bool canDoAdaptivePlayback;
if (nativeWindow->query(
nativeWindow.get(),
NATIVE_WINDOW_CONSUMER_USAGE_BITS,
&usageBits) != OK) {
canDoAdaptivePlayback = false;
} else {
canDoAdaptivePlayback =
(usageBits &
(GRALLOC_USAGE_SW_READ_MASK |
GRALLOC_USAGE_SW_WRITE_MASK)) == 0;
}
int32_t maxWidth = 0, maxHeight = 0;
if (canDoAdaptivePlayback &&
msg->findInt32("max-width", &maxWidth) &&
msg->findInt32("max-height", &maxHeight)) {
ALOGV("[%s] prepareForAdaptivePlayback(%dx%d)",
mComponentName.c_str(), maxWidth, maxHeight);
err = mOMXNode->prepareForAdaptivePlayback(
kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
ALOGW_IF(err != OK,
"[%s] prepareForAdaptivePlayback failed w/ err %d",
mComponentName.c_str(), err);
if (err == OK) {
inputFormat->setInt32("max-width", maxWidth);
inputFormat->setInt32("max-height", maxHeight);
inputFormat->setInt32("adaptive-playback", true);
}
}
// allow failure
err = OK;
} else {
ALOGV("[%s] setPortMode on output to %s succeeded",
mComponentName.c_str(), asString(IOMX::kPortModeDynamicANWBuffer));
CHECK(storingMetadataInDecodedBuffers());
inputFormat->setInt32("adaptive-playback", true);
}
int32_t push;
if (msg->findInt32("push-blank-buffers-on-shutdown", &push)
&& push != 0) {
mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
}
}
int32_t rotationDegrees;
if (msg->findInt32("rotation-degrees", &rotationDegrees)) {
mRotationDegrees = rotationDegrees;
} else {
mRotationDegrees = 0;
}
}
AudioEncoding pcmEncoding = kAudioEncodingPcm16bit;
(void)msg->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
// invalid encodings will default to PCM-16bit in setupRawAudioFormat.
if (mIsVideo || mIsImage) {
// determine need for software renderer
bool usingSwRenderer = false;
if (haveNativeWindow) {
bool requiresSwRenderer = false;
OMX_PARAM_U32TYPE param;
InitOMXParams(¶m);
param.nPortIndex = kPortIndexOutput;
status_t err = mOMXNode->getParameter(
(OMX_INDEXTYPE)OMX_IndexParamVideoAndroidRequiresSwRenderer,
¶m, sizeof(param));
if (err == OK && param.nU32 == 1) {
requiresSwRenderer = true;
}
if (mComponentName.startsWith("OMX.google.") || requiresSwRenderer) {
usingSwRenderer = true;
haveNativeWindow = false;
(void)setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
} else if (!storingMetadataInDecodedBuffers()) {
err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetANWBuffer);
if (err != OK) {
return err;
}
}
}
if (encoder) {
err = setupVideoEncoder(mime, msg, outputFormat, inputFormat);
} else {
err = setupVideoDecoder(mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
}
if (err != OK) {
return err;
}
if (haveNativeWindow) {
mNativeWindow = static_cast<Surface *>(obj.get());
// fallback for devices that do not handle flex-YUV for native buffers
int32_t requestedColorFormat = OMX_COLOR_FormatUnused;
if (msg->findInt32("color-format", &requestedColorFormat) &&
requestedColorFormat == OMX_COLOR_FormatYUV420Flexible) {
status_t err = getPortFormat(kPortIndexOutput, outputFormat);
if (err != OK) {
return err;
}
int32_t colorFormat = OMX_COLOR_FormatUnused;
OMX_U32 flexibleEquivalent = OMX_COLOR_FormatUnused;
if (!outputFormat->findInt32("color-format", &colorFormat)) {
ALOGE("ouptut port did not have a color format (wrong domain?)");
return BAD_VALUE;
}
ALOGD("[%s] Requested output format %#x and got %#x.",
mComponentName.c_str(), requestedColorFormat, colorFormat);
if (!IsFlexibleColorFormat(
mOMXNode, colorFormat, haveNativeWindow, &flexibleEquivalent)
|| flexibleEquivalent != (OMX_U32)requestedColorFormat) {
// device did not handle flex-YUV request for native window, fall back
// to SW renderer
ALOGI("[%s] Falling back to software renderer", mComponentName.c_str());
mNativeWindow.clear();
mNativeWindowUsageBits = 0;
haveNativeWindow = false;
usingSwRenderer = true;
// TODO: implement adaptive-playback support for bytebuffer mode.
// This is done by SW codecs, but most HW codecs don't support it.
err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
inputFormat->setInt32("adaptive-playback", false);
if (mFlags & kFlagIsGrallocUsageProtected) {
// fallback is not supported for protected playback
err = PERMISSION_DENIED;
} else if (err == OK) {
err = setupVideoDecoder(
mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
}
}
}
}
if (usingSwRenderer) {
outputFormat->setInt32("using-sw-renderer", 1);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG) ||
!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)) {
int32_t numChannels, sampleRate;
if (!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
// Since we did not always check for these, leave them optional
// and have the decoder figure it all out.
err = OK;
} else {
err = setupRawAudioFormat(
encoder ? kPortIndexInput : kPortIndexOutput,
sampleRate,
numChannels);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
int32_t numChannels, sampleRate;
if (!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
int32_t isADTS, aacProfile;
int32_t sbrMode;
int32_t maxOutputChannelCount;
int32_t pcmLimiterEnable;
drcParams_t drc;
if (!msg->findInt32("is-adts", &isADTS)) {
isADTS = 0;
}
if (!msg->findInt32("aac-profile", &aacProfile)) {
aacProfile = OMX_AUDIO_AACObjectNull;
}
if (!msg->findInt32("aac-sbr-mode", &sbrMode)) {
sbrMode = -1;
}
if (!msg->findInt32("aac-max-output-channel_count", &maxOutputChannelCount)) {
// check non AAC-specific key
if (!msg->findInt32("max-output-channel-count", &maxOutputChannelCount)) {
maxOutputChannelCount = -1;
}
}
if (!msg->findInt32("aac-pcm-limiter-enable", &pcmLimiterEnable)) {
// value is unknown
pcmLimiterEnable = -1;
}
if (!msg->findInt32("aac-encoded-target-level", &drc.encodedTargetLevel)) {
// value is unknown
drc.encodedTargetLevel = -1;
}
if (!msg->findInt32("aac-drc-cut-level", &drc.drcCut)) {
// value is unknown
drc.drcCut = -1;
}
if (!msg->findInt32("aac-drc-boost-level", &drc.drcBoost)) {
// value is unknown
drc.drcBoost = -1;
}
if (!msg->findInt32("aac-drc-heavy-compression", &drc.heavyCompression)) {
// value is unknown
drc.heavyCompression = -1;
}
if (!msg->findInt32("aac-target-ref-level", &drc.targetRefLevel)) {
// value is unknown
drc.targetRefLevel = -2;
}
if (!msg->findInt32("aac-drc-effect-type", &drc.effectType)) {
// value is unknown
drc.effectType = -2; // valid values are -1 and over
}
if (!msg->findInt32("aac-drc-album-mode", &drc.albumMode)) {
// value is unknown
drc.albumMode = -1; // valid values are 0 and 1
}
if (!msg->findInt32("aac-drc-output-loudness", &drc.outputLoudness)) {
// value is unknown
drc.outputLoudness = -1;
}
err = setupAACCodec(
encoder, numChannels, sampleRate, bitrate, aacProfile,
isADTS != 0, sbrMode, maxOutputChannelCount, drc,
pcmLimiterEnable);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
err = setupAMRCodec(encoder, false /* isWAMR */, bitrate);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
err = setupAMRCodec(encoder, true /* isWAMR */, bitrate);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_ALAW)
|| !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_MLAW)) {
// These are PCM-like formats with a fixed sample rate but
// a variable number of channels.
int32_t numChannels;
if (!msg->findInt32("channel-count", &numChannels)) {
err = INVALID_OPERATION;
} else {
int32_t sampleRate;
if (!msg->findInt32("sample-rate", &sampleRate)) {
sampleRate = 8000;
}
err = setupG711Codec(encoder, sampleRate, numChannels);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_OPUS)) {
int32_t numChannels = 1, sampleRate = 48000;
if (msg->findInt32("channel-count", &numChannels) &&
msg->findInt32("sample-rate", &sampleRate)) {
err = setupOpusCodec(encoder, sampleRate, numChannels);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
// numChannels needs to be set to properly communicate PCM values.
int32_t numChannels = 2, sampleRate = 44100, compressionLevel = -1;
if (encoder &&
(!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate))) {
ALOGE("missing channel count or sample rate for FLAC encoder");
err = INVALID_OPERATION;
} else {
if (encoder) {
if (!msg->findInt32(
"complexity", &compressionLevel) &&
!msg->findInt32(
"flac-compression-level", &compressionLevel)) {
compressionLevel = 5; // default FLAC compression level
} else if (compressionLevel < 0) {
ALOGW("compression level %d outside [0..8] range, "
"using 0",
compressionLevel);
compressionLevel = 0;
} else if (compressionLevel > 8) {
ALOGW("compression level %d outside [0..8] range, "
"using 8",
compressionLevel);
compressionLevel = 8;
}
}
err = setupFlacCodec(
encoder, numChannels, sampleRate, compressionLevel, pcmEncoding);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
int32_t numChannels, sampleRate;
if (encoder
|| !msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels, pcmEncoding);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
int32_t numChannels;
int32_t sampleRate;
if (!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
err = setupAC3Codec(encoder, numChannels, sampleRate);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EAC3)) {
int32_t numChannels;
int32_t sampleRate;
if (!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
err = setupEAC3Codec(encoder, numChannels, sampleRate);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4)) {
int32_t numChannels;
int32_t sampleRate;
if (!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
err = setupAC4Codec(encoder, numChannels, sampleRate);
}
}
if (err != OK) {
return err;
}
if (!msg->findInt32("encoder-delay", &mEncoderDelay)) {
mEncoderDelay = 0;
}
if (!msg->findInt32("encoder-padding", &mEncoderPadding)) {
mEncoderPadding = 0;
}
if (msg->findInt32("channel-mask", &mChannelMask)) {
mChannelMaskPresent = true;
} else {
mChannelMaskPresent = false;
}
int32_t isCorruptFree = 0;
if (msg->findInt32("corrupt-free", &isCorruptFree)) {
mIsStreamCorruptFree = isCorruptFree == 1 ? true : false;
ALOGV("corrupt-free=[%d]", mIsStreamCorruptFree);
}
int32_t maxInputSize;
if (msg->findInt32("max-input-size", &maxInputSize)) {
err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize);
err = OK; // ignore error
} else if (!strcmp("OMX.Nvidia.aac.decoder", mComponentName.c_str())) {
err = setMinBufferSize(kPortIndexInput, 8192); // XXX
err = OK; // ignore error
}
int32_t priority;
if (msg->findInt32("priority", &priority)) {
err = setPriority(priority);
err = OK; // ignore error
}
int32_t rateInt = -1;
float rateFloat = -1;
if (!msg->findFloat("operating-rate", &rateFloat)) {
msg->findInt32("operating-rate", &rateInt);
rateFloat = (float)rateInt; // 16MHz (FLINTMAX) is OK for upper bound.
}
if (rateFloat > 0) {
err = setOperatingRate(rateFloat, mIsVideo);
err = OK; // ignore errors
}
if (err == OK) {
err = setVendorParameters(msg);
if (err != OK) {
return err;
}
}
// NOTE: both mBaseOutputFormat and mOutputFormat are outputFormat to signal first frame.
mBaseOutputFormat = outputFormat;
mLastOutputFormat.clear();
err = getPortFormat(kPortIndexInput, inputFormat);
if (err == OK) {
err = getPortFormat(kPortIndexOutput, outputFormat);
if (err == OK) {
mInputFormat = inputFormat;
mOutputFormat = outputFormat;
}
}
// create data converters if needed
if (!mIsVideo && !mIsImage && err == OK) {
AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
if (encoder) {
(void)mInputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
mConverter[kPortIndexInput] = AudioConverter::Create(pcmEncoding, codecPcmEncoding);
if (mConverter[kPortIndexInput] != NULL) {
ALOGD("%s: encoder %s input format pcm encoding converter from %d to %d",
__func__, mComponentName.c_str(), pcmEncoding, codecPcmEncoding);
mInputFormat->setInt32("pcm-encoding", pcmEncoding);
}
} else {
(void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
if (mConverter[kPortIndexOutput] != NULL) {
ALOGD("%s: decoder %s output format pcm encoding converter from %d to %d",
__func__, mComponentName.c_str(), codecPcmEncoding, pcmEncoding);
mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
}
}
}
return err;
}
以上方法主要调用如下方法:
1、setComponentRole
2、setPortMode
3、OMXNode的setConfig,这部分在OMX中分析
4、OMXNode的prepareForAdaptivePlayback,这部分在OMX中分析
5、setupVideoEncoder
6、setupAACCodec
7、setupOpusCodec
8、setupFlacCodec
9、setupRawAudioFormat
10、setupAC3Codec
11、setupEAC3Codec
12、setupAC4Codec
13、setMinBufferSize
14、setOperatingRate
15、setVendorParameters
16、AudioConverter的Create
下面分别进行分析:
ACodec::setComponentRole
//frameworks/av/media/libstagefright/ACodec.cpp
// Resolves the standard OMX component role for the given mime type and
// direction (encoder vs. decoder) and applies it to the component.
// Returns BAD_VALUE when no standard role exists for the mime type;
// otherwise propagates the result of applying the role (failures are
// logged but still returned to the caller).
status_t ACodec::setComponentRole(
        bool isEncoder, const char *mime) {
    const char *const standardRole = GetComponentRole(isEncoder, mime);
    if (standardRole == NULL) {
        return BAD_VALUE;
    }
    const status_t result = SetComponentRole(mOMXNode, standardRole);
    ALOGW_IF(result != OK,
            "[%s] Failed to set standard component role '%s'.",
            mComponentName.c_str(), standardRole);
    return result;
}
调用SetComponentRole方法:
//frameworks/av/media/libstagefright/omx/OMXUtils.cpp
// Pushes a standard component role string to an OMX node via the
// OMX_IndexParamStandardComponentRole parameter.
status_t SetComponentRole(const sp<IOMXNode> &omxNode, const char *role) {
    OMX_PARAM_COMPONENTROLETYPE params;
    InitOMXParams(&params);
    // cRole is a fixed-size buffer: copy at most size-1 bytes and force
    // NUL-termination so over-long role names cannot overrun it.
    strncpy(reinterpret_cast<char *>(params.cRole), role,
            OMX_MAX_STRINGNAME_SIZE - 1);
    params.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
    return omxNode->setParameter(
            OMX_IndexParamStandardComponentRole, &params, sizeof(params));
}
ACodec::setPortMode
调用IOMXNode的setPortMode设置Port模式,OMX部分在OMX章节继续分析。
//frameworks/av/media/libstagefright/ACodec.cpp
// Switches the given port (input or output) into |mode| and, on success,
// records the active mode in mPortMode so later code can query it.
status_t ACodec::setPortMode(int32_t portIndex, IOMX::PortMode mode) {
    const status_t err = mOMXNode->setPortMode(portIndex, mode);
    if (err == OK) {
        mPortMode[portIndex] = mode;
        return OK;
    }
    ALOGE("[%s] setPortMode on %s to %s failed w/ err %d",
            mComponentName.c_str(),
            portIndex == kPortIndexInput ? "input" : "output",
            asString(mode),
            err);
    return err;
}
ACodec::setupVideoEncoder
通过IOMXNode的getParameter/setParameter配置编码器输入、输出端口的定义参数,OMX部分在OMX章节继续分析。
//frameworks/av/media/libstagefright/ACodec.cpp
// Configures an OMX video encoder from the keys in |msg|:
//  - input (raw video) port: color format, resolution, stride/slice height
//    and frame rate;
//  - output (compressed) port: coding type derived from |mime| plus bitrate;
//  - codec-specific parameters, color aspects and HDR static info.
// Negotiated values are reflected into |outputFormat| / |inputFormat|.
// Returns OK on success or the first fatal OMX error; several steps below
// are deliberately best-effort (failures logged and ignored).
status_t ACodec::setupVideoEncoder(
const char *mime, const sp<AMessage> &msg,
sp<AMessage> &outputFormat, sp<AMessage> &inputFormat) {
int32_t tmp;
// "color-format" (the raw input format) is mandatory for an encoder.
if (!msg->findInt32("color-format", &tmp)) {
return INVALID_OPERATION;
}
OMX_COLOR_FORMATTYPE colorFormat =
static_cast<OMX_COLOR_FORMATTYPE>(tmp);
status_t err = setVideoPortFormatType(
kPortIndexInput, OMX_VIDEO_CodingUnused, colorFormat);
if (err != OK) {
ALOGE("[%s] does not support color format %d",
mComponentName.c_str(), colorFormat);
return err;
}
/* Input port configuration */
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
def.nPortIndex = kPortIndexInput;
// Read the current input port definition; the fields patched below are
// written back with setParameter.
err = mOMXNode->getParameter(
OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
OMX_VIDEO_CONTROLRATETYPE bitrateMode;
int32_t width, height, bitrate = 0, quality;
// Width, height and bitrate-control info are all mandatory.
if (!msg->findInt32("width", &width)
|| !msg->findInt32("height", &height)
|| !findVideoBitrateControlInfo(
msg, &bitrateMode, &bitrate, &quality)) {
return INVALID_OPERATION;
}
video_def->nFrameWidth = width;
video_def->nFrameHeight = height;
// Stride and slice height default to width/height when not specified.
int32_t stride;
if (!msg->findInt32("stride", &stride)) {
stride = width;
}
video_def->nStride = stride;
int32_t sliceHeight;
if (!msg->findInt32("slice-height", &sliceHeight)) {
sliceHeight = height;
}
video_def->nSliceHeight = sliceHeight;
// 3/2 factor: buffer sized for a 4:2:0 (12 bits per pixel) raw frame.
def.nBufferSize = (video_def->nStride * video_def->nSliceHeight * 3) / 2;
// "frame-rate" may be specified as either a float or an int32.
float framerate;
if (!msg->findFloat("frame-rate", &framerate)) {
int32_t tmp;
if (!msg->findInt32("frame-rate", &tmp)) {
return INVALID_OPERATION;
}
mFps = (double)tmp;
} else {
mFps = (double)framerate;
}
// propagate framerate to the output so that the muxer has it
outputFormat->setInt32("frame-rate", (int32_t)mFps);
// OMX expresses the frame rate in Q16 fixed point.
video_def->xFramerate = (OMX_U32)(mFps * 65536);
video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
// this is redundant as it was already set up in setVideoPortFormatType
// FIXME for now skip this only for flexible YUV formats
if (colorFormat != OMX_COLOR_FormatYUV420Flexible) {
video_def->eColorFormat = colorFormat;
}
err = mOMXNode->setParameter(
OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
ALOGE("[%s] failed to set input port definition parameters.",
mComponentName.c_str());
return err;
}
/* Output port configuration */
OMX_VIDEO_CODINGTYPE compressionFormat;
err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
if (err != OK) {
return err;
}
err = setVideoPortFormatType(
kPortIndexOutput, compressionFormat, OMX_COLOR_FormatUnused);
if (err != OK) {
ALOGE("[%s] does not support compression format %d",
mComponentName.c_str(), compressionFormat);
return err;
}
// |def| is reused for the output port: same resolution, but the
// compressed coding type and target bitrate instead of a color format.
def.nPortIndex = kPortIndexOutput;
err = mOMXNode->getParameter(
OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
video_def->nFrameWidth = width;
video_def->nFrameHeight = height;
video_def->xFramerate = 0;
video_def->nBitrate = bitrate;
video_def->eCompressionFormat = compressionFormat;
video_def->eColorFormat = OMX_COLOR_FormatUnused;
err = mOMXNode->setParameter(
OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
ALOGE("[%s] failed to set output port definition parameters.",
mComponentName.c_str());
return err;
}
// Intra refresh is optional: a failure here is logged and ignored.
int32_t intraRefreshPeriod = 0;
if (msg->findInt32("intra-refresh-period", &intraRefreshPeriod)
&& intraRefreshPeriod >= 0) {
err = setIntraRefreshPeriod((uint32_t)intraRefreshPeriod, true);
if (err != OK) {
ALOGI("[%s] failed setIntraRefreshPeriod. Failure is fine since this key is optional",
mComponentName.c_str());
err = OK;
}
}
configureEncoderLatency(msg);
// Codec-specific encoder parameters.
switch (compressionFormat) {
case OMX_VIDEO_CodingMPEG4:
err = setupMPEG4EncoderParameters(msg);
break;
case OMX_VIDEO_CodingH263:
err = setupH263EncoderParameters(msg);
break;
case OMX_VIDEO_CodingAVC:
err = setupAVCEncoderParameters(msg);
break;
case OMX_VIDEO_CodingHEVC:
case OMX_VIDEO_CodingImageHEIC:
err = setupHEVCEncoderParameters(msg, outputFormat);
break;
case OMX_VIDEO_CodingVP8:
case OMX_VIDEO_CodingVP9:
err = setupVPXEncoderParameters(msg, outputFormat);
break;
default:
break;
}
if (err != OK) {
return err;
}
// Set up color aspects on input, but propagate them to the output format, as they will
// not be read back from encoder.
err = setColorAspectsForVideoEncoder(msg, outputFormat, inputFormat);
if (err == ERROR_UNSUPPORTED) {
ALOGI("[%s] cannot encode color aspects. Ignoring.", mComponentName.c_str());
err = OK;
}
if (err != OK) {
return err;
}
// HDR static metadata support is likewise optional.
err = setHDRStaticInfoForVideoCodec(kPortIndexInput, msg, outputFormat);
if (err == ERROR_UNSUPPORTED) { // support is optional
ALOGI("[%s] cannot encode HDR static metadata. Ignoring.", mComponentName.c_str());
err = OK;
}
if (err != OK) {
return err;
}
switch (compressionFormat) {
case OMX_VIDEO_CodingAVC:
case OMX_VIDEO_CodingHEVC:
// Temporal layering is best-effort; failures are ignored.
err = configureTemporalLayers(msg, true /* inConfigure */, outputFormat);
if (err != OK) {
err = OK; // ignore failure
}
break;
case OMX_VIDEO_CodingVP8:
case OMX_VIDEO_CodingVP9:
// TODO: do we need to support android.generic layering? webrtc layering is
// already set up in setupVPXEncoderParameters.
break;
default:
break;
}
if (err == OK) {
ALOGI("setupVideoEncoder succeeded");
}
// Video should be encoded as stand straight because RTP protocol
// can provide rotation information only if CVO is supported.
// This needs to be added to support non-CVO case for video streaming scenario.
int32_t rotation = 0;
if (msg->findInt32("rotation-degrees", &rotation)) {
OMX_CONFIG_ROTATIONTYPE config;
InitOMXParams(&config);
config.nPortIndex = kPortIndexOutput;
// NOTE: this |err| shadows the outer one, so get/setConfig failures in
// this block are only warned about and do not change the return value.
status_t err = mOMXNode->getConfig(
(OMX_INDEXTYPE)OMX_IndexConfigCommonRotate, &config, sizeof(config));
if (err != OK) {
ALOGW("Failed to getConfig of OMX_IndexConfigCommonRotate(err %d)", err);
}
config.nRotation = rotation;
err = mOMXNode->setConfig(
(OMX_INDEXTYPE)OMX_IndexConfigCommonRotate, &config, sizeof(config));
ALOGD("Applying encoder-rotation=[%d] to video encoder.", config.nRotation);
if (err != OK) {
ALOGW("Failed to setConfig of OMX_IndexConfigCommonRotate(err %d)", err);
}
}
return err;
}
ACodec::setupAACCodec
//frameworks/av/media/libstagefright/ACodec.cpp
// Configures the AAC codec.
// Encoder path: raw PCM on the input port, AAC on the output port; applies
// profile, bitrate and SBR settings via OMX_AUDIO_PARAM_AACPROFILETYPE.
// Decoder path: AAC (optionally ADTS-framed) on the input port, raw PCM on
// the output port, plus optional DRC/presentation parameters.
// Unknown optional values arrive as the caller's -1/-2 sentinels and are
// forwarded to the component as-is.
status_t ACodec::setupAACCodec(
bool encoder, int32_t numChannels, int32_t sampleRate,
int32_t bitRate, int32_t aacProfile, bool isADTS, int32_t sbrMode,
int32_t maxOutputChannelCount, const drcParams_t& drc,
int32_t pcmLimiterEnable) {
// ADTS framing only makes sense on a decoder's input.
if (encoder && isADTS) {
return -EINVAL;
}
// Configure the raw PCM side: input port for an encoder, output port for
// a decoder.
status_t err = setupRawAudioFormat(
encoder ? kPortIndexInput : kPortIndexOutput,
sampleRate,
numChannels);
if (err != OK) {
return err;
}
if (encoder) {
// --- Encoder: configure the AAC output port. ---
err = selectAudioPortFormat(kPortIndexOutput, OMX_AUDIO_CodingAAC);
if (err != OK) {
return err;
}
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = kPortIndexOutput;
err = mOMXNode->getParameter(
OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
def.format.audio.bFlagErrorConcealment = OMX_TRUE;
def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
err = mOMXNode->setParameter(
OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
// Read-modify-write the AAC profile parameters on the output port.
OMX_AUDIO_PARAM_AACPROFILETYPE profile;
InitOMXParams(&profile);
profile.nPortIndex = kPortIndexOutput;
err = mOMXNode->getParameter(
OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (err != OK) {
return err;
}
profile.nChannels = numChannels;
profile.eChannelMode =
(numChannels == 1)
? OMX_AUDIO_ChannelModeMono: OMX_AUDIO_ChannelModeStereo;
profile.nSampleRate = sampleRate;
profile.nBitRate = bitRate;
profile.nAudioBandWidth = 0;
profile.nFrameLength = 0;
profile.nAACtools = OMX_AUDIO_AACToolAll;
profile.nAACERtools = OMX_AUDIO_AACERNone;
profile.eAACProfile = (OMX_AUDIO_AACPROFILETYPE) aacProfile;
profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
// Translate the caller's sbrMode into the Android SSBR/DSBR tool bits.
switch (sbrMode) {
case 0:
// disable sbr
profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
break;
case 1:
// enable single-rate sbr
profile.nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
break;
case 2:
// enable dual-rate sbr
profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
profile.nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
break;
case -1:
// enable both modes -> the codec will decide which mode should be used
profile.nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
profile.nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
break;
default:
// unsupported sbr mode
return BAD_VALUE;
}
err = mOMXNode->setParameter(
OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (err != OK) {
return err;
}
return err;
}
// --- Decoder: configure the AAC input port. ---
OMX_AUDIO_PARAM_AACPROFILETYPE profile;
InitOMXParams(&profile);
profile.nPortIndex = kPortIndexInput;
err = mOMXNode->getParameter(
OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (err != OK) {
return err;
}
profile.nChannels = numChannels;
profile.nSampleRate = sampleRate;
profile.eAACStreamFormat =
isADTS
? OMX_AUDIO_AACStreamFormatMP4ADTS
: OMX_AUDIO_AACStreamFormatMP4FF;
// DRC / presentation parameters (all optional for the component).
OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE presentation;
InitOMXParams(&presentation);
presentation.nMaxOutputChannels = maxOutputChannelCount;
presentation.nDrcCut = drc.drcCut;
presentation.nDrcBoost = drc.drcBoost;
presentation.nHeavyCompression = drc.heavyCompression;
presentation.nTargetReferenceLevel = drc.targetRefLevel;
presentation.nEncodedTargetLevel = drc.encodedTargetLevel;
presentation.nPCMLimiterEnable = pcmLimiterEnable;
presentation.nDrcEffectType = drc.effectType;
presentation.nDrcAlbumMode = drc.albumMode;
presentation.nDrcOutputLoudness = drc.outputLoudness;
status_t res = mOMXNode->setParameter(
OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (res == OK) {
// optional parameters, will not cause configuration failure
if (mOMXNode->setParameter(
(OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
&presentation, sizeof(presentation)) == ERROR_UNSUPPORTED) {
// prior to 9.0 we used a different config structure and index
OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation8;
InitOMXParams(&presentation8);
presentation8.nMaxOutputChannels = presentation.nMaxOutputChannels;
presentation8.nDrcCut = presentation.nDrcCut;
presentation8.nDrcBoost = presentation.nDrcBoost;
presentation8.nHeavyCompression = presentation.nHeavyCompression;
presentation8.nTargetReferenceLevel = presentation.nTargetReferenceLevel;
presentation8.nEncodedTargetLevel = presentation.nEncodedTargetLevel;
presentation8.nPCMLimiterEnable = presentation.nPCMLimiterEnable;
// Best-effort: the result of the legacy index is deliberately ignored.
(void)mOMXNode->setParameter(
(OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
&presentation8, sizeof(presentation8));
}
} else {
ALOGW("did not set AudioAndroidAacPresentation due to error %d when setting AudioAac", res);
}
mSampleRate = sampleRate;
return res;
}
AudioConverter::Create
最后是创建AudioConverter,它的作用是在音频 PCM 格式之间进行转换:
//frameworks/av/media/libstagefright/DataConverter.cpp
// Factory for a PCM sample-format converter. Returns NULL when no
// conversion is needed (both encodings have the same sample size) or when
// either sample size is unknown (reported as 0); otherwise the caller owns
// the returned converter.
AudioConverter* AudioConverter::Create(AudioEncoding source, AudioEncoding target) {
    const uint32_t srcSampleSize = getAudioSampleSize(source);
    const uint32_t tgtSampleSize = getAudioSampleSize(target);
    if (srcSampleSize == 0 || tgtSampleSize == 0
            || srcSampleSize == tgtSampleSize) {
        return NULL;
    }
    return new AudioConverter(source, srcSampleSize, target, tgtSampleSize);
}