Qualcomm Platform Camera Recording -- A Brief Analysis of the Data Flow

             Camera recording is done through MediaRecorder. This post is a simple code walkthrough and analysis of how the data flows during Camera recording, based on the Qualcomm platform and the Android 4.4 source code:

     >>>>>>>>>> Starting from MediaRecorder's start() method, the call sequence traced below is, in brief:

     MediaRecorder.java start()  ->  android_media_MediaRecorder.cpp android_media_MediaRecorder_start()  ->  MediaRecorder.cpp MediaRecorder::start()  ->  MediaRecorderClient.cpp MediaRecorderClient::start()  ->  StagefrightRecorder.cpp StagefrightRecorder::start() / startMPEG4Recording()  ->  MPEG4Writer.cpp MPEG4Writer::start()

     >>>>>>>>>>>>>> And the bottom-up path along which the Camera data is called back is, in brief:

     Camera HAL  ->  CameraHardwareInterface.h __data_cb_timestamp()  ->  CameraClient.cpp dataCallbackTimestamp()  ->  Camera.cpp (the app-side ICameraClient)  ->  CameraSource.cpp ProxyListener::dataCallbackTimestamp()  ->  CameraSource::dataCallbackTimestamp() queues the frame for CameraSource::read()

       >>>>>>>>>>>>>> Below is a simple walkthrough of the code

          When the upper layer records with MediaRecorder, it configures the relevant parameters and then calls the start() function the framework provides to begin recording......

  MediaRecorder.java

public native void start() throws IllegalStateException;
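
       Before start(), a typical recording session has already configured the camera, the sources, the output format, the encoders and the output file. As a point of reference, the sketch below shows that call sequence against the native libmedia MediaRecorder (whose method names mirror the Java API); the concrete parameter values and the already-opened camera/preview handles are placeholders, not anything taken from this walkthrough:

// Illustrative sketch only: the order of calls matters; the values are assumptions.
#include <camera/ICamera.h>
#include <camera/ICameraRecordingProxy.h>
#include <gui/IGraphicBufferProducer.h>
#include <media/mediarecorder.h>

using namespace android;

static status_t startVideoRecording(const sp<ICamera>& camera,
                                    const sp<ICameraRecordingProxy>& proxy,
                                    const sp<IGraphicBufferProducer>& previewSurface,
                                    int outputFd) {
    sp<MediaRecorder> recorder = new MediaRecorder();
    recorder->setCamera(camera, proxy);               // reuse the camera the app already opened
    recorder->setVideoSource(VIDEO_SOURCE_CAMERA);
    recorder->setAudioSource(AUDIO_SOURCE_CAMCORDER);
    recorder->setOutputFormat(OUTPUT_FORMAT_MPEG_4);  // leads to startMPEG4Recording() below
    recorder->setVideoEncoder(VIDEO_ENCODER_H264);
    recorder->setAudioEncoder(AUDIO_ENCODER_AAC);
    recorder->setVideoSize(1280, 720);                // placeholder resolution
    recorder->setVideoFrameRate(30);                  // placeholder frame rate
    recorder->setOutputFile(outputFd, 0, 0);          // fd opened by the caller
    recorder->setPreviewSurface(previewSurface);
    status_t err = recorder->prepare();               // error handling abbreviated
    if (err != OK) return err;
    return recorder->start();                         // the call traced in this article
}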

        Below are the definition and implementation of the start() method at the JNI layer

android_media_MediaRecorder.cpp

{"start",                "()V",                             (void *)android_media_MediaRecorder_start},


static void
android_media_MediaRecorder_start(JNIEnv *env, jobject thiz)
{
    ALOGV("start");
    sp<MediaRecorder> mr = getMediaRecorder(env, thiz);
    process_media_recorder_call(env, mr->start(), "java/lang/RuntimeException", "start failed.");
}
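
       The process_media_recorder_call() helper used above just maps a non-OK status_t to a Java exception. A minimal sketch of such a helper (paraphrased, not claimed to be the verbatim AOSP implementation):

// Sketch of the status_t -> Java exception mapping done by process_media_recorder_call().
static bool process_media_recorder_call(JNIEnv *env, status_t opStatus,
                                        const char *exception, const char *message)
{
    if (opStatus == (status_t) INVALID_OPERATION) {
        jniThrowException(env, "java/lang/IllegalStateException", NULL);
        return true;
    } else if (opStatus != (status_t) OK) {
        jniThrowException(env, exception, message);
        return true;
    }
    return false;   // no error, nothing thrown
}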

       In android_media_MediaRecorder_start() of android_media_MediaRecorder.cpp, mr->start() invokes MediaRecorder's start() method.

MediaRecorder.cpp

status_t MediaRecorder::start()
{
    ALOGV("start");
    if (mMediaRecorder == NULL) {
        ALOGE("media recorder is not initialized yet");
        return INVALID_OPERATION;
    }
    if (!(mCurrentState & (MEDIA_RECORDER_PREPARED | MEDIA_RECORDER_PAUSED))) {
        ALOGE("start called in an invalid state: %d", mCurrentState);
        return INVALID_OPERATION;
    }


    status_t ret = mMediaRecorder->start();
    if (OK != ret) {
        ALOGE("start failed: %d", ret);
        mCurrentState = MEDIA_RECORDER_ERROR;
        return ret;
    }
    mCurrentState = MEDIA_RECORDER_RECORDING;
    return ret;
}

       In MediaRecorder.cpp's start() method, mMediaRecorder->start() in turn invokes the start() method of MediaRecorderClient.cpp:
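
       Before following the call into mediaserver, note where mMediaRecorder comes from: it is an IMediaRecorder Binder proxy obtained from MediaPlayerService when the native MediaRecorder is constructed, which is why the call lands in MediaRecorderClient inside mediaserver. An abbreviated sketch of the constructor (paraphrased from the 4.4 sources, not verbatim):

// MediaRecorder.cpp (constructor, abbreviated): mMediaRecorder is the IMediaRecorder
// Binder proxy for the MediaRecorderClient that MediaPlayerService creates in mediaserver.
MediaRecorder::MediaRecorder()
{
    ALOGV("constructor");

    const sp<IMediaPlayerService>& service(getMediaPlayerService());
    if (service != NULL) {
        mMediaRecorder = service->createMediaRecorder(getpid());
    }
    if (mMediaRecorder != NULL) {
        mCurrentState = MEDIA_RECORDER_IDLE;
    }
  ......
}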

MediaRecorderClient.cpp

MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service, pid_t pid)
{
    ALOGV("Client constructor");
    mPid = pid;
    mRecorder = new StagefrightRecorder;
    mMediaPlayerService = service;
}

status_t MediaRecorderClient::start()
{
    ALOGV("start");
    Mutex::Autolock lock(mLock);
    if (mRecorder == NULL) {
        ALOGE("recorder is not initialized");
        return NO_INIT;
    }
    return mRecorder->start();


}

      In MediaRecorderClient.cpp, mRecorder->start() invokes the start() method of StagefrightRecorder.cpp (mRecorder is the StagefrightRecorder created in the constructor shown above).

StagefrightRecorder.cpp

status_t StagefrightRecorder::start() {
    CHECK_GE(mOutputFd, 0);


    if (mRecPaused == true) {
        status_t err = mWriter->start();
        if (err != OK) {
            ALOGE("Writer start in StagefrightRecorder pause failed");
            return err;
        }


        err = setSourcePause(false);
        if (err != OK) {
            ALOGE("Source start after pause failed");
            return err;
        }


        mRecPaused = false;
        return OK;
    }
    // Get UID here for permission checking
    mClientUid = IPCThreadState::self()->getCallingUid();
    if (mWriter != NULL) {
        ALOGE("File writer is not avaialble");
        return UNKNOWN_ERROR;
    }


    status_t status = OK;


    //check permissions
    if (mAppOpsManager.noteOp(AppOpsManager::OP_RECORD_AUDIO, mClientUid, mClientName)
        != AppOpsManager::MODE_ALLOWED) {
        return status;
    }


    if(AUDIO_SOURCE_FM_RX_A2DP == mAudioSource)
        return startFMA2DPWriter();
    switch (mOutputFormat) {
        case OUTPUT_FORMAT_DEFAULT:
        case OUTPUT_FORMAT_THREE_GPP:
        case OUTPUT_FORMAT_MPEG_4:
            status = startMPEG4Recording();
            break;


        case OUTPUT_FORMAT_AMR_NB:
        case OUTPUT_FORMAT_AMR_WB:
            status = startAMRRecording();
            break;


        case OUTPUT_FORMAT_AAC_ADIF:
        case OUTPUT_FORMAT_AAC_ADTS:
            status = startAACRecording();
            break;


        case OUTPUT_FORMAT_RTP_AVP:
            status = startRTPRecording();
            break;


        case OUTPUT_FORMAT_MPEG2TS:
            status = startMPEG2TSRecording();
            break;


#ifdef ENABLE_AV_ENHANCEMENTS
        case OUTPUT_FORMAT_QCP:
            status = startExtendedRecording( );
            break;
#endif


        case OUTPUT_FORMAT_WAVE:
            status = startWAVERecording( );
            break;


        default:
            ALOGE("Unsupported output file format: %d", mOutputFormat);
            status = UNKNOWN_ERROR;
            break;
    }


    if ((status == OK) && (!mStarted)) {
        mStarted = true;


        uint32_t params = IMediaPlayerService::kBatteryDataCodecStarted;
        if (mAudioSource != AUDIO_SOURCE_CNT) {
            params |= IMediaPlayerService::kBatteryDataTrackAudio;
        }
        if (mVideoSource != VIDEO_SOURCE_LIST_END) {
            params |= IMediaPlayerService::kBatteryDataTrackVideo;
        }


        addBatteryData(params);
    }


    return status;
}

status_t StagefrightRecorder::startMPEG4Recording() {


    int32_t totalBitRate;
    status_t err = setupMPEG4Recording(
            mOutputFd, mVideoWidth, mVideoHeight,
            mVideoBitRate, &totalBitRate, &mWriter);
    if (err != OK) {
        return err;
    }


    int64_t startTimeUs = systemTime() / 1000;
    sp<MetaData> meta = new MetaData;
    setupMPEG4MetaData(startTimeUs, totalBitRate, &meta);


    err = mWriter->start(meta.get());
    if (err != OK) {
        return err;
    }


    return OK;
}

        In StagefrightRecorder's start() method, startMPEG4Recording() is called according to mOutputFormat. Inside startMPEG4Recording(), setupMPEG4Recording() is called to set up the video encoder (setupVideoEncoder) and to create the file writer (a MediaWriter, here an MPEG4Writer); finally, mWriter->start() calls into MPEG4Writer's start() method.
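
       For reference, the core of setupMPEG4Recording() is: create the MPEG4Writer, build the media source (here a CameraSource) and the video encoder on top of it, and add the encoder to the writer as a track; the audio encoder, if any, becomes a second track. An abbreviated sketch (paraphrased from the 4.4 sources; a specific Qualcomm BSP may differ in details):

status_t StagefrightRecorder::setupMPEG4Recording(
        int outputFd,
        int32_t videoWidth, int32_t videoHeight,
        int32_t videoBitRate,
        int32_t *totalBitRate,
        sp<MediaWriter> *mediaWriter) {
    mediaWriter->clear();
    *totalBitRate = 0;
    sp<MediaWriter> writer = new MPEG4Writer(outputFd);

    if (mVideoSource < VIDEO_SOURCE_LIST_END) {
        sp<MediaSource> mediaSource;           // CameraSource (or SurfaceMediaSource)
        status_t err = setupMediaSource(&mediaSource);
        if (err != OK) return err;

        sp<MediaSource> encoder;               // OMX video encoder wrapping mediaSource
        err = setupVideoEncoder(mediaSource, videoBitRate, &encoder);
        if (err != OK) return err;

        writer->addSource(encoder);            // becomes an MPEG4Writer::Track
        *totalBitRate += videoBitRate;
    }

    // The audio encoder, if any, is added as another track of the same writer.
    if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
        status_t err = setupAudioEncoder(writer);
        if (err != OK) return err;
        *totalBitRate += mAudioBitRate;
    }

    *mediaWriter = writer;
    return OK;
}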

        In MPEG4Writer.cpp, a writer thread and a thread for each track are started. For the detailed flow, see my earlier post "Android 高通平台Camera录制--MPEG4Writer.cpp 简单跟读" (http://blog.csdn.net/mr_zjc/article/details/46822833); it is not covered in detail here.

MPEG4Writer.cpp

status_t MPEG4Writer::start(MetaData *param) {
    ......
    status_t err = startWriterThread();
    if (err != OK) {
        return err;
    }

    err = startTracks(param);
    if (err != OK) {
        return err;
    }

  ......
}
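
       startWriterThread() spawns the single writer thread that drains encoded chunks from all tracks and writes them into the output file, while startTracks() (below) starts one thread per track. An abbreviated sketch of startWriterThread() (paraphrased from the 4.4 sources):

status_t MPEG4Writer::startWriterThread() {
    mDone = false;
    mIsFirstChunk = true;

    // One ChunkInfo per track: the track threads queue chunks here, and the
    // writer thread dequeues them and writes them to the file descriptor.
    for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it) {
        ChunkInfo info;
        info.mTrack = *it;
        info.mPrevChunkTimestampUs = 0;
        info.mMaxInterChunkDurUs = 0;
        mChunkInfos.push_back(info);
    }

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
    // MPEG4Writer::ThreadWrapper -> threadFunc(); distinct from Track::ThreadWrapper below.
    pthread_create(&mThread, &attr, ThreadWrapper, this);
    pthread_attr_destroy(&attr);
    mWriterThreadStarted = true;
    return OK;
}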

status_t MPEG4Writer::startTracks(MetaData *params) {
    if (mTracks.empty()) {
        ALOGE("No source added");
        return INVALID_OPERATION;
    }


    for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it) {
        status_t err = (*it)->start(params);


        if (err != OK) {
            for (List<Track *>::iterator it2 = mTracks.begin();
                 it2 != it; ++it2) {
                (*it2)->stop();
            }


            return err;
        }
    }
    return OK;
}

status_t MPEG4Writer::Track::start(MetaData *params) {
  ......

    status_t err = mSource->start(meta.get());
    if (err != OK) {
        mDone = mReachedEOS = true;
        return err;
    }
  ......

    pthread_create(&mThread, &attr, ThreadWrapper, this);
    pthread_attr_destroy(&attr);


    mHFRRatio = ExtendedUtils::HFR::getHFRRatio(mMeta);


    return OK;
}
void *MPEG4Writer::Track::ThreadWrapper(void *me) {
    Track *track = static_cast<Track *>(me);


    status_t err = track->threadEntry();
    return (void *) err;
}

status_t MPEG4Writer::Track::threadEntry() { 
......
    while (!mDone && (err = mSource->read(&buffer)) == OK) {
       ......
     }
......
}
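
       Note that mSource here is not CameraSource itself but the video encoder created by setupVideoEncoder(); the encoder's read() in turn pulls raw frames from CameraSource::read(), which we will see below. Inside the loop, each encoded MediaBuffer is copied, the original buffer is released back toward the encoder, and completed chunks are handed to the writer thread. A simplified, hypothetical sketch of that loop (the name sketchTrackLoop is illustrative only; timestamp, codec-config and chunking details are omitted):

// Simplified sketch of the per-track consumer loop in MPEG4Writer::Track::threadEntry().
static void sketchTrackLoop(const sp<MediaSource> &source /* the video encoder */) {
    MediaBuffer *buffer;
    while (source->read(&buffer) == OK) {
        // Copy the payload so the encoder's buffer can be returned immediately.
        MediaBuffer *copy = new MediaBuffer(buffer->range_length());
        memcpy(copy->data(),
               (uint8_t *) buffer->data() + buffer->range_offset(),
               buffer->range_length());
        copy->set_range(0, buffer->range_length());

        int64_t timestampUs = 0;
        buffer->meta_data()->findInt64(kKeyTime, &timestampUs);

        buffer->release();   // lets the encoder (and ultimately CameraSource) recycle it
        buffer = NULL;

        // The real code accumulates 'copy' into mChunkSamples and, once the chunk
        // duration is reached, calls bufferChunk(timestampUs) to wake the writer
        // thread, which flushes the chunk to the file.
    }
}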


>>>>>>>>> Now let's look at how the Camera data coming from the lower layers is called back, step by step

       CameraHardwareInterface.h is the HAL-layer interface the framework provides; Camera data callbacks pass through it. The recording-data callback used for MediaRecorder is __data_cb_timestamp. Note, however, that on the Qualcomm platform the callback only provides the address of the data (a handle describing the frame); it does not pass the actual frame contents up.

CameraHardwareInterface.h

static void __data_cb_timestamp(nsecs_t timestamp, int32_t msg_type,
                             const camera_memory_t *data, unsigned index,
                             void *user)
    {
        ALOGD("%s", __FUNCTION__);
        CameraHardwareInterface *__this =
                static_cast<CameraHardwareInterface *>(user);
        // Start refcounting the heap object from here on.  When the clients
        // drop all references, it will be destroyed (as well as the enclosed
        // MemoryHeapBase.


        sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));

        if (index >= mem->mNumBufs) {
            ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
                 index, mem->mNumBufs);
            return;
        }

        ALOGD("***** hihi:__data_cb_timestamp();mem->mBufSize is:%d;mem->mNumBufs is:%d;index is:%d",mem->mBufSize,mem->mNumBufs,index);

        __this->mDataCbTimestamp(timestamp, msg_type, mem->mBuffers[index], __this->mCbUser);
    }
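
       How does __data_cb_timestamp() get invoked in the first place? When the camera device is opened, CameraHardwareInterface::setCallbacks() registers these static trampoline functions with the vendor HAL through camera_device_ops; the HAL then calls __data_cb_timestamp() for every recording frame (CAMERA_MSG_VIDEO_FRAME), and the trampoline forwards it to mDataCbTimestamp, i.e. back into CameraClient. An abbreviated sketch (paraphrased from the 4.4 header):

    // CameraHardwareInterface.h (abbreviated): register the static trampolines with the HAL.
    void setCallbacks(notify_callback notify_cb,
                      data_callback data_cb,
                      data_callback_timestamp data_cb_timestamp,
                      void* user)
    {
        mNotifyCb = notify_cb;
        mDataCb = data_cb;
        mDataCbTimestamp = data_cb_timestamp;
        mCbUser = user;

        if (mDevice->ops->set_callbacks) {
            mDevice->ops->set_callbacks(mDevice,
                                        __notify_cb,
                                        __data_cb,
                                        __data_cb_timestamp,
                                        __get_memory,
                                        this);
        }
    }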

      From CameraHardwareInterface.h the recording data is called back upward; the next stop is CameraClient.cpp, whose corresponding recording-data callback is dataCallbackTimestamp():

CameraClient.cpp

void CameraClient::dataCallbackTimestamp(nsecs_t timestamp,
        int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
  ......
    // (error path omitted: a null dataPtr is reported via
    //  client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);)
    client->handleGenericDataTimestamp(timestamp, msgType, dataPtr);
  ......
}

void CameraClient::handleGenericDataTimestamp(nsecs_t timestamp,
    int32_t msgType, const sp<IMemory>& dataPtr) {
    sp<ICameraClient> c = mRemoteCallback;
    mLock.unlock();
    if (c != 0) {
        c->dataCallbackTimestamp(timestamp, msgType, dataPtr);
    }
}
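
       The remote ICameraClient (mRemoteCallback) is, in the usual camcorder case where the application hands its opened Camera to MediaRecorder, the app-side Camera object. Its dataCallbackTimestamp() simply forwards the frame to the recording proxy listener that CameraSource registered when recording started. An abbreviated sketch (paraphrased from Camera.cpp in 4.4):

// Camera.cpp (abbreviated): callback from the camera service when a timestamped
// recording frame is ready.
void Camera::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType,
                                   const sp<IMemory>& dataPtr)
{
    // If CameraSource registered a recording proxy listener, forward the frame to it.
    sp<ICameraRecordingProxyListener> proxylistener;
    {
        Mutex::Autolock _l(mLock);
        proxylistener = mRecordingProxyListener;
    }
    if (proxylistener != NULL) {
        proxylistener->dataCallbackTimestamp(timestamp, msgType, dataPtr);
        return;
    }

    // Otherwise fall back to the normal CameraListener, or drop the frame.
    sp<CameraListener> listener;
    {
        Mutex::Autolock _l(mLock);
        listener = mListener;
    }
    if (listener != NULL) {
        listener->postDataTimestamp(timestamp, msgType, dataPtr);
    } else {
        ALOGW("No listener was set. Drop a recording frame.");
        releaseRecordingFrame(dataPtr);
    }
}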

     The callback then continues up to ProxyListener::dataCallbackTimestamp() in CameraSource.cpp:

CameraSource.cpp

void CameraSource::ProxyListener::dataCallbackTimestamp(
        nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) {
    mSource->dataCallbackTimestamp(timestamp / 1000, msgType, dataPtr);
}

void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
        int32_t msgType, const sp<IMemory> &data) {

    Mutex::Autolock autoLock(mLock);
    if (!mStarted || (mNumFramesReceived == 0 && timestampUs < mStartTimeUs)) {
        ALOGV("Drop frame at %lld/%lld us", timestampUs, mStartTimeUs);
        releaseOneRecordingFrame(data);
        return;
    }


    if (mRecPause == true) {
        if(!mFramesReceived.empty()) {
            ALOGV("releaseQueuedFrames - #Queued Frames : %d", mFramesReceived.size());
            releaseQueuedFrames();
        }
        ALOGV("release One Video Frame for Pause : %lld us", timestampUs);
        releaseOneRecordingFrame(data);
        mPauseEndTimeUs = timestampUs;
        return;
    }
    timestampUs -= mPauseAdjTimeUs;
    ALOGV("dataCallbackTimestamp: AdjTimestamp %lld us", timestampUs);
    if (mNumFramesReceived > 0) {
        CHECK(timestampUs > mLastFrameTimestampUs);
        if (timestampUs - mLastFrameTimestampUs > mGlitchDurationThresholdUs) {
            ++mNumGlitches;
        }
    }


    // May need to skip frame or modify timestamp. Currently implemented
    // by the subclass CameraSourceTimeLapse.
    if (skipCurrentFrame(timestampUs)) {
        releaseOneRecordingFrame(data);
        return;
    }


    mLastFrameTimestampUs = timestampUs;
    if (mNumFramesReceived == 0) {
        mFirstFrameTimeUs = timestampUs;
        // Initial delay
        if (mStartTimeUs > 0) {
            if (timestampUs < mStartTimeUs) {
                // Frame was captured before recording was started
                // Drop it without updating the statistical data.
                releaseOneRecordingFrame(data);
                return;
            }
            mStartTimeUs = timestampUs - mStartTimeUs;
        }
    }
    ++mNumFramesReceived;


    CHECK(data != NULL && data->size() > 0);

    mFramesReceived.push_back(data);
    int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
    mFrameTimes.push_back(timeUs);
    ALOGV("initial delay: %lld, current time stamp: %lld",
        mStartTimeUs, timeUs);
    mFrameAvailableCondition.signal();
}
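
       Note the calls to releaseOneRecordingFrame() in the drop and pause paths above: since the HAL only hands up a handle to the frame, every frame must eventually be returned to the camera so the underlying buffer can be reused. A sketch of what releaseOneRecordingFrame() does (paraphrased from the 4.4 CameraSource):

void CameraSource::releaseOneRecordingFrame(const sp<IMemory>& frame) {
    if (mCameraRecordingProxy != NULL) {
        // Usual camcorder case: return the buffer through the app's recording proxy.
        mCameraRecordingProxy->releaseRecordingFrame(frame);
    } else if (mCamera != NULL) {
        // CameraSource opened the camera itself: talk to ICamera directly.
        int64_t token = IPCThreadState::self()->clearCallingIdentity();
        mCamera->releaseRecordingFrame(frame);
        IPCThreadState::self()->restoreCallingIdentity(token);
    }
}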

   We can see that dataCallbackTimestamp() cooperates with the read() function through the mFrameAvailableCondition condition variable: the callback pushes each frame into mFramesReceived and signals, while read() waits on the condition and dequeues.

status_t CameraSource::read(
        MediaBuffer **buffer, const ReadOptions *options) {
    ALOGD("read");


    *buffer = NULL;


    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        return ERROR_UNSUPPORTED;
    }


    sp<IMemory> frame;
    int64_t frameTime;


    {
        Mutex::Autolock autoLock(mLock);
        while (mStarted && mFramesReceived.empty()) {
            if (NO_ERROR !=
                mFrameAvailableCondition.waitRelative(mLock,
                    mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
                if (mCameraRecordingProxy != 0 &&
                    !mCameraRecordingProxy->asBinder()->isBinderAlive()) {
                    ALOGW("camera recording proxy is gone");
                    return ERROR_END_OF_STREAM;
                }
                ALOGW("Timed out waiting for incoming camera video frames: %lld us",
                    mLastFrameTimestampUs);
            }
        }
        if (!mStarted) {
            return OK;
        }
        frame = *mFramesReceived.begin();
        mFramesReceived.erase(mFramesReceived.begin());


        frameTime = *mFrameTimes.begin();
        mFrameTimes.erase(mFrameTimes.begin());
        mFramesBeingEncoded.push_back(frame);

        ALOGD("***** CameraSource::read();frame->size() is:%d",frame->size());

        *buffer = new MediaBuffer(frame->pointer(), frame->size());
        (*buffer)->setObserver(this);
        (*buffer)->add_ref();
        (*buffer)->meta_data()->setInt64(kKeyTime, frameTime);
    }
    return OK;
}
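
       Finally, note the (*buffer)->setObserver(this) call in read(): the MediaBuffer handed to the encoder only wraps the IMemory frame, and once the encoder releases it, CameraSource::signalBufferReturned() is invoked. That is where the frame is returned to the camera and where anyone waiting for in-flight frames is woken up. An abbreviated sketch (paraphrased from the 4.4 sources):

void CameraSource::signalBufferReturned(MediaBuffer *buffer) {
    Mutex::Autolock autoLock(mLock);
    // Find the IMemory frame that this MediaBuffer was wrapping.
    for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
         it != mFramesBeingEncoded.end(); ++it) {
        if ((*it)->pointer() == buffer->data()) {
            releaseOneRecordingFrame(*it);      // hand the buffer back to the camera
            mFramesBeingEncoded.erase(it);
            ++mNumFramesEncoded;
            buffer->setObserver(0);
            buffer->release();
            mFrameCompleteCondition.signal();   // unblocks stop() waiting for in-flight frames
            return;
        }
    }
    CHECK(!"signalBufferReturned: bogus buffer");
}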
