[Android N] MediaRecorder series: the StagefrightRecorder TS-stream recording flow

CameraSource::start

Let's look at CameraSource's start function first:

status_t CameraSource::start(MetaData *meta) {
    ALOGV("start");
    CHECK(!mStarted);
    if (mInitCheck != OK) {
        ALOGE("CameraSource is not initialized yet");
        return mInitCheck;
    }

    char value[PROPERTY_VALUE_MAX];
    if (property_get("media.stagefright.record-stats", value, NULL)
        && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
        mCollectStats = true;
    }

    mStartTimeUs = 0;
    mNumInputBuffers = 0;
    mEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
    mEncoderDataSpace = HAL_DATASPACE_V0_BT709;

    if (meta) {
        //startTimeUs is important: it records the moment recording started,
        //and is used later when stamping frame timestamps
        int64_t startTimeUs;
        if (meta->findInt64(kKeyTime, &startTimeUs)) {
            mStartTimeUs = startTimeUs;
        }

        int32_t nBuffers;
        if (meta->findInt32(kKeyNumBuffers, &nBuffers)) {
            CHECK_GT(nBuffers, 0);
            mNumInputBuffers = nBuffers;
        }

        // apply encoder color format if specified
        if (meta->findInt32(kKeyPixelFormat, &mEncoderFormat)) {
            ALOGI("Using encoder format: %#x", mEncoderFormat);
        }
        if (meta->findInt32(kKeyColorSpace, &mEncoderDataSpace)) {
            ALOGI("Using encoder data space: %#x", mEncoderDataSpace);
        }
    }

    status_t err;
    //This is where the real start work happens
    if ((err = startCameraRecording()) == OK) {
        mStarted = true;
    }

    return err;
}
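
Two quick notes before moving on. The media.stagefright.record-stats property checked at the top only enables extra frame-statistics logging; on a debuggable build it can be turned on with adb shell setprop media.stagefright.record-stats 1. More importantly, the kKeyTime entry in the MetaData is how the caller hands a shared start time to each source. Here is a minimal sketch of that caller side (illustrative only, not the actual StagefrightRecorder code):

// Illustrative sketch: pass the recording start time to the source via
// kKeyTime so that the audio and video tracks share one timeline.
sp<MetaData> meta = new MetaData;
int64_t startTimeUs = systemTime() / 1000;  // systemTime() returns nanoseconds
meta->setInt64(kKeyTime, startTimeUs);
status_t err = cameraSource->start(meta.get());  // cameraSource: sp<CameraSource>

The real work happens in startCameraRecording():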
status_t CameraSource::startCameraRecording() {
    ALOGV("startCameraRecording");
    // Reset the identity to the current thread because media server owns the
    // camera and recording is started by the applications. The applications
    // will connect to the camera in ICameraRecordingProxy::startRecording.
    int64_t token = IPCThreadState::self()->clearCallingIdentity();
    status_t err;

    if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
        // Initialize buffer queue.
        err = initBufferQueue(mVideoSize.width, mVideoSize.height, mEncoderFormat,
                (android_dataspace_t)mEncoderDataSpace,
                mNumInputBuffers > 0 ? mNumInputBuffers : 1);
        if (err != OK) {
            ALOGE("%s: Failed to initialize buffer queue: %s (err=%d)", __FUNCTION__,
                    strerror(-err), err);
            return err;
        }
    } else {
        if (mNumInputBuffers > 0) {
            err = mCamera->sendCommand(
                CAMERA_CMD_SET_VIDEO_BUFFER_COUNT, mNumInputBuffers, 0);

            // This could happen for CameraHAL1 clients; thus the failure is
            // not a fatal error
            if (err != OK) {
                ALOGW("Failed to set video buffer count to %d due to %d",
                    mNumInputBuffers, err);
            }
        }
        //Send a command to set the camera's video format and dataspace
        err = mCamera->sendCommand(
            CAMERA_CMD_SET_VIDEO_FORMAT, mEncoderFormat, mEncoderDataSpace);

        // This could happen for CameraHAL1 clients; thus the failure is
        // not a fatal error
        if (err != OK) {
            ALOGW("Failed to set video encoder format/dataspace to %d, %d due to %d",
                    mEncoderFormat, mEncoderDataSpace, err);
        }

        // Create memory heap to store buffers as VideoNativeHandleMetadata.
        createVideoBufferMemoryHeap(sizeof(VideoNativeHandleMetadata), kDefaultVideoBufferCount);
    }

    err = OK;
    if (mCameraFlags & FLAGS_HOT_CAMERA) {
        mCamera->unlock();
        mCamera.clear();
        //Ask the camera (via the recording proxy) to start recording
        if ((err = mCameraRecordingProxy->startRecording(
                new ProxyListener(this))) != OK) {
            ALOGE("Failed to start recording, received error: %s (%d)",
                    strerror(-err), err);
        }
    } else {
        mCamera->setListener(new CameraSourceListener(this));
        mCamera->startRecording();
        if (!mCamera->recordingEnabled()) {
            err = -EINVAL;
            ALOGE("Failed to start recording");
        }
    }
    IPCThreadState::self()->restoreCallingIdentity(token);
    return err;
}

From here down we are into the camera HAL. The camera framework is fairly complex, so we skip it for now; it is enough to know that this start call reaches the camera HAL, capture begins, and we then wait for the camera to push frame data back to us.

CameraSourceListener::postDataTimestamp is notified each time a frame comes back; the frame data lives in dataPtr:

void CameraSourceListener::postDataTimestamp(
        nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) {

    sp<CameraSource> source = mSource.promote();
    if (source.get() != NULL) {
        source->dataCallbackTimestamp(timestamp/1000, msgType, dataPtr);
    }
}

source->dataCallbackTimestamp(timestamp/1000, msgType, dataPtr) then lands in the function below. Note the division by 1000: the camera reports timestamps in nanoseconds, while dataCallbackTimestamp works in microseconds:

void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
        int32_t msgType __unused, const sp<IMemory> &data) {
    ALOGV("dataCallbackTimestamp: timestamp %lld us", (long long)timestampUs);
    Mutex::Autolock autoLock(mLock);

    if (shouldSkipFrameLocked(timestampUs)) {
        releaseOneRecordingFrame(data);
        return;
    }
    //Counts how many frames have been received so far
    ++mNumFramesReceived;

    CHECK(data != NULL && data->size() > 0);
    //mFramesReceived queues each raw frame captured by the camera
    mFramesReceived.push_back(data);
    int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
    //mFrameTimes records the rebased timestamp for this frame
    mFrameTimes.push_back(timeUs);
    ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
        mStartTimeUs, timeUs);
    mFrameAvailableCondition.signal();
}

Keep mFramesReceived in mind; it is consumed later in read(). Also note the timestamp math above: camera timestamps are rebased so that the first frame lands exactly at mStartTimeUs. For example, with mStartTimeUs = 100000 µs and mFirstFrameTimeUs = 5000000 µs, a frame the camera stamps at 5033333 µs ends up at 100000 + 33333 = 133333 µs on the recording timeline.
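
For reference, here is a condensed sketch of the consuming side, based on the AOSP CameraSource::read (error handling and the wait timeout are trimmed):

// Condensed from AOSP CameraSource::read(); error paths omitted.
status_t CameraSource::read(MediaBuffer **buffer, const ReadOptions * /* options */) {
    *buffer = NULL;
    sp<IMemory> frame;
    int64_t frameTime;
    {
        Mutex::Autolock autoLock(mLock);
        // Block until dataCallbackTimestamp() has queued a frame.
        while (mStarted && mFramesReceived.empty()) {
            mFrameAvailableCondition.wait(mLock);
        }
        frame = *mFramesReceived.begin();
        mFramesReceived.erase(mFramesReceived.begin());
        frameTime = *mFrameTimes.begin();
        mFrameTimes.erase(mFrameTimes.begin());
        // Keep the frame alive until the encoder releases it back to the camera.
        mFramesBeingEncoded.push_back(frame);
        *buffer = new MediaBuffer(frame->pointer(), frame->size());
        (*buffer)->setObserver(this);
        (*buffer)->add_ref();
        (*buffer)->meta_data()->setInt64(kKeyTime, frameTime);
    }
    return OK;
}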

AudioSource::start

status_t AudioSource::start(MetaData *params) {
    Mutex::Autolock autoLock(mLock);
    if (mStarted) {
        return UNKNOWN_ERROR;
    }

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    mTrackMaxAmplitude = false;
    mMaxAmplitude = 0;
    mInitialReadTimeUs = 0;
    mStartTimeUs = 0;
    int64_t startTimeUs;
    //mStartTimeUs records the start time, in preparation for timestamping later
    if (params && params->findInt64(kKeyTime, &startTimeUs)) {
        mStartTimeUs = startTimeUs;
    }
    //This calls down into the HAL's start; we won't follow it further
    status_t err = mRecord->start();
    if (err == OK) {
        mStarted = true;
    } else {
        mRecord.clear();
    }

    return err;
}

Then we wait for the audio data to be reported back:

status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
    int64_t timeUs = systemTime() / 1000ll;
    // Estimate the real sampling time of the 1st sample in this buffer
    // from AudioRecord's latency. (Apply this adjustment first so that
    // the start time logic is not affected.)
    timeUs -= mRecord->latency() * 1000LL;

    ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
    Mutex::Autolock autoLock(mLock);
    if (!mStarted) {
        ALOGW("Spurious callback from AudioRecord. Drop the audio data.");
        return OK;
    }

    // Drop retrieved and previously lost audio data.
    if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
        (void) mRecord->getInputFramesLost();
        ALOGV("Drop audio data at %" PRId64 "/%" PRId64 " us", timeUs, mStartTimeUs);
        return OK;
    }

    if (mNumFramesReceived == 0 && mPrevSampleTimeUs == 0) {
        mInitialReadTimeUs = timeUs;
        // Initial delay
        if (mStartTimeUs > 0) {
            mStartTimeUs = timeUs - mStartTimeUs;
        } else {
            // Assume latency is constant.
            mStartTimeUs += mRecord->latency() * 1000;
        }

        mPrevSampleTimeUs = mStartTimeUs;
    }

    size_t numLostBytes = 0;
    if (mNumFramesReceived > 0) {  // Ignore earlier frame lost
        // getInputFramesLost() returns the number of lost frames.
        // Convert number of frames lost to number of bytes lost.
        numLostBytes = mRecord->getInputFramesLost() * mRecord->frameSize();
    }

    CHECK_EQ(numLostBytes & 1, 0u);
    CHECK_EQ(audioBuffer.size & 1, 0u);
    if (numLostBytes > 0) {
        // Loss of audio frames should happen rarely; thus the LOGW should
        // not cause a logging spam
        ALOGW("Lost audio record data: %zu bytes", numLostBytes);
    }

    while (numLostBytes > 0) {
        size_t bufferSize = numLostBytes;
        if (numLostBytes > kMaxBufferSize) {
            numLostBytes -= kMaxBufferSize;
            bufferSize = kMaxBufferSize;
        } else {
            numLostBytes = 0;
        }
        MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize);
        memset(lostAudioBuffer->data(), 0, bufferSize);
        lostAudioBuffer->set_range(0, bufferSize);
        queueInputBuffer_l(lostAudioBuffer, timeUs);
    }

    if (audioBuffer.size == 0) {
        ALOGW("Nothing is available from AudioRecord callback buffer");
        return OK;
    }

    const size_t bufferSize = audioBuffer.size;
    MediaBuffer *buffer = new MediaBuffer(bufferSize);
    memcpy((uint8_t *) buffer->data(),
            audioBuffer.i16, audioBuffer.size);
    buffer->set_range(0, bufferSize);
    //queueInputBuffer_l pushes the buffer into mBuffersReceived
    queueInputBuffer_l(buffer, timeUs);
    return OK;
}
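
queueInputBuffer_l is where each buffer gets its timestamp and is queued for the reader. A condensed sketch, based on the AOSP implementation (debug file-dump code trimmed):

// Condensed from AOSP AudioSource::queueInputBuffer_l().
void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
    const size_t bufferSize = buffer->range_length();
    const size_t frameSize = mRecord->frameSize();
    // Timestamp for the *next* buffer: advance by this buffer's duration,
    // rounded to the nearest microsecond.
    const int64_t timestampUs =
        mPrevSampleTimeUs +
            ((1000000LL * (bufferSize / frameSize)) + (mSampleRate >> 1)) / mSampleRate;

    if (mNumFramesReceived == 0) {
        buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs);
    }

    buffer->meta_data()->setInt64(kKeyTime, mPrevSampleTimeUs);
    buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
    mPrevSampleTimeUs = timestampUs;
    mNumFramesReceived += bufferSize / frameSize;
    mBuffersReceived.push_back(buffer);
    mFrameAvailableCondition.signal();
}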

Remember mBuffersReceived as well; it is consumed later in read().
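
The audio read() side mirrors the video one, except that the queued MediaBuffer is handed to the caller directly. Condensed from the AOSP AudioSource::read (init checks and the initial-mute handling trimmed):

// Condensed from AOSP AudioSource::read(); init checks and muting omitted.
status_t AudioSource::read(MediaBuffer **out, const ReadOptions * /* options */) {
    Mutex::Autolock autoLock(mLock);
    *out = NULL;

    // Block until dataCallback() has queued a buffer.
    while (mStarted && mBuffersReceived.empty()) {
        mFrameAvailableCondition.wait(mLock);
    }
    if (!mStarted) {
        return OK;
    }

    MediaBuffer *buffer = *mBuffersReceived.begin();
    mBuffersReceived.erase(mBuffersReceived.begin());
    ++mNumClientOwnedBuffers;
    buffer->setObserver(this);
    buffer->add_ref();
    *out = buffer;
    return OK;
}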

mFramesReceived and mBuffersReceived both hold raw, unencoded data. Encoding happens downstream: the encoder pulls these buffers out through each source's read() and only then compresses them.
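
To make that pull model concrete, here is a hypothetical consumer loop showing the shape of what the encoder side (MediaCodecSource in AOSP) does; feedToEncoder is a made-up helper, not a real API:

// Hypothetical pull loop (illustrative shape only; not AOSP code).
// 'source' is the started CameraSource or AudioSource.
for (;;) {
    MediaBuffer *raw = NULL;
    status_t err = source->read(&raw);  // blocks until a buffer is queued
    if (err != OK || raw == NULL) {
        break;
    }
    int64_t timeUs = 0;
    raw->meta_data()->findInt64(kKeyTime, &timeUs);
    feedToEncoder(raw, timeUs);         // hypothetical: hand the raw data to the codec
    raw->release();                     // return the buffer to the source
}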
