前面分析了configure stream过程,接下来分析下如何request stream。
1、应用层request api调用:
/**
 * Builds the preview CaptureRequest and creates the capture session.
 *
 * Targets three outputs: the on-screen SurfaceTexture surface plus two
 * YUV_420_888 ImageReaders (1920x1080 and 1280x720, 4 buffers each). Once the
 * session reports onConfigured, a repeating request is issued so the camera
 * streams continuously to all three targets.
 */
private void createCameraPreviewSession() {
    try {
        SurfaceTexture texture = getSurfaceTexture();
        if (texture == null) {
            // Bug fix: the original only logged here and then fell through to
            // new Surface(texture), which throws on a null texture. Bail out
            // instead of crashing.
            Log.d(TAG, "createCameraPreviewSession: texture is null");
            return;
        }
        Surface surface = new Surface(texture);
        mImageReader = ImageReader.newInstance(1920, 1080, ImageFormat.YUV_420_888, 4);
        mImageReader.setOnImageAvailableListener(mOnImageAvailableListener, mBackgroundHandler);
        yuvImageReader = ImageReader.newInstance(1280, 720, ImageFormat.YUV_420_888, 4);
        yuvImageReader.setOnImageAvailableListener(mmOnImageAvailableListener, mBackgroundHandler);
        // One request, three output targets: preview surface + both readers.
        mPreviewRequestBuilder = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
        mPreviewRequestBuilder.addTarget(surface);
        mPreviewRequestBuilder.addTarget(mImageReader.getSurface());
        mPreviewRequestBuilder.addTarget(yuvImageReader.getSurface());
        mCameraDevice.createCaptureSession(Arrays.asList(surface, mImageReader.getSurface(), yuvImageReader.getSurface()),
                new CameraCaptureSession.StateCallback() {
                    @Override
                    public void onConfigured(@NonNull CameraCaptureSession cameraCaptureSession) {
                        Log.d(TAG, "onConfigured: ");
                        // The camera is already closed
                        if (null == mCameraDevice) {
                            return;
                        }
                        // When the session is ready, we start displaying the preview
                        mCaptureSession = cameraCaptureSession;
                        try {
                            // Auto-focus should be continuous for camera preview
                            //mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE);
                            // Flash is automatically enabled when necessary
                            //setAutoFlash(mPreviewRequestBuilder);
                            // Finally, start displaying the camera preview
                            mPreviewRequest = mPreviewRequestBuilder.build();
                            mCaptureSession.setRepeatingRequest(mPreviewRequest, null, mBackgroundHandler);
                        } catch (CameraAccessException e) {
                            e.printStackTrace();
                        }
                    }
                    @Override
                    public void onConfigureFailed(@NonNull CameraCaptureSession cameraCaptureSession) {
                        Log.e(TAG, "CameraCaptureSession.StateCallback onConfigureFailed");
                    }
                }, null
        );
    } catch (CameraAccessException e) {
        e.printStackTrace();
    }
}
当收到onConfigured回调时,将创建好的cameraCaptureSession赋值给全局变量mCaptureSession,然后调用CameraCaptureSessionImpl::setRepeatingRequest开始请求数据流:
/**
 * CameraCaptureSessionImpl#setRepeatingRequest: validates the request, then
 * forwards it to CameraDeviceImpl under the device interface lock.
 *
 * @param request  the request to repeat continuously
 * @param callback per-capture callback, may be null
 * @param handler  handler to deliver callbacks on; resolved via checkHandler
 * @return the sequence id of the submitted repeating request
 * @throws CameraAccessException on device access failure
 */
public int setRepeatingRequest(CaptureRequest request, CaptureCallback callback,
        Handler handler) throws CameraAccessException {
    // Validation: rejects requests not usable as repeating requests.
    checkRepeatingRequest(request);
    synchronized (mDeviceImpl.mInterfaceLock) {
        checkNotClosed();
        // Resolve which handler the callback will run on.
        handler = checkHandler(handler, callback);
        if (DEBUG) {
            Log.v(TAG, mIdString + "setRepeatingRequest - request " + request + ", callback " +
                    callback + " handler" + " " + handler);
        }
        // Forward to the device and register the returned sequence id so this
        // session can track its completion later.
        return addPendingSequence(mDeviceImpl.setRepeatingRequest(request,
                createCaptureCallbackProxy(handler, callback), mDeviceExecutor));
    }
}
接着,进入到CameraDeviceImpl::setRepeatingRequest中:
/**
 * CameraDeviceImpl#setRepeatingRequest: wraps the single request in a
 * one-element list and submits it as a streaming (repeating) capture.
 */
public int setRepeatingRequest(CaptureRequest request, CaptureCallback callback,
        Executor executor) throws CameraAccessException {
    // A repeating request is just a streaming submission of a 1-element batch.
    ArrayList<CaptureRequest> singleRequest = new ArrayList<CaptureRequest>();
    singleRequest.add(request);
    return submitCaptureRequest(singleRequest, callback, executor, /*streaming*/true);
}
这里将request放进了requestList链表里,requestList的大小为1,接着进入到CameraDeviceImpl::submitCaptureRequest中:
synchronized(mInterfaceLock) {
checkIfCameraClosedOrInError();
if (repeating) {
stopRepeating();
}
SubmitInfo requestInfo;
CaptureRequest[] requestArray = requestList.toArray(new CaptureRequest[requestList.size()]);
// Convert Surface to streamIdx and surfaceIdx
for (CaptureRequest request : requestArray) {
request.convertSurfaceToStreamId(mConfiguredOutputs);
}
requestInfo = mRemoteDevice.submitRequestList(requestArray, repeating);
if (DEBUG) {
Log.v(TAG, "last frame number " + requestInfo.getLastFrameNumber());
}
for (CaptureRequest request : requestArray) {
request.recoverStreamIdToSurface();
}
if (callback != null) {
mCaptureCallbackMap.put(requestInfo.getRequestId(),
new CaptureCallbackHolder(
callback, requestList, executor, repeating, mNextSessionId - 1));
} else {
if (DEBUG) {
Log.d(TAG, "Listen for request " + requestInfo.getRequestId() + " is null");
}
}
if (repeating) {
if (mRepeatingRequestId != REQUEST_ID_NONE) {
checkEarlyTriggerSequenceComplete(mRepeatingRequestId,
requestInfo.getLastFrameNumber());
}
mRepeatingRequestId = requestInfo.getRequestId();
} else {
mRequestLastFrameNumbersList.add(
new RequestLastFrameNumbersHolder(requestList, requestInfo));
}
if (mIdle) {
mDeviceExecutor.execute(mCallOnActive);
}
mIdle = false;
return requestInfo.getRequestId();
}
}
其中,mRemoteDevice是远端代理,接下来的流程就是:ICameraDeviceUserWrapper.submitRequestList->AIDL->ICameraDeviceUser::submitRequestList->BpCameraDeviceUser::submitRequestList->CameraDeviceClient::submitRequestList,下面看下CameraDeviceClient::submitRequestList的实现:
/**
 * Validates and submits a batch of capture requests coming over Binder from
 * CameraDeviceImpl, translating each request's stream/surface indices into a
 * SurfaceMap and forwarding the batch to Camera3Device (streaming or one-shot).
 *
 * @param requests   requests from the client (size 1 in the preview case)
 * @param streaming  true for repeating/preview, false for one-shot capture
 * @param submitInfo out: assigned request id and last frame number
 */
binder::Status CameraDeviceClient::submitRequestList(
        const std::vector<hardware::camera2::CaptureRequest>& requests,
        bool streaming,
        /*out*/
        hardware::camera2::utils::SubmitInfo *submitInfo) {
    ATRACE_CALL();
    ALOGV("%s-start of function. Request list size %zu", __FUNCTION__, requests.size());
    binder::Status res = binder::Status::ok();
    status_t err;
    if ( !(res = checkPidStatus(__FUNCTION__) ).isOk()) {
        return res;
    }
    Mutex::Autolock icl(mBinderSerializationLock);
    if (!mDevice.get()) {
        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
    }
    // requests.size() is 1 here: only a single request is submitted.
    if (requests.empty()) {
        ALOGE("%s: Camera %s: Sent null request. Rejecting request.",
                __FUNCTION__, mCameraIdStr.string());
        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Empty request list");
    }
    List<const CameraDeviceBase::PhysicalCameraSettingsList> metadataRequestList;
    // Note this list: each populated surfaceMap is appended to it below.
    std::list<const SurfaceMap> surfaceMapList;
    submitInfo->mRequestId = mRequestIdCounter;
    uint32_t loopCounter = 0;
    for (auto&& request: requests) {
        if (request.mIsReprocess) {
            if (!mInputStream.configured) {
                ALOGE("%s: Camera %s: no input stream is configured.", __FUNCTION__,
                        mCameraIdStr.string());
                return STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
                        "No input configured for camera %s but request is for reprocessing",
                        mCameraIdStr.string());
            } else if (streaming) {
                ALOGE("%s: Camera %s: streaming reprocess requests not supported.", __FUNCTION__,
                        mCameraIdStr.string());
                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                        "Repeating reprocess requests not supported");
            } else if (request.mPhysicalCameraSettings.size() > 1) {
                ALOGE("%s: Camera %s: reprocess requests not supported for "
                        "multiple physical cameras.", __FUNCTION__,
                        mCameraIdStr.string());
                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                        "Reprocess requests not supported for multiple cameras");
            }
        }
        if (request.mPhysicalCameraSettings.empty()) {
            ALOGE("%s: Camera %s: request doesn't contain any settings.", __FUNCTION__,
                    mCameraIdStr.string());
            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                    "Request doesn't contain any settings");
        }
        //The first capture settings should always match the logical camera id
        String8 logicalId(request.mPhysicalCameraSettings.begin()->id.c_str());
        if (mDevice->getId() != logicalId) {
            ALOGE("%s: Camera %s: Invalid camera request settings.", __FUNCTION__,
                    mCameraIdStr.string());
            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                    "Invalid camera request settings");
        }
        // Observed on TV: request.mSurfaceList.size() == 0 and
        // request.mStreamIdxList.size() == 3 (matching the surfaces the app passed in).
        if (request.mSurfaceList.isEmpty() && request.mStreamIdxList.size() == 0) {
            ALOGE("%s: Camera %s: Requests must have at least one surface target. "
                    "Rejecting request.", __FUNCTION__, mCameraIdStr.string());
            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                    "Request has no output targets");
        }
        /**
         * Write in the output stream IDs and map from stream ID to surface ID
         * which we calculate from the capture request's list of surface target
         */
        SurfaceMap surfaceMap;
        Vector<int32_t> outputStreamIds;
        std::vector<std::string> requestedPhysicalIds;
        // Bug fix: size() returns size_t — %d with size_t varargs is undefined
        // behavior (and prints garbage on LP64); use %zu.
        ALOGE("WUKEKE %s requests.size = %zu request.mSurfaceList.size = %zu, request.mStreamIdxList.size = %zu", __FUNCTION__, requests.size(), request.mSurfaceList.size(), request.mStreamIdxList.size());
        //ALOGE("WUKEKE %s mStreamMap.size = %d, mConfiguredOutputs.size = %d", __FUNCTION__, mStreamMap.size(), mConfiguredOutputs.size());
        if (request.mSurfaceList.size() > 0) {
            // Not taken in this scenario (mSurfaceList is empty).
            for (sp<Surface> surface : request.mSurfaceList) {
                if (surface == 0) continue;
                int32_t streamId;
                sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
                res = insertGbpLocked(gbp, &surfaceMap, &outputStreamIds, &streamId);
                if (!res.isOk()) {
                    return res;
                }
                ssize_t index = mConfiguredOutputs.indexOfKey(streamId);
                if (index >= 0) {
                    String8 requestedPhysicalId(
                            mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
                    requestedPhysicalIds.push_back(requestedPhysicalId.string());
                } else {
                    ALOGW("%s: Output stream Id not found among configured outputs!", __FUNCTION__);
                }
            }
        } else {
            // This branch is taken in the preview case.
            for (size_t i = 0; i < request.mStreamIdxList.size(); i++) {
                int streamId = request.mStreamIdxList.itemAt(i);
                int surfaceIdx = request.mSurfaceIdxList.itemAt(i);
                ssize_t index = mConfiguredOutputs.indexOfKey(streamId);
                if (index < 0) {
                    ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
                            " we have not called createStream on: stream %d",
                            __FUNCTION__, mCameraIdStr.string(), streamId);
                    return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                            "Request targets Surface that is not part of current capture session");
                }
                // ImageReader_init created one gbp per surface, so gbps.size() is 1 here.
                const auto& gbps = mConfiguredOutputs.valueAt(index).getGraphicBufferProducers();
                if ((size_t)surfaceIdx >= gbps.size()) {
                    ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
                            " we have not called createStream on: stream %d, surfaceIdx %d",
                            __FUNCTION__, mCameraIdStr.string(), streamId, surfaceIdx);
                    return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                            "Request targets Surface has invalid surface index");
                }
                // Look up the stream/surface id for this gbp and record it into
                // the local surfaceMap.
                res = insertGbpLocked(gbps[surfaceIdx], &surfaceMap, &outputStreamIds, nullptr);
                if (!res.isOk()) {
                    return res;
                }
                String8 requestedPhysicalId(
                        mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
                requestedPhysicalIds.push_back(requestedPhysicalId.string());
            }
        }
        CameraDeviceBase::PhysicalCameraSettingsList physicalSettingsList;
        for (const auto& it : request.mPhysicalCameraSettings) {
            if (it.settings.isEmpty()) {
                ALOGE("%s: Camera %s: Sent empty metadata packet. Rejecting request.",
                        __FUNCTION__, mCameraIdStr.string());
                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                        "Request settings are empty");
            }
            String8 physicalId(it.id.c_str());
            if (physicalId != mDevice->getId()) {
                auto found = std::find(requestedPhysicalIds.begin(), requestedPhysicalIds.end(),
                        it.id);
                if (found == requestedPhysicalIds.end()) {
                    ALOGE("%s: Camera %s: Physical camera id: %s not part of attached outputs.",
                            __FUNCTION__, mCameraIdStr.string(), physicalId.string());
                    return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
                            "Invalid physical camera id");
                }
                if (!mSupportedPhysicalRequestKeys.empty()) {
                    // Filter out any unsupported physical request keys.
                    CameraMetadata filteredParams(mSupportedPhysicalRequestKeys.size());
                    camera_metadata_t *meta = const_cast<camera_metadata_t *>(
                            filteredParams.getAndLock());
                    set_camera_metadata_vendor_id(meta, mDevice->getVendorTagId());
                    filteredParams.unlock(meta);
                    for (const auto& keyIt : mSupportedPhysicalRequestKeys) {
                        camera_metadata_ro_entry entry = it.settings.find(keyIt);
                        if (entry.count > 0) {
                            filteredParams.update(entry);
                        }
                    }
                    physicalSettingsList.push_back({it.id, filteredParams});
                }
            } else {
                physicalSettingsList.push_back({it.id, it.settings});
            }
        }
        if (!enforceRequestPermissions(physicalSettingsList.begin()->metadata)) {
            // Callee logs
            return STATUS_ERROR(CameraService::ERROR_PERMISSION_DENIED,
                    "Caller does not have permission to change restricted controls");
        }
        physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS,
                &outputStreamIds[0], outputStreamIds.size());
        if (request.mIsReprocess) {
            physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_INPUT_STREAMS,
                    &mInputStream.id, 1);
        }
        physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_ID,
                &(submitInfo->mRequestId), /*size*/1);
        loopCounter++; // loopCounter starts from 1
        ALOGV("%s: Camera %s: Creating request with ID %d (%d of %zu)",
                __FUNCTION__, mCameraIdStr.string(), submitInfo->mRequestId,
                loopCounter, requests.size());
        metadataRequestList.push_back(physicalSettingsList);
        // Save this request's surfaceMap into the outer list; after this,
        // surfaceMapList.size() is 1.
        surfaceMapList.push_back(surfaceMap);
    }
    mRequestIdCounter++;
    // streaming == true takes the preview (repeating) path, false the capture path.
    if (streaming) {
        err = mDevice->setStreamingRequestList(metadataRequestList, surfaceMapList,
                &(submitInfo->mLastFrameNumber));
        if (err != OK) {
            String8 msg = String8::format(
                    "Camera %s: Got error %s (%d) after trying to set streaming request",
                    mCameraIdStr.string(), strerror(-err), err);
            ALOGE("%s: %s", __FUNCTION__, msg.string());
            res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
                    msg.string());
        } else {
            Mutex::Autolock idLock(mStreamingRequestIdLock);
            mStreamingRequestId = submitInfo->mRequestId;
        }
    } else {
        err = mDevice->captureList(metadataRequestList, surfaceMapList,
                &(submitInfo->mLastFrameNumber));
        if (err != OK) {
            String8 msg = String8::format(
                    "Camera %s: Got error %s (%d) after trying to submit capture request",
                    mCameraIdStr.string(), strerror(-err), err);
            ALOGE("%s: %s", __FUNCTION__, msg.string());
            res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
                    msg.string());
        }
        ALOGV("%s: requestId = %d ", __FUNCTION__, submitInfo->mRequestId);
    }
    ALOGV("%s: Camera %s: End of function", __FUNCTION__, mCameraIdStr.string());
    return res;
}
根据注释,在Parcel中创建surface的话会额外增加一次进程间通信,因此这里会设置request.mSurfaceList.size() = 0,然后从全局变量mConfiguredOutputs中找到相应的gbp,再根据gbp从mStreamMap中找到相应的streamSurfaceId(streamId和surfaceId键值对)。
在配置流的时候,会将app传入的surfaces放入到OutputConfigurations中进行管理(放入的好处就是后续能通过gbp重新create出surface),然后在执行CameraDeviceClient::createStream创建流时,会用mStreamMap数组将上述surfaceid,gbp,streamid进行绑定,用mConfiguredOutputs数组保存当次创建流时传入的OutputConfigurations对象和对应的streamId键值对,因此在request的时候就无需在parcel中重新创建surface,就可以获取到已经配置好的streamId,节省了通信时间。
接下来看看Camera3Device::setStreamingRequestList的函数实现:
status_t Camera3Device::setStreamingRequestList(
        const List<const PhysicalCameraSettingsList> &requestsList,
        const std::list<const SurfaceMap> &surfaceMaps, int64_t *lastFrameNumber) {
    ATRACE_CALL();
    // A streaming request list is simply a repeating submission: delegate to
    // the shared helper with repeating enabled.
    const bool repeating = true;
    return submitRequestsHelper(requestsList, surfaceMaps, repeating, lastFrameNumber);
}
/**
 * Common path for both streaming (repeating) and one-shot submissions:
 * converts metadata + surface maps into CaptureRequest objects and hands them
 * to the request thread.
 *
 * @param requests        one settings list per request
 * @param surfaceMaps     one surface map per request (same length as requests)
 * @param repeating       true for preview/repeating, false for one-shot capture
 * @param lastFrameNumber out: last frame number of the previous repeating batch
 */
status_t Camera3Device::submitRequestsHelper(
        const List<const PhysicalCameraSettingsList> &requests,
        const std::list<const SurfaceMap> &surfaceMaps,
        bool repeating,
        /*out*/
        int64_t *lastFrameNumber) {
    ATRACE_CALL();
    Mutex::Autolock il(mInterfaceLock);
    Mutex::Autolock l(mLock);
    status_t res = checkStatusOkToCaptureLocked();
    if (res != OK) {
        // error logged by previous call
        return res;
    }
    RequestList requestList;
    // Build the CaptureRequest objects.
    res = convertMetadataListToRequestListLocked(requests, surfaceMaps,
            repeating, /*out*/&requestList);
    if (res != OK) {
        // error logged by previous call
        return res;
    }
    // Bug fix: size() returns size_t — use %zu, not %d (mismatched varargs
    // are undefined behavior and print garbage on LP64).
    ALOGE("WUKEKE %s SurfaceMaps.size = %zu, requestList.size = %zu", __FUNCTION__, surfaceMaps.size(), requestList.size());
    if (repeating) {
        // Hand the repeating request(s) to the request thread.
        res = mRequestThread->setRepeatingRequests(requestList, lastFrameNumber);
    } else {
        res = mRequestThread->queueRequestList(requestList, lastFrameNumber);
    }
    if (res == OK) {
        // Bug fix: the return value was previously discarded, which left the
        // error check below comparing a stale 'res' (always OK — dead code).
        // Capture it so a failed transition to ACTIVE is actually reported.
        res = waitUntilStateThenRelock(/*active*/true, kActiveTimeout);
        if (res != OK) {
            SET_ERR_L("Can't transition to active in %f seconds!",
                    kActiveTimeout/1e9);
        }
        ALOGV("Camera %s: Capture request %" PRId32 " enqueued", mId.string(),
                (*(requestList.begin()))->mResultExtras.requestId);
    } else {
        CLOGE("Cannot queue request. Impossible.");
        return BAD_VALUE;
    }
    return res;
}
这里执行了两个重要步骤:
① 创建request请求:convertMetadataListToRequestListLocked();
② 发送request请求:setRepeatingRequests。
先看Camera3Device::convertMetadataListToRequestListLocked的具体实现:
/**
 * Walks the metadata list and surface-map list in lockstep, building one
 * CaptureRequest per pair via setUpRequestLocked and appending it to
 * requestList. Fails if the two lists differ in length or a request lacks
 * ANDROID_REQUEST_ID.
 */
status_t Camera3Device::convertMetadataListToRequestListLocked(
        const List<const PhysicalCameraSettingsList> &metadataList,
        const std::list<const SurfaceMap> &surfaceMaps,
        bool repeating,
        RequestList *requestList) {
    if (requestList == NULL) {
        CLOGE("requestList cannot be NULL.");
        return BAD_VALUE;
    }
    int32_t burstId = 0;
    List<const PhysicalCameraSettingsList>::const_iterator metadataIt = metadataList.begin();
    // surfaceMaps has size 1 in the preview case, so this loop runs once.
    std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
    for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
            ++metadataIt, ++surfaceMapIt) {
        // Create the request through setUpRequestLocked.
        sp<CaptureRequest> newRequest = setUpRequestLocked(*metadataIt, *surfaceMapIt);
        if (newRequest == 0) {
            CLOGE("Can't create capture request");
            return BAD_VALUE;
        }
        newRequest->mRepeating = repeating;
        // Setup burst Id and request Id
        newRequest->mResultExtras.burstId = burstId++;
        if (metadataIt->begin()->metadata.exists(ANDROID_REQUEST_ID)) {
            if (metadataIt->begin()->metadata.find(ANDROID_REQUEST_ID).count == 0) {
                CLOGE("RequestID entry exists; but must not be empty in metadata");
                return BAD_VALUE;
            }
            newRequest->mResultExtras.requestId = metadataIt->begin()->metadata.find(
                    ANDROID_REQUEST_ID).data.i32[0];
        } else {
            CLOGE("RequestID does not exist in metadata");
            return BAD_VALUE;
        }
        // Consequently requestList also ends up with size 1.
        requestList->push_back(newRequest);
        ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
    }
    // If one iterator finished before the other, the inputs were mismatched.
    if (metadataIt != metadataList.end() || surfaceMapIt != surfaceMaps.end()) {
        ALOGE("%s: metadataList and surfaceMaps are not the same size!", __FUNCTION__);
        return BAD_VALUE;
    }
    // Setup batch size if this is a high speed video recording request.
    if (mIsConstrainedHighSpeedConfiguration && requestList->size() > 0) {
        auto firstRequest = requestList->begin();
        for (auto& outputStream : (*firstRequest)->mOutputStreams) {
            if (outputStream->isVideoStream()) {
                (*firstRequest)->mBatchSize = requestList->size();
                break;
            }
        }
    }
    return OK;
}
再来看看Camera3Device::setUpRequestLocked的实现:
/**
 * Lazily configures the streams if nothing has been configured yet (only
 * reachable via API1; API2 always calls configureStreams explicitly), then
 * builds the actual CaptureRequest. Returns NULL on failure.
 */
sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
        const PhysicalCameraSettingsList &request, const SurfaceMap &surfaceMap) {
    if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
        // Unilaterally select normal operating mode for this implicit configure.
        const status_t err = filterParamsAndConfigureLocked(request.begin()->metadata,
                CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE);
        if (err != OK) {
            // Stream configuration failed; the client may retry with another one.
            CLOGE("Can't set up streams: %s (%d)", strerror(-err), err);
            return NULL;
        }
        if (mStatus == STATUS_UNCONFIGURED) {
            // Configuration succeeded but produced an empty stream configuration.
            CLOGE("No streams configured");
            return NULL;
        }
    }
    return createCaptureRequest(request, surfaceMap);
}
最后是在Camera3Device::createCaptureRequest中创建的request,看其具体实现:
/**
 * Builds a CaptureRequest from the settings list and surface map: resolves the
 * input stream (if any) and every output stream id in the metadata to the
 * corresponding Camera3OutputStream, finishing lazy stream configuration on
 * first use. Returns NULL on any validation failure.
 */
sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
        const PhysicalCameraSettingsList &request, const SurfaceMap &surfaceMap) {
    ATRACE_CALL();
    status_t res;
    // Create the CaptureRequest object; the rest of this function fills it in.
    sp<CaptureRequest> newRequest = new CaptureRequest;
    newRequest->mSettingsList = request;
    camera_metadata_entry_t inputStreams =
            newRequest->mSettingsList.begin()->metadata.find(ANDROID_REQUEST_INPUT_STREAMS);
    // No input stream is configured in the TV scenario, so this branch is skipped.
    if (inputStreams.count > 0) {
        if (mInputStream == NULL ||
                mInputStream->getId() != inputStreams.data.i32[0]) {
            CLOGE("Request references unknown input stream %d",
                    inputStreams.data.u8[0]);
            return NULL;
        }
        // Lazy completion of stream configuration (allocation/registration)
        // on first use
        if (mInputStream->isConfiguring()) {
            res = mInputStream->finishConfiguration();
            if (res != OK) {
                SET_ERR_L("Unable to finish configuring input stream %d:"
                        " %s (%d)",
                        mInputStream->getId(), strerror(-res), res);
                return NULL;
            }
        }
        // Check if stream is being prepared
        if (mInputStream->isPreparing()) {
            CLOGE("Request references an input stream that's being prepared!");
            return NULL;
        }
        newRequest->mInputStream = mInputStream;
        newRequest->mSettingsList.begin()->metadata.erase(ANDROID_REQUEST_INPUT_STREAMS);
    }
    camera_metadata_entry_t streams =
            newRequest->mSettingsList.begin()->metadata.find(ANDROID_REQUEST_OUTPUT_STREAMS);
    if (streams.count == 0) {
        CLOGE("Zero output streams specified!");
        return NULL;
    }
    for (size_t i = 0; i < streams.count; i++) {
        int idx = mOutputStreams.indexOfKey(streams.data.i32[i]);
        if (idx == NAME_NOT_FOUND) {
            CLOGE("Request references unknown stream %d",
                    streams.data.u8[i]);
            return NULL;
        }
        // createStream allocated one Camera3OutputStream per surface and stored
        // it in mOutputStreams; fetch the one matching this stream id.
        sp<Camera3OutputStreamInterface> stream =
                mOutputStreams.editValueAt(idx);
        // It is illegal to include a deferred consumer output stream into a request
        auto iter = surfaceMap.find(streams.data.i32[i]);
        if (iter != surfaceMap.end()) {
            const std::vector<size_t>& surfaces = iter->second;
            for (const auto& surface : surfaces) {
                if (stream->isConsumerConfigurationDeferred(surface)) {
                    CLOGE("Stream %d surface %zu hasn't finished configuration yet "
                            "due to deferred consumer", stream->getId(), surface);
                    return NULL;
                }
            }
            // Record the surfaces into newRequest->mOutputSurfaces.
            // NOTE(review): keyed by loop index i rather than the stream id —
            // confirm the consumer side indexes mOutputSurfaces the same way.
            newRequest->mOutputSurfaces[i] = surfaces;
        }
        // Lazy completion of stream configuration (allocation/registration)
        // on first use
        if (stream->isConfiguring()) {
            res = stream->finishConfiguration();
            if (res != OK) {
                SET_ERR_L("Unable to finish configuring stream %d: %s (%d)",
                        stream->getId(), strerror(-res), res);
                return NULL;
            }
        }
        // Check if stream is being prepared
        if (stream->isPreparing()) {
            CLOGE("Request references an output stream that's being prepared!");
            return NULL;
        }
        // Push the matching member of mOutputStreams onto newRequest->mOutputStreams.
        newRequest->mOutputStreams.push(stream);
    }
    newRequest->mSettingsList.begin()->metadata.erase(ANDROID_REQUEST_OUTPUT_STREAMS);
    newRequest->mBatchSize = 1;
    return newRequest;
}
上面就创建出了一个request,并将surface和outputStream填充进去,看下CaptureRequest结构:
// A fully-resolved capture request inside Camera3Device: settings plus the
// concrete input/output streams and surfaces the request targets.
class CaptureRequest : public LightRefBase<CaptureRequest> {
  public:
    // Per-(physical-)camera settings for this request.
    PhysicalCameraSettingsList mSettingsList;
    // Input stream and its acquired buffer (reprocess only; null otherwise).
    sp<camera3::Camera3Stream> mInputStream;
    camera3_stream_buffer_t mInputBuffer;
    // Output streams this request writes to.
    Vector<sp<camera3::Camera3OutputStreamInterface> >
            mOutputStreams;
    // Map of output surfaces belonging to the streams above.
    SurfaceMap mOutputSurfaces;
    // Frame number, request id, trigger ids etc. reported back with results.
    CaptureResultExtras mResultExtras;
    // The number of requests that should be submitted to HAL at a time.
    // For example, if batch size is 8, this request and the following 7
    // requests will be submitted to HAL at a time. The batch size for
    // the following 7 requests will be ignored by the request thread.
    int mBatchSize;
    // Whether this request comes from a repeating request or repeating burst.
    bool mRepeating;
};
创建好了request后,执行Camera3Device::RequestThread::setRepeatingRequests来发送request请求,看下相应实现:
/**
 * Replaces the current repeating request batch. The request thread keeps
 * re-reading mRepeatingRequests to generate a continuous stream of frames, so
 * installing a new batch here is how the app changes repeating settings.
 *
 * @param requests        new repeating batch (size 1 for plain preview)
 * @param lastFrameNumber out: last frame number of the previous repeating batch
 */
status_t Camera3Device::RequestThread::setRepeatingRequests(
        const RequestList &requests,
        /*out*/
        int64_t *lastFrameNumber) {
    ATRACE_CALL();
    Mutex::Autolock l(mRequestLock);
    if (lastFrameNumber != NULL) {
        *lastFrameNumber = mRepeatingLastFrameNumber;
    }
    // Drop the previous repeating batch and install the new one.
    mRepeatingRequests.clear();
    mRepeatingRequests.insert(mRepeatingRequests.begin(),
            requests.begin(), requests.end());
    // Bug fix: size() returns size_t — log with %zu, not %d (mismatched
    // varargs are undefined behavior, wrong on LP64).
    ALOGE("WUKEKE %s mRepeatingRequests.size = %zu, requests.size = %zu", __FUNCTION__, mRepeatingRequests.size(), requests.size());
    // Wake the request thread in case it was idle waiting for work.
    unpauseForNewRequests();
    mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
    return OK;
}
这里insert一个request到mRepeatingRequests中;
mRepeatingRequests在insert一个 request 后,thread 线程会一直去拿该request(因为mRepeatingRequests中只insert了一个request,每次取mRepeatingRequests.begin()即可),拿完后不会remove掉,这样thread就会一直发该request请求;
如果需要改变request的metadata,那么app需要重新call一次setRepeatingRequests;
此函数每一次都会调用 mRepeatingRequests.clear() 清除之前的request。
这里总结一下:configure的时候为每个surface创建相应的Camera3OutputStream,然后将相应的信息保存起来(注意CameraDeviceClient中的mStreamMap、mConfiguredOutputs以及Camera3Device中的mOutputStreams等),然后告诉hal层需要配置什么格式、分辨率等的流。request中会将上述的surfaces和outputStreams填充进request请求中,然后在Camera3Device中起一个thread,一直向hal层发送request请求。
接下来分析Camera3Device::RequestThread是如何向hal层发送request请求的。Camera3Device::RequestThread是继承于thread类的,在openCamera的时候,会执行Camera3Device::initializeCommonLocked将该线程run起来:
Camera3Device::initializeCommonLocked()
......
/** Start up request queue thread */
mRequestThread = new RequestThread(this, mStatusTracker, mInterface, sessionParamKeys);
res = mRequestThread->run(String8::format("C3Dev-%s-ReqQueue", mId.string()).string());
if (res != OK) {
SET_ERR_L("Unable to start request queue thread: %s (%d)",
strerror(-res), res);
mInterface->close();
mRequestThread.clear();
return res;
}
}
run起来以后,就会一直循环执行其中的threadLoop函数:
bool Camera3Device::RequestThread::threadLoop() {
ATRACE_CALL();
status_t res;
// Handle paused state.
if (waitIfPaused()) {
return true;
}
// ①等待是否有request请求发送过来
waitForNextRequestBatch();
if (mNextRequests.size() == 0) {
return true;
}
// Get the latest request ID, if any
int latestRequestId;
camera_metadata_entry_t requestIdEntry = mNextRequests[mNextRequests.size() - 1].
captureRequest->mSettingsList.begin()->metadata.find(ANDROID_REQUEST_ID);
if (requestIdEntry.count > 0) {
latestRequestId = requestIdEntry.data.i32[0];
} else {
ALOGW("%s: Did not have android.request.id set in the request.", __FUNCTION__);
latestRequestId = NAME_NOT_FOUND;
}
// 'mNextRequests' will at this point contain either a set of HFR batched requests
// or a single request from streaming or burst. In either case the first element
// should contain the latest camera settings that we need to check for any session
// parameter updates.
if (updateSessionParameters(mNextRequests[0].captureRequest->mSettingsList.begin()->metadata)) {
res = OK;
//Input stream buffers are already acquired at this point so an input stream
//will not be able to move to idle state unless we force it.
if (mNextRequests[0].captureRequest->mInputStream != nullptr) {
res = mNextRequests[0].captureRequest->mInputStream->forceToIdle();
if (res != OK) {
ALOGE("%s: Failed to force idle input stream: %d", __FUNCTION__, res);
cleanUpFailedRequests(/*sendRequestError*/ false);
return false;
}
}
if (res == OK) {
sp<StatusTracker> statusTracker = mStatusTracker.promote();
if (statusTracker != 0) {
sp<Camera3Device> parent = mParent.promote();
if (parent != nullptr) {
parent->pauseStateNotify(true);
}
statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
if (parent != nullptr) {
mReconfigured |= parent->reconfigureCamera(mLatestSessionParams);
}
statusTracker->markComponentActive(mStatusId);
setPaused(false);
}
if (mNextRequests[0].captureRequest->mInputStream != nullptr) {
mNextRequests[0].captureRequest->mInputStream->restoreConfiguredState();
if (res != OK) {
ALOGE("%s: Failed to restore configured input stream: %d", __FUNCTION__, res);
cleanUpFailedRequests(/*sendRequestError*/ false);
return false;
}
}
}
}
// ②填充向hal层发送的request请求,即halRequest
res = prepareHalRequests();
if (res == TIMED_OUT) {
// Not a fatal error if getting output buffers time out.
cleanUpFailedRequests(/*sendRequestError*/ true);
// Check if any stream is abandoned.
checkAndStopRepeatingRequest();
return true;
} else if (res != OK) {
cleanUpFailedRequests(/*sendRequestError*/ false);
return false;
}
// Inform waitUntilRequestProcessed thread of a new request ID
{
Mutex::Autolock al(mLatestRequestMutex);
mLatestRequestId = latestRequestId;
mLatestRequestSignal.signal();
}
// Submit a batch of requests to HAL.
// Use flush lock only when submitting multilple requests in a batch.
// TODO: The problem with flush lock is flush() will be blocked by process_capture_request()
// which may take a long time to finish so synchronizing flush() and
// process_capture_request() defeats the purpose of cancelling requests ASAP with flush().
// For now, only synchronize for high speed recording and we should figure something out for
// removing the synchronization.
bool useFlushLock = mNextRequests.size() > 1;
if (useFlushLock) {
mFlushLock.lock();
}
ALOGVV("%s: %d: submitting %zu requests in a batch.", __FUNCTION__, __LINE__,
mNextRequests.size());
bool submitRequestSuccess = false;
nsecs_t tRequestStart = systemTime(SYSTEM_TIME_MONOTONIC);
if (mInterface->supportBatchRequest()) {
// ③tv中是走这条路线,不过这两个函数最终都是通过
// Camera3Device::HalInterface::processBatchCaptureRequests向hal层发送request请求
submitRequestSuccess = sendRequestsBatch();
} else {
submitRequestSuccess = sendRequestsOneByOne();
}
nsecs_t tRequestEnd = systemTime(SYSTEM_TIME_MONOTONIC);
mRequestLatency.add(tRequestStart, tRequestEnd);
if (useFlushLock) {
mFlushLock.unlock();
}
// Unset as current request
{
Mutex::Autolock l(mRequestLock);
mNextRequests.clear();
}
return submitRequestSuccess;
}
这里只关注上面标注的三个函数:
① Camera3Device::RequestThread::waitForNextRequestBatch;
② Camera3Device::RequestThread::prepareHalRequests;
③ Camera3Device::HalInterface::processBatchCaptureRequests;
首先看下waitForNextRequestBatch具体实现:
/**
 * Fills mNextRequests with the next batch: one NextRequest always, plus
 * (batchSize - 1) additional ones for constrained high-speed recording.
 * Leaves mNextRequests empty if no request arrived before the wait timed out.
 */
void Camera3Device::RequestThread::waitForNextRequestBatch() {
    ATRACE_CALL();
    // Optimized a bit for the simple steady-state case (single repeating
    // request), to avoid putting that request in the queue temporarily.
    Mutex::Autolock l(mRequestLock);
    assert(mNextRequests.empty());
    NextRequest nextRequest;
    nextRequest.captureRequest = waitForNextRequestLocked();
    if (nextRequest.captureRequest == nullptr) {
        return;
    }
    // Create an empty camera3_capture_request_t; this field is the one that
    // prepareHalRequests later populates for the HAL.
    nextRequest.halRequest = camera3_capture_request_t();
    nextRequest.submitted = false;
    // Append to the member vector mNextRequests.
    mNextRequests.add(nextRequest);
    // Wait for additional requests
    const size_t batchSize = nextRequest.captureRequest->mBatchSize;
    // batchSize is 1 on TV, so this loop body does not execute.
    for (size_t i = 1; i < batchSize; i++) {
        NextRequest additionalRequest;
        additionalRequest.captureRequest = waitForNextRequestLocked();
        if (additionalRequest.captureRequest == nullptr) {
            break;
        }
        additionalRequest.halRequest = camera3_capture_request_t();
        additionalRequest.submitted = false;
        mNextRequests.add(additionalRequest);
    }
    // An incomplete high-speed batch cannot be submitted; fail the whole batch.
    if (mNextRequests.size() < batchSize) {
        ALOGE("RequestThread: only get %zu out of %zu requests. Skipping requests.",
                mNextRequests.size(), batchSize);
        cleanUpFailedRequests(/*sendRequestError*/true);
    }
    return;
}
这里创建了一个NextRequest对象,然后进行填充,再push进全局变量mNextRequests中。看下NextRequests该结构体有如下四个成员变量:
// One entry of RequestThread::mNextRequests: pairs a framework-level
// CaptureRequest with the HAL-level request derived from it.
struct NextRequest {
    sp<CaptureRequest> captureRequest;              // framework request (settings, streams, surfaces)
    camera3_capture_request_t halRequest;           // HAL request, filled in by prepareHalRequests
    Vector<camera3_stream_buffer_t> outputBuffers;  // output buffers acquired for this frame
    bool submitted;                                 // true once handed to the HAL
};
通过waitForNextRequestLocked来获取CaptureRequest对象,看其具体实现:
/**
 * Returns the next CaptureRequest to process: a queued (one-shot) request if
 * any exists, otherwise the current repeating request; blocks with a timeout
 * when neither is available. Also stamps per-frame result extras and acquires
 * the input buffer for reprocess requests. Returns NULL when idle.
 */
sp<Camera3Device::CaptureRequest>
        Camera3Device::RequestThread::waitForNextRequestLocked() {
    status_t res;
    sp<CaptureRequest> nextRequest;
    while (mRequestQueue.empty()) {
        if (!mRepeatingRequests.empty()) {
            // If the app has issued a repeating request, mRepeatingRequests is
            // non-empty; take its only element.
            const RequestList &requests = mRepeatingRequests;
            RequestList::const_iterator firstRequest =
                    requests.begin();
            nextRequest = *firstRequest;
            mRequestQueue.insert(mRequestQueue.end(),
                    ++firstRequest,
                    requests.end());
            // No need to wait any longer
            // After the insert: mRequestQueue.size() == 0, requests.size() == 1.
            mRepeatingLastFrameNumber = mFrameNumber + requests.size() - 1;
            // Leave the while loop; no further blocking needed.
            break;
        }
        // If the app never called setRepeatingRequest, mRepeatingRequests stays
        // empty and we block here for kRequestTimeout (50 ms per the author's note).
        res = mRequestSignal.waitRelative(mRequestLock, kRequestTimeout);
        if ((mRequestQueue.empty() && mRepeatingRequests.empty()) ||
                exitPending()) {
            Mutex::Autolock pl(mPauseLock);
            if (mPaused == false) {
                ALOGV("%s: RequestThread: Going idle", __FUNCTION__);
                mPaused = true;
                // Let the tracker know
                sp<StatusTracker> statusTracker = mStatusTracker.promote();
                if (statusTracker != 0) {
                    statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
                }
            }
            // Stop waiting for now and let thread management happen
            return NULL;
        }
    }
    if (nextRequest == NULL) {
        // Don't have a repeating request already in hand, so queue
        // must have an entry now.
        RequestList::iterator firstRequest =
                mRequestQueue.begin();
        nextRequest = *firstRequest;
        mRequestQueue.erase(firstRequest);
        if (mRequestQueue.empty() && !nextRequest->mRepeating) {
            sp<NotificationListener> listener = mListener.promote();
            if (listener != NULL) {
                listener->notifyRequestQueueEmpty();
            }
        }
    }
    // In case we've been unpaused by setPaused clearing mDoPause, need to
    // update internal pause state (capture/setRepeatingRequest unpause
    // directly).
    Mutex::Autolock pl(mPauseLock);
    if (mPaused) {
        ALOGV("%s: RequestThread: Unpaused", __FUNCTION__);
        sp<StatusTracker> statusTracker = mStatusTracker.promote();
        if (statusTracker != 0) {
            statusTracker->markComponentActive(mStatusId);
        }
    }
    mPaused = false;
    // Check if we've reconfigured since last time, and reset the preview
    // request if so. Can't use 'NULL request == repeat' across configure calls.
    if (mReconfigured) {
        mPrevRequest.clear();
        mReconfigured = false;
    }
    // If a request was obtained, stamp its per-frame fields. mFrameNumber is
    // monotonically increasing, so every frame gets a distinct number.
    if (nextRequest != NULL) {
        nextRequest->mResultExtras.frameNumber = mFrameNumber++;
        nextRequest->mResultExtras.afTriggerId = mCurrentAfTriggerId;
        nextRequest->mResultExtras.precaptureTriggerId = mCurrentPreCaptureTriggerId;
        // Since RequestThread::clear() removes buffers from the input stream,
        // get the right buffer here before unlocking mRequestLock
        if (nextRequest->mInputStream != NULL) {
            res = nextRequest->mInputStream->getInputBuffer(&nextRequest->mInputBuffer);
            if (res != OK) {
                // Can't get input buffer from gralloc queue - this could be due to
                // disconnected queue or other producer misbehavior, so not a fatal
                // error
                ALOGE("%s: Can't get input buffer, skipping request:"
                        " %s (%d)", __FUNCTION__, strerror(-res), res);
                sp<NotificationListener> listener = mListener.promote();
                if (listener != NULL) {
                    listener->notifyError(
                            hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
                            nextRequest->mResultExtras);
                }
                return NULL;
            }
        }
    }
    return nextRequest;
}
这个mRequestQueue只有在拍照的流程中会向其添加成员,在预览的时候,会一直保持为empty状态。这里也可以看出拍照的优先级是大于预览的,每次循环在执行该函数的时候,就会先去判断mRequestQueue是否为空,如果为空就表示没有拍照请求发送过来,只要处理好预览请求就好了。mRepeatingRequests中存放的CaptureRequest为setRepeatingRequest之前创建好的,赋值给nextRequest.captureRequest。所以,waitForNextRequestBatch就是在阻塞创建NextRequest对象,如果成功创建该对象,就会进入到第二阶段prepareHalRequests。
看下prepareHalRequests的具体实现:
// Second stage of the request loop: for every entry in mNextRequests, build
// the HAL-facing camera3_capture_request_t — settings metadata,
// physical-camera settings, optional input buffer, and one freshly acquired
// output buffer per target stream — then register the request as in-flight
// with the parent Camera3Device.
// @return OK on success; TIMED_OUT when an output buffer could not be
//         acquired; INVALID_OPERATION on other failures.
status_t Camera3Device::RequestThread::prepareHalRequests() {
ATRACE_CALL();
for (size_t i = 0; i < mNextRequests.size(); i++) {
auto& nextRequest = mNextRequests.editItemAt(i);
sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
camera3_capture_request_t* halRequest = &nextRequest.halRequest;
Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
// Prepare a request to HAL
halRequest->frame_number = captureRequest->mResultExtras.frameNumber;
// Insert any queued triggers (before metadata is locked)
status_t res = insertTriggers(captureRequest);
if (res < 0) {
SET_ERR("RequestThread: Unable to insert triggers "
"(capture request %d, HAL device: %s (%d)",
halRequest->frame_number, strerror(-res), res);
return INVALID_OPERATION;
}
int triggerCount = res;
bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
mPrevTriggers = triggerCount;
// If the request is the same as last, or we had triggers last time
bool newRequest = mPrevRequest != captureRequest || triggersMixedIn;
if (newRequest) {
/**
* HAL workaround:
* Insert a dummy trigger ID if a trigger is set but no trigger ID is
*/
res = addDummyTriggerIds(captureRequest);
if (res != OK) {
SET_ERR("RequestThread: Unable to insert dummy trigger IDs "
"(capture request %d, HAL device: %s (%d)",
halRequest->frame_number, strerror(-res), res);
return INVALID_OPERATION;
}
{
// Correct metadata regions for distortion correction if enabled
sp<Camera3Device> parent = mParent.promote();
if (parent != nullptr) {
res = parent->mDistortionMapper.correctCaptureRequest(
&(captureRequest->mSettingsList.begin()->metadata));
if (res != OK) {
SET_ERR("RequestThread: Unable to correct capture requests "
"for lens distortion for request %d: %s (%d)",
halRequest->frame_number, strerror(-res), res);
return INVALID_OPERATION;
}
}
}
/**
* The request should be presorted so accesses in HAL
* are O(logn). Sidenote, sorting a sorted metadata is nop.
*/
captureRequest->mSettingsList.begin()->metadata.sort();
halRequest->settings = captureRequest->mSettingsList.begin()->metadata.getAndLock();
mPrevRequest = captureRequest;
ALOGVV("%s: Request settings are NEW", __FUNCTION__);
IF_ALOGV() {
camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
find_camera_metadata_ro_entry(
halRequest->settings,
ANDROID_CONTROL_AF_TRIGGER,
&e
);
if (e.count > 0) {
ALOGV("%s: Request (frame num %d) had AF trigger 0x%x",
__FUNCTION__,
halRequest->frame_number,
e.data.u8[0]);
}
}
} else {
// leave request.settings NULL to indicate 'reuse latest given'
ALOGVV("%s: Request settings are REUSED",
__FUNCTION__);
}
if (captureRequest->mSettingsList.size() > 1) {
halRequest->num_physcam_settings = captureRequest->mSettingsList.size() - 1;
halRequest->physcam_id = new const char* [halRequest->num_physcam_settings];
if (newRequest) {
halRequest->physcam_settings =
new const camera_metadata* [halRequest->num_physcam_settings];
} else {
halRequest->physcam_settings = nullptr;
}
auto it = ++captureRequest->mSettingsList.begin();
size_t i = 0;
for (; it != captureRequest->mSettingsList.end(); it++, i++) {
halRequest->physcam_id[i] = it->cameraId.c_str();
if (newRequest) {
it->metadata.sort();
halRequest->physcam_settings[i] = it->metadata.getAndLock();
}
}
}
uint32_t totalNumBuffers = 0;
// Fill in buffers
if (captureRequest->mInputStream != NULL) {
halRequest->input_buffer = &captureRequest->mInputBuffer;
totalNumBuffers += 1;
} else {
halRequest->input_buffer = NULL;
}
// Create captureRequest->mOutputStreams.size() empty camera3_stream_buffer_t
// entries and place them in nextRequest.outputBuffers.
outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
captureRequest->mOutputStreams.size());
// Point halRequest->output_buffers at the outputBuffers array;
// the entries themselves are populated below.
halRequest->output_buffers = outputBuffers->array();
std::set<String8> requestedPhysicalCameras;
for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
// captureRequest->mOutputStreams was built during stream
// configuration (createStream).
sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(j);
// Prepare video buffers for high speed recording on the first video request.
if (mPrepareVideoStream && outputStream->isVideoStream()) {
// Only try to prepare video stream on the first video request.
mPrepareVideoStream = false;
res = outputStream->startPrepare(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX);
while (res == NOT_ENOUGH_DATA) {
res = outputStream->prepareNextBuffer();
}
if (res != OK) {
ALOGW("%s: Preparing video buffers for high speed failed: %s (%d)",
__FUNCTION__, strerror(-res), res);
outputStream->cancelPrepare();
}
}
// Dequeue a GraphicBuffer (the image memory the consumer surface will
// draw from) and its acquire-fence FD from the stream, storing the
// result into this entry of halRequest->output_buffers.
res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
captureRequest->mOutputSurfaces[j]);
if (res != OK) {
// Can't get output buffer from gralloc queue - this could be due to
// abandoned queue or other consumer misbehavior, so not a fatal
// error
ALOGE("RequestThread: Can't get output buffer, skipping request:"
" %s (%d)", strerror(-res), res);
return TIMED_OUT;
}
String8 physicalCameraId = outputStream->getPhysicalCameraId();
if (!physicalCameraId.isEmpty()) {
// Physical stream isn't supported for input request.
if (halRequest->input_buffer) {
CLOGE("Physical stream is not supported for input request");
return INVALID_OPERATION;
}
requestedPhysicalCameras.insert(physicalCameraId);
}
halRequest->num_output_buffers++;
}
totalNumBuffers += halRequest->num_output_buffers;
// Log request in the in-flight queue
sp<Camera3Device> parent = mParent.promote();
if (parent == NULL) {
// Should not happen, and nowhere to send errors to, so just log it
CLOGE("RequestThread: Parent is gone");
return INVALID_OPERATION;
}
// If this request list is for constrained high speed recording (not
// preview), and the current request is not the last one in the batch,
// do not send callback to the app.
bool hasCallback = true;
if (mNextRequests[0].captureRequest->mBatchSize > 1 && i != mNextRequests.size()-1) {
hasCallback = false;
}
res = parent->registerInFlight(halRequest->frame_number,
totalNumBuffers, captureRequest->mResultExtras,
/*hasInput*/halRequest->input_buffer != NULL,
hasCallback,
calculateMaxExpectedDuration(halRequest->settings),
requestedPhysicalCameras);
ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
", burstId = %" PRId32 ".",
__FUNCTION__,
captureRequest->mResultExtras.requestId, captureRequest->mResultExtras.frameNumber,
captureRequest->mResultExtras.burstId);
if (res != OK) {
SET_ERR("RequestThread: Unable to register new in-flight request:"
" %s (%d)", strerror(-res), res);
return INVALID_OPERATION;
}
}
return OK;
}
该函数就是在填充nextRequest.halRequest成员。其中一个重要的赋值就是申请GraphicBuffer,该动作是在getBuffer中完成的。
ps:这里先看下继承关系:
Camera3OutputStream继承于Camera3IOStreamBase、Camera3OutputStreamInterface;
Camera3IOStreamBase继承于Camera3Stream。
当有基类指针指向子类对象的时候,如果没有重写相应的函数,就直接去父类中查找。
这里GraphicBuffer的申请在Camera3Stream::getBuffer中,我们看下其具体实现:
// Acquire one output buffer for this stream into *buffer, blocking (up to
// kWaitForBufferDuration) when camera3_stream::max_buffers buffers are
// already handed out to the HAL.
// @param buffer       filled with the dequeued buffer handle and fences
// @param surface_ids  per-request target surfaces (for shared streams)
// @return OK on success; INVALID_OPERATION if the stream is not configured;
//         TIMED_OUT (or another error) when waiting/dequeuing fails.
status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer,
const std::vector<size_t>& surface_ids) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
status_t res = OK;
// This function should be only called when the stream is configured already.
if (mState != STATE_CONFIGURED) {
ALOGE("%s: Stream %d: Can't get buffers if stream is not in CONFIGURED state %d",
__FUNCTION__, mId, mState);
return INVALID_OPERATION;
}
// Wait for new buffer returned back if we are running into the limit.
// If mHandoutOutputBufferCount has hit the per-stream limit, block until
// a buffer is returned (signalled via mOutputBufferReturnedSignal).
if (getHandoutOutputBufferCountLocked() == camera3_stream::max_buffers) {
ALOGV("%s: Already dequeued max output buffers (%d), wait for next returned one.",
__FUNCTION__, camera3_stream::max_buffers);
nsecs_t waitStart = systemTime(SYSTEM_TIME_MONOTONIC);
res = mOutputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
nsecs_t waitEnd = systemTime(SYSTEM_TIME_MONOTONIC);
mBufferLimitLatency.add(waitStart, waitEnd);
if (res != OK) {
if (res == TIMED_OUT) {
ALOGE("%s: wait for output buffer return timed out after %lldms (max_buffers %d)",
__FUNCTION__, kWaitForBufferDuration / 1000000LL,
camera3_stream::max_buffers);
}
return res;
}
}
// Dequeue the GraphicBuffer (subclass hook, e.g. Camera3OutputStream).
res = getBufferLocked(buffer, surface_ids);
if (res == OK) {
fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
if (buffer->buffer) {
Mutex::Autolock l(mOutstandingBuffersLock);
mOutstandingBuffers.push_back(*buffer->buffer);
}
}
return res;
}
向hal层发送request请求之前,需要向surface申请buffer,不然从hal层获取的图像数据就无处安放了。每成功向hal层发送一次request,就会将mHandoutOutputBufferCount的值+1,如果该值等于camera3_stream::max_buffers(tv上该值为4,也就是最多只能发送4个request请求),就会阻塞等待3000ms,如果此期间没有激活信号发来,就会返回,不会再下发request请求,并且抛出异常,报错打印如下:
Camera3Stream: wait for output buffer return timed out after 3000ms (max_buffers 4)
Camera3Device: RequestThread: Can't get output buffer, skipping request:***
申请GraphicBuffer是在Camera3OutputStream::getBufferLocked中进行的,看其具体实现:
// Dequeue one GraphicBuffer (plus its acquire fence) for this output stream
// and hand it out to the HAL via the caller-provided camera3_stream_buffer.
// @param buffer  entry of halRequest->output_buffers to populate
// @return OK on success, or the error from getBufferLockedCommon().
status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer,
        const std::vector<size_t>&) {
    ATRACE_CALL();

    ANativeWindowBuffer* nativeBuffer = nullptr;
    int acquireFenceFd = -1;

    // Obtain the GraphicBuffer and its fence FD from the buffer manager or
    // the consumer surface.
    const status_t err = getBufferLockedCommon(&nativeBuffer, &acquireFenceFd);
    if (err != OK) {
        return err;
    }

    /**
     * FenceFD now owned by HAL except in case of error,
     * in which case we reassign it to acquire_fence
     */
    // Copy handle/fences into *buffer and update handed-out buffer counts.
    handoutBufferLocked(*buffer, &(nativeBuffer->handle),
            /*acquireFence*/acquireFenceFd, /*releaseFence*/-1,
            CAMERA3_BUFFER_STATUS_OK, /*output*/true);
    return OK;
}
先看下Camera3OutputStream::getBufferLockedCommon中如何获取GraphicBuffer:
// Obtain one ANativeWindowBuffer (GraphicBuffer) and acquire-fence FD for
// this stream — either via the shared Camera3BufferManager or by dequeuing
// directly from the consumer surface — then flush the consumer's
// removed-buffer list into the HAL-side buffer-cache bookkeeping.
status_t Camera3OutputStream::getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd) {
ATRACE_CALL();
status_t res;
if ((res = getBufferPreconditionCheckLocked()) != OK) {
return res;
}
bool gotBufferFromManager = false;
if (mUseBufferManager) {
sp<GraphicBuffer> gb;
res = mBufferManager->getBufferForStream(getId(), getStreamSetId(), &gb, fenceFd);
if (res == OK) {
// Attach this buffer to the bufferQueue: the buffer will be in dequeue state after a
// successful return.
*anb = gb.get();
res = mConsumer->attachBuffer(*anb);
if (res != OK) {
ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
gotBufferFromManager = true;
ALOGV("Stream %d: Attached new buffer", getId());
} else if (res == ALREADY_EXISTS) {
// Have sufficient free buffers already attached, can just
// dequeue from buffer queue
ALOGV("Stream %d: Reusing attached buffer", getId());
gotBufferFromManager = false;
} else if (res != OK) {
ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
}
if (!gotBufferFromManager) {
/**
* Release the lock briefly to avoid deadlock for below scenario:
* Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
* This thread acquired StreamingProcessor lock and try to lock Camera3Stream lock.
* Thread 2: Camera3Stream::returnBuffer->StreamingProcessor::onFrameAvailable().
* This thread acquired Camera3Stream lock and bufferQueue lock, and try to lock
* StreamingProcessor lock.
* Thread 3: Camera3Stream::getBuffer(). This thread acquired Camera3Stream lock
* and try to lock bufferQueue lock.
* Then there is circular locking dependency.
*/
// mConsumer is the surface handed in when the stream was configured
// (i.e. when this Camera3OutputStream was constructed).
sp<ANativeWindow> currentConsumer = mConsumer;
mLock.unlock();
nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
// Surface::dequeueBuffer (analyzed earlier): pick a free mSlots index
// from the BufferQueue, then map that slot's GraphicBuffer into the
// Surface.
res = currentConsumer->dequeueBuffer(currentConsumer.get(), anb, fenceFd);
nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
mLock.lock();
if (res != OK) {
ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
// Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is STATE_PREPARING,
// let prepareNextBuffer handle the error.)
if (res == NO_INIT && mState == STATE_CONFIGURED) {
mState = STATE_ABANDONED;
}
return res;
}
}
if (res == OK) {
std::vector<sp<GraphicBuffer>> removedBuffers;
// Fetch the list of GraphicBuffers the surface has dropped
// (removedBuffers); when cameraservice next requests frame data from
// the CameraProvider it forwards this list together with the acquired
// GraphicBuffer so the provider can evict its cached copies.
res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
if (res == OK) {
// Forwards removedBuffers into Camera3Device::HalInterface::mFreedBuffers.
onBuffersRemovedLocked(removedBuffers);
if (mUseBufferManager && removedBuffers.size() > 0) {
mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), removedBuffers.size());
}
}
}
return res;
}
通过上面分析发现,CameraServer会通过Surface::dequeueBuffer来申请GraphicBuffer及相应的fenceFd,这个在之前的博客中有介绍,这里不再赘述。再通过Surface::getAndFlushRemovedBuffers获取Surface中需要被释放的GraphicBuffer列表removedBuffers,分析一下该removedBuffers的流转流程:
// Notify the registered buffer-freed listener about each GraphicBuffer the
// consumer has dropped, identified by this stream's mId and the buffer handle.
void Camera3OutputStream::onBuffersRemovedLocked(
        const std::vector<sp<GraphicBuffer>>& removedBuffers) {
    const sp<Camera3StreamBufferFreedListener> listener = mBufferFreedListener.promote();
    if (listener == nullptr) {
        return;
    }
    for (const auto& removed : removedBuffers) {
        // mId ties the freed handle back to the owning stream/surface.
        listener->onBufferFreed(mId, removed->handle);
    }
}
这个mBufferFreedListener是父类Camera3Stream中的成员,在执行Camera3Device::HalInterface::configureStreams时会从mOutputStreams成员中获取相应的
Camera3Stream指针,并设置了bufferfreed监听:cam3stream->setBufferFreedListener(this);这里会将Camera3Device::HalInterface(继承于Camera3StreamBufferFreedListener)自身作为参数设置进去:
// Install the listener invoked when the consumer drops a buffer. May only be
// called while the stream is being (re)configured.
void Camera3Stream::setBufferFreedListener(
        wp<Camera3StreamBufferFreedListener> listener) {
    Mutex::Autolock l(mLock);
    // Only allow set listener during stream configuration because stream is guaranteed to be IDLE
    // at this state, so setBufferFreedListener won't collide with onBufferFreed callbacks
    const bool configuring =
            (mState == STATE_IN_CONFIG) || (mState == STATE_IN_RECONFIG);
    if (configuring) {
        mBufferFreedListener = listener;
        return;
    }
    ALOGE("%s: listener must be set during stream configuration!",__FUNCTION__);
}
因此最终执行Camera3Device::HalInterface::onBufferFreed来将需要释放的GraphicBuffer队列传到Camera3Device::HalInterface::mFreedBuffers中,具体实现如下:
// Camera3StreamBufferFreedListener callback: the consumer surface dropped the
// GraphicBuffer identified by (streamId, handle). Remove it from the stream's
// buffer-id cache and remember (streamId, bufferId) in mFreedBuffers so the
// next processBatchCaptureRequests() can tell the HAL to evict its cache.
void Camera3Device::HalInterface::onBufferFreed(
int streamId, const native_handle_t* handle) {
std::lock_guard<std::mutex> lock(mBufferIdMapLock);
uint64_t bufferId = BUFFER_ID_NO_BUFFER;
auto mapIt = mBufferIdMaps.find(streamId);
if (mapIt == mBufferIdMaps.end()) {
// streamId might be from a deleted stream here
ALOGI("%s: stream %d has been removed",
__FUNCTION__, streamId);
return;
}
BufferIdMap& bIdMap = mapIt->second;
auto it = bIdMap.find(handle);
if (it == bIdMap.end()) {
ALOGW("%s: cannot find buffer %p in stream %d",
__FUNCTION__, handle, streamId);
return;
} else {
bufferId = it->second;
bIdMap.erase(it);
ALOGV("%s: stream %d now have %zu buffer caches after removing buf %p",
__FUNCTION__, streamId, bIdMap.size(), handle);
}
// Queued here; consumed (and cleared) by processBatchCaptureRequests().
mFreedBuffers.push_back(std::make_pair(streamId, bufferId));
}
至此,surface中需要获取的GraphicBuffer和需要释放的GraphicBuffer队列均已获得。
那获取到了的GraphicBuffer是如何赋值给halRequest->output_buffers的,继续看Camera3IOStreamBase::handoutBufferLocked实现:
// Populate `buffer` (an entry of halRequest->output_buffers) with the
// dequeued buffer handle, fences and status, and update the stream's
// busy-buffer accounting / status tracker.
void Camera3IOStreamBase::handoutBufferLocked(camera3_stream_buffer &buffer,
buffer_handle_t *handle,
int acquireFence,
int releaseFence,
camera3_buffer_status_t status,
bool output) {
/**
* Note that all fences are now owned by HAL.
*/
// Handing out a raw pointer to this object. Increment internal refcount.
incStrong(this);
buffer.stream = this;
buffer.buffer = handle;
buffer.acquire_fence = acquireFence;
buffer.release_fence = releaseFence;
buffer.status = status;
// Inform tracker about becoming busy
if (mHandoutTotalBufferCount == 0 && mState != STATE_IN_CONFIG &&
mState != STATE_IN_RECONFIG && mState != STATE_PREPARING) {
/**
* Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers
* before/after register_stream_buffers during initial configuration
* or re-configuration, or during prepare pre-allocation
*/
sp<StatusTracker> statusTracker = mStatusTracker.promote();
if (statusTracker != 0) {
statusTracker->markComponentActive(mStatusId);
}
}
mHandoutTotalBufferCount++;
if (output) {
// This count gates whether the next request can be sent (see the
// max_buffers wait in Camera3Stream::getBuffer).
mHandoutOutputBufferCount++;
}
}
这里就是对传入的halRequest->output_buffers中的属性进行各种填充。
接下来,分析下一阶段Camera3Device::HalInterface::processBatchCaptureRequests,也就是如何将上述准备好的halRequest发送给hal层,分析一下该函数:
// Convert the prepared camera3_capture_request_t batch into HIDL
// CaptureRequests (v3.2 or v3.4), push settings metadata through the request
// FMQ when possible, attach the buffer-cache eviction list built from
// mFreedBuffers, and invoke the HAL session's processCaptureRequest.
// @param requests             prepared HAL requests for this batch
// @param numRequestProcessed  out: number of requests the HAL accepted
status_t Camera3Device::HalInterface::processBatchCaptureRequests(
std::vector<camera3_capture_request_t*>& requests,/*out*/uint32_t* numRequestProcessed) {
ATRACE_NAME("CameraHal::processBatchCaptureRequests");
if (!valid()) return INVALID_OPERATION;
sp<device::V3_4::ICameraDeviceSession> hidlSession_3_4;
auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
if (castResult_3_4.isOk()) {
hidlSession_3_4 = castResult_3_4;
}
hardware::hidl_vec<device::V3_2::CaptureRequest> captureRequests;
hardware::hidl_vec<device::V3_4::CaptureRequest> captureRequests_3_4;
size_t batchSize = requests.size();
if (hidlSession_3_4 != nullptr) {
// Common path nowadays: one request per batch, so batchSize is 1.
captureRequests_3_4.resize(batchSize);
} else {
captureRequests.resize(batchSize);
}
std::vector<native_handle_t*> handlesCreated;
// Convert each request from camera3_capture_request_t to
// device::V3_4::CaptureRequest; this also converts the output_buffers
// members from camera3_stream_buffer_t to StreamBuffer.
for (size_t i = 0; i < batchSize; i++) {
if (hidlSession_3_4 != nullptr) {
wrapAsHidlRequest(requests[i], /*out*/&captureRequests_3_4[i].v3_2,
/*out*/&handlesCreated);
} else {
wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
}
}
// Move the contents of mFreedBuffers into cachesToRemove, then clear
// mFreedBuffers.
std::vector<device::V3_2::BufferCache> cachesToRemove;
{
std::lock_guard<std::mutex> lock(mBufferIdMapLock);
for (auto& pair : mFreedBuffers) {
// The stream might have been removed since onBufferFreed
if (mBufferIdMaps.find(pair.first) != mBufferIdMaps.end()) {
cachesToRemove.push_back({pair.first, pair.second});
}
}
mFreedBuffers.clear();
}
common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
*numRequestProcessed = 0;
// Write metadata to FMQ.
for (size_t i = 0; i < batchSize; i++) {
camera3_capture_request_t* request = requests[i];
device::V3_2::CaptureRequest* captureRequest;
if (hidlSession_3_4 != nullptr) {
captureRequest = &captureRequests_3_4[i].v3_2;
} else {
captureRequest = &captureRequests[i];
}
if (request->settings != nullptr) {
size_t settingsSize = get_camera_metadata_size(request->settings);
if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
reinterpret_cast<const uint8_t*>(request->settings), settingsSize)) {
captureRequest->settings.resize(0);
captureRequest->fmqSettingsSize = settingsSize;
} else {
if (mRequestMetadataQueue != nullptr) {
ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
}
captureRequest->settings.setToExternal(
reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(request->settings)),
get_camera_metadata_size(request->settings));
captureRequest->fmqSettingsSize = 0u;
}
} else {
// A null request settings maps to a size-0 CameraMetadata
captureRequest->settings.resize(0);
captureRequest->fmqSettingsSize = 0u;
}
if (hidlSession_3_4 != nullptr) {
captureRequests_3_4[i].physicalCameraSettings.resize(request->num_physcam_settings);
for (size_t j = 0; j < request->num_physcam_settings; j++) {
if (request->physcam_settings != nullptr) {
size_t settingsSize = get_camera_metadata_size(request->physcam_settings[j]);
if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
reinterpret_cast<const uint8_t*>(request->physcam_settings[j]),
settingsSize)) {
captureRequests_3_4[i].physicalCameraSettings[j].settings.resize(0);
captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize =
settingsSize;
} else {
if (mRequestMetadataQueue != nullptr) {
ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
}
captureRequests_3_4[i].physicalCameraSettings[j].settings.setToExternal(
reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(
request->physcam_settings[j])),
get_camera_metadata_size(request->physcam_settings[j]));
captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize = 0u;
}
} else {
captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize = 0u;
captureRequests_3_4[i].physicalCameraSettings[j].settings.resize(0);
}
captureRequests_3_4[i].physicalCameraSettings[j].physicalCameraId =
request->physcam_id[j];
}
}
}
hardware::details::return_status err;
if (hidlSession_3_4 != nullptr) {
// Binder (HIDL) call: hand captureRequests_3_4 and cachesToRemove over
// to the CameraProvider process.
err = hidlSession_3_4->processCaptureRequest_3_4(captureRequests_3_4, cachesToRemove,
[&status, &numRequestProcessed] (auto s, uint32_t n) {
status = s;
*numRequestProcessed = n;
});
} else {
err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
[&status, &numRequestProcessed] (auto s, uint32_t n) {
status = s;
*numRequestProcessed = n;
});
}
if (!err.isOk()) {
ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
return DEAD_OBJECT;
}
if (status == common::V1_0::Status::OK && *numRequestProcessed != batchSize) {
ALOGE("%s: processCaptureRequest returns OK but processed %d/%zu requests",
__FUNCTION__, *numRequestProcessed, batchSize);
status = common::V1_0::Status::INTERNAL_ERROR;
}
for (auto& handle : handlesCreated) {
native_handle_delete(handle);
}
return CameraProviderManager::mapToStatusT(status);
}
从上述代码中分析得出,在向hal层申请帧数据之前,需要先做好如下两个准备工作:
1. 将halRequest保存到hidl_vec<device::V3_2::CaptureRequest>类型的captureRequests队列;
2. 将removedBuffer保存到std::vector<device::V3_2::BufferCache>类型的cachesToRemove队列。
接下来进入hal层进一步分析,hal层对应上述的相应函数:
// HIDL entry point for camera device v3.4 capture requests: first evict the
// buffer caches the framework no longer needs, then process each request in
// order, stopping at the first failure and reporting how many succeeded.
Return<void> ExternalCameraDeviceSession::processCaptureRequest_3_4(
        const hidl_vec<V3_4::CaptureRequest>& requests,
        const hidl_vec<V3_2::BufferCache>& cachesToRemove,
        ICameraDeviceSession::processCaptureRequest_3_4_cb _hidl_cb) {
    Mutex::Autolock _il(mInterfaceLock);
    // Remove the entries listed in cachesToRemove from mCirculatingBuffers.
    updateBufferCaches(cachesToRemove);

    Status result = Status::OK;
    uint32_t processedCount = 0;
    for (const auto& req : requests) {
        result = processOneCaptureRequest(req.v3_2);
        if (result != Status::OK) {
            break;
        }
        processedCount++;
    }
    _hidl_cb(result, processedCount);
    return Void();
}
先分析下ExternalCameraDeviceSession::updateBufferCaches的实现:
void ExternalCameraDeviceSession::updateBufferCaches(const hidl_vec<BufferCache>& cachesToRemove) {
Mutex::Autolock _l(mLock);
for (auto& cache : cachesToRemove) {
//获取cache.streamId的GraphicBuffe容器
auto cbsIt = mCirculatingBuffers.find(cache.streamId);
if (cbsIt == mCirculatingBuffers.end()) {
// The stream could have been removed
continue;
}
CirculatingBuffers& cbs = cbsIt->second;
//查看cache.bufferId是否在容器中
auto it = cbs.find(cache.bufferId);
if (it != cbs.end()) {
//释放该GraphicBuffer
sHandleImporter.freeBuffer(it->second);
//从容器中删除该GraphicBuffer
cbs.erase(it);
} else {
ALOGE("%s: stream %d buffer %" PRIu64 " is not cached",
__FUNCTION__, cache.streamId, cache.bufferId);
}
}
}
可以看出updateBufferCaches作用是更新mCirculatingBuffers,将cachesToRemove中的GraphicBuffer从mCirculatingBuffers中删除,并释放占用的空间。
继续分析processOneCaptureRequest实现:
// Handle a single V3_2::CaptureRequest: import its output buffers and fences,
// dequeue one raw V4L2 frame from the camera, then hand a filled HalRequest
// to the output thread for the remaining processing.
// (NOTE: the "......" lines mark code elided in this excerpt.)
Status ExternalCameraDeviceSession::processOneCaptureRequest(const CaptureRequest& request) {
ATRACE_CALL();
......
hidl_vec<buffer_handle_t*> allBufPtrs;
hidl_vec<int> allFences;
size_t numOutputBufs = request.outputBuffers.size();
if (numOutputBufs == 0) {
ALOGE("%s: capture request must have at least one output buffer!", __FUNCTION__);
return Status::ILLEGAL_ARGUMENT;
}
......
// importRequest does two things:
// 1. Collect every GraphicBuffer and acquire-fence FD carried by the
//    V3_2::CaptureRequest into allBufPtrs / allFences respectively.
// 2. If a GraphicBuffer reaches this CameraProvider for the first time,
//    register it in mCirculatingBuffers.
status = importRequest(request, allBufPtrs, allFences);
if (status != Status::OK) {
return status;
}
nsecs_t shutterTs = 0;
// DQBUF a raw, unprocessed frame straight from the camera via V4L2.
sp<V4L2Frame> frameIn = dequeueV4l2FrameLocked(&shutterTs);
if ( frameIn == nullptr) {
ALOGE("%s: V4L2 deque frame failed!", __FUNCTION__);
return Status::INTERNAL_ERROR;
}
// Fill in the HalRequest that will be handed to the output thread.
std::shared_ptr<HalRequest> halReq = std::make_shared<HalRequest>();
halReq->frameNumber = request.frameNumber;
halReq->setting = mLatestReqSetting;
halReq->frameIn = frameIn;
halReq->shutterTs = shutterTs;
halReq->buffers.resize(numOutputBufs);
for (size_t i = 0; i < numOutputBufs; i++) {
HalStreamBuffer& halBuf = halReq->buffers[i];
int streamId = halBuf.streamId = request.outputBuffers[i].streamId;
halBuf.bufferId = request.outputBuffers[i].bufferId;
const Stream& stream = mStreamMap[streamId];
halBuf.width = stream.width;
halBuf.height = stream.height;
halBuf.format = stream.format;
halBuf.usage = stream.usage;
halBuf.bufPtr = allBufPtrs[i];
halBuf.acquireFence = allFences[i];
halBuf.fenceTimeout = false;
}
{
std::lock_guard<std::mutex> lk(mInflightFramesLock);
mInflightFrames.insert(halReq->frameNumber);
}
// Send request to OutputThread for the rest of processing
// Signal the output thread, which will pick up and process halReq.
mOutputThread->submitRequest(halReq);
mFirstRequest = false;
return Status::OK;
}
接下来分析下importRequest实现:
// Resolve every output buffer of `request` to a locally imported
// buffer_handle_t (allBufPtrs) and a validated acquire fence (allFences),
// registering first-seen buffers in mCirculatingBuffers.
// @return Status::OK on success; ILLEGAL_ARGUMENT for a null first-time
//         handle; INTERNAL_ERROR on import/fence failures.
Status ExternalCameraDeviceSession::importRequest(
const CaptureRequest& request,
hidl_vec<buffer_handle_t*>& allBufPtrs,
hidl_vec<int>& allFences) {
size_t numOutputBufs = request.outputBuffers.size();
size_t numBufs = numOutputBufs;
// Validate all I/O buffers
hidl_vec<buffer_handle_t> allBufs;
hidl_vec<uint64_t> allBufIds;
allBufs.resize(numBufs);
allBufIds.resize(numBufs);
allBufPtrs.resize(numBufs);
allFences.resize(numBufs);
std::vector<int32_t> streamIds(numBufs);
for (size_t i = 0; i < numOutputBufs; i++) {
// request.outputBuffers[i].buffer is a native_handle_t*; allBufs[i] may
// legitimately be null when the buffer was cached on a prior request.
allBufs[i] = request.outputBuffers[i].buffer.getNativeHandle();
allBufIds[i] = request.outputBuffers[i].bufferId;
allBufPtrs[i] = &allBufs[i];
streamIds[i] = request.outputBuffers[i].streamId;
}
for (size_t i = 0; i < numBufs; i++) {
buffer_handle_t buf = allBufs[i];
uint64_t bufId = allBufIds[i];
CirculatingBuffers& cbs = mCirculatingBuffers[streamIds[i]];
// If bufId is unknown to mCirculatingBuffers, this buffer has never been
// passed before, so its handle must be non-null; otherwise the cached
// buffer_handle_t is reused straight from mCirculatingBuffers.
if (cbs.count(bufId) == 0) {
if (buf == nullptr) {
ALOGE("%s: bufferId %" PRIu64 " has null buffer handle!", __FUNCTION__, bufId);
return Status::ILLEGAL_ARGUMENT;
}
// Register a newly seen buffer
buffer_handle_t importedBuf = buf;
// importBuffer maps the handle into this process, giving us our own
// reference to the underlying buffer memory.
sHandleImporter.importBuffer(importedBuf);
if (importedBuf == nullptr) {
ALOGE("%s: output buffer %zu is invalid!", __FUNCTION__, i);
return Status::INTERNAL_ERROR;
} else {
// Cache the newly imported buffer in mCirculatingBuffers.
cbs[bufId] = importedBuf;
}
}
allBufPtrs[i] = &cbs[bufId];
}
// All allBufPtrs entries are non-null now; validate the acquire fence FDs.
for (size_t i = 0; i < numOutputBufs; i++) {
if (!sHandleImporter.importFence(
request.outputBuffers[i].acquireFence, allFences[i])) {
ALOGE("%s: output buffer %zu acquire fence is invalid", __FUNCTION__, i);
cleanupInflightFences(allFences, i);
return Status::INTERNAL_ERROR;
}
}
return Status::OK;
}
importRequest的作用是读取device::V3_2::CaptureRequest request中的所有GraphicBuffer(实际读取的是GraphicBuffer的handle)和bufferId。如果该bufferId对应的GraphicBuffer在mCirculatingBuffers中,则说明该GraphicBuffer以前已经传递给了CameraProvider,只需要从mCirculatingBuffers读取使用即可;如果bufferId对应的buffer不在mCirculatingBuffers中,则说明是首次将该GraphicBuffer传递给CameraProvider,则需要将该GraphicBuffer填充到mCirculatingBuffers中。
从上述流程看,在CameraProvider收到processCaptureRequest_3_4时,会根据captureRequests_3_4和cachesToRemove中的streamId和bufferId更新CameraDeviceSession中mCirculatingBuffers对应Stream的GraphicBuffer,最终将V3_2::CaptureRequest request转换为HalRequest类型的halReq。至此,完成了CameraProvider响应processCaptureRequest并从中获取CameraService传递来的GraphicBuffer及其对应的Fence。整个流程看似非常复杂,其实总结下,就是CameraService和CameraProvider之间通过GraphicBuffer handle传递GraphicBuffer,GraphicBuffer handle类型为native_handle_t*。
发送完halRequest后,processCaptureRequest_3_4就直接返回了,至于hal层怎么处理,cameraserver是不关心的,接下来cameraserver就会准备下一次的request。
接下来看看result流程:
// Callback invoked by the camera HAL for each (possibly partial) capture
// result. Validates the result, matches it against the in-flight request
// table by frame number, aggregates partial metadata, returns filled output
// buffers to their streams (or defers them until the shutter notification
// arrives), delivers final metadata to the upper layers, and returns the
// input buffer (reprocessing case) if one was sent with the request.
void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
ATRACE_CALL();
status_t res;
uint32_t frameNumber = result->frame_number;
// A result must carry at least one of: metadata, output buffers, or an
// input buffer. A completely empty result is a HAL error.
if (result->result == NULL && result->num_output_buffers == 0 &&
result->input_buffer == NULL) {
SET_ERR("No result data provided by HAL for frame %d",
frameNumber);
return;
}
// When partial results are not supported, any metadata-carrying result
// must report partial_result == 1 (i.e. the one and only "partial").
if (!mUsePartialResult &&
result->result != NULL &&
result->partial_result != 1) {
SET_ERR("Result is malformed for frame %d: partial_result %u must be 1"
" if partial result is not supported",
frameNumber, result->partial_result);
return;
}
bool isPartialResult = false;
CameraMetadata collectedPartialResult;
bool hasInputBufferInRequest = false;
// Get shutter timestamp and resultExtras from list of in-flight requests,
// where it was added by the shutter notification for this frame. If the
// shutter timestamp isn't received yet, append the output buffers to the
// in-flight request and they will be returned when the shutter timestamp
// arrives. Update the in-flight status and remove the in-flight entry if
// all result data and shutter timestamp have been received.
nsecs_t shutterTimestamp = 0;
{
Mutex::Autolock l(mInFlightLock);
// Look up the in-flight request this result belongs to; a miss means
// the HAL reported a frame the framework never issued (or already
// completed and removed).
ssize_t idx = mInFlightMap.indexOfKey(frameNumber);
if (idx == NAME_NOT_FOUND) {
SET_ERR("Unknown frame number for capture result: %d",
frameNumber);
return;
}
InFlightRequest &request = mInFlightMap.editValueAt(idx);
ALOGVV("%s: got InFlightRequest requestId = %" PRId32
", frameNumber = %" PRId64 ", burstId = %" PRId32
", partialResultCount = %d, hasCallback = %d",
__FUNCTION__, request.resultExtras.requestId,
request.resultExtras.frameNumber, request.resultExtras.burstId,
result->partial_result, request.hasCallback);
// Always update the partial count to the latest one if it's not 0
// (buffers only). When framework aggregates adjacent partial results
// into one, the latest partial count will be used.
if (result->partial_result != 0)
request.resultExtras.partialResultCount = result->partial_result;
// Check if this result carries only partial metadata
if (mUsePartialResult && result->result != NULL) {
// Partial index must be within [1, mNumPartialResults]; the value
// equal to mNumPartialResults marks the final metadata packet.
if (result->partial_result > mNumPartialResults || result->partial_result < 1) {
SET_ERR("Result is malformed for frame %d: partial_result %u must be in"
" the range of [1, %d] when metadata is included in the result",
frameNumber, result->partial_result, mNumPartialResults);
return;
}
isPartialResult = (result->partial_result < mNumPartialResults);
// Physical-camera metadata is only allowed on the final result.
if (isPartialResult && result->num_physcam_metadata) {
SET_ERR("Result is malformed for frame %d: partial_result not allowed for"
" physical camera result", frameNumber);
return;
}
if (isPartialResult) {
// Accumulate partial metadata so the final result can merge it.
request.collectedPartialResult.append(result->result);
}
if (isPartialResult && request.hasCallback) {
// Send partial capture result
sendPartialCaptureResult(result->result, request.resultExtras,
frameNumber);
}
}
shutterTimestamp = request.shutterTimestamp;
hasInputBufferInRequest = request.hasInputBuffer;
// Did we get the (final) result metadata for this capture?
if (result->result != NULL && !isPartialResult) {
// Each requested physical camera must deliver exactly one metadata
// packet with the final result.
if (request.physicalCameraIds.size() != result->num_physcam_metadata) {
SET_ERR("Requested physical Camera Ids %d not equal to number of metadata %d",
request.physicalCameraIds.size(), result->num_physcam_metadata);
return;
}
// Final metadata may only arrive once per frame.
if (request.haveResultMetadata) {
SET_ERR("Called multiple times with metadata for frame %d",
frameNumber);
return;
}
// Tick off each physical camera id as its metadata arrives; a
// duplicate id means the HAL returned it twice.
for (uint32_t i = 0; i < result->num_physcam_metadata; i++) {
String8 physicalId(result->physcam_ids[i]);
std::set<String8>::iterator cameraIdIter =
request.physicalCameraIds.find(physicalId);
if (cameraIdIter != request.physicalCameraIds.end()) {
request.physicalCameraIds.erase(cameraIdIter);
} else {
SET_ERR("Total result for frame %d has already returned for camera %s",
frameNumber, physicalId.c_str());
return;
}
}
// Move the accumulated partial metadata out of the request so it can
// be merged into the final result outside the request entry.
if (mUsePartialResult &&
!request.collectedPartialResult.isEmpty()) {
collectedPartialResult.acquire(
request.collectedPartialResult);
}
request.haveResultMetadata = true;
}
// Count returned buffers (including the input buffer, if the request
// actually had one) against the request's outstanding-buffer budget.
uint32_t numBuffersReturned = result->num_output_buffers;
if (result->input_buffer != NULL) {
if (hasInputBufferInRequest) {
numBuffersReturned += 1;
} else {
ALOGW("%s: Input buffer should be NULL if there is no input"
" buffer sent in the request",
__FUNCTION__);
}
}
request.numBuffersLeft -= numBuffersReturned;
if (request.numBuffersLeft < 0) {
SET_ERR("Too many buffers returned for frame %d",
frameNumber);
return;
}
// Cache the sensor timestamp from the metadata, if present, for later
// consistency checks against the shutter notification.
camera_metadata_ro_entry_t entry;
res = find_camera_metadata_ro_entry(result->result,
ANDROID_SENSOR_TIMESTAMP, &entry);
if (res == OK && entry.count == 1) {
request.sensorTimestamp = entry.data.i64[0];
}
// If shutter event isn't received yet, append the output buffers to
// the in-flight request. Otherwise, return the output buffers to
// streams.
if (shutterTimestamp == 0) {
request.pendingOutputBuffers.appendArray(result->output_buffers,
result->num_output_buffers);
} else {
// Queue each filled buffer back to its stream (ultimately via
// Surface::queueBuffer) so the consumer side is notified.
returnOutputBuffers(result->output_buffers,
result->num_output_buffers, shutterTimestamp);
}
if (result->result != NULL && !isPartialResult) {
// Copy per-physical-camera metadata into the request entry.
for (uint32_t i = 0; i < result->num_physcam_metadata; i++) {
CameraMetadata physicalMetadata;
physicalMetadata.append(result->physcam_metadata[i]);
request.physicalMetadatas.push_back({String16(result->physcam_ids[i]),
physicalMetadata});
}
if (shutterTimestamp == 0) {
// Shutter not seen yet: stash the metadata; it will be delivered
// when the shutter notification arrives.
request.pendingMetadata = result->result;
request.collectedPartialResult = collectedPartialResult;
} else if (request.hasCallback) {
CameraMetadata metadata;
metadata = result->result;
// Deliver the final result for this request up through the
// CameraDeviceCallback path to the application layer.
sendCaptureResult(metadata, request.resultExtras,
collectedPartialResult, frameNumber,
hasInputBufferInRequest, request.physicalMetadatas);
}
}
// Drop the in-flight entry once metadata, all buffers, and the shutter
// timestamp have all been received.
removeInFlightRequestIfReadyLocked(idx);
} // scope for mInFlightLock
// Return the input buffer (reprocessing case) outside the in-flight lock.
if (result->input_buffer != NULL) {
if (hasInputBufferInRequest) {
Camera3Stream *stream =
Camera3Stream::cast(result->input_buffer->stream);
res = stream->returnInputBuffer(*(result->input_buffer));
// Note: stream may be deallocated at this point, if this buffer was the
// last reference to it.
if (res != OK) {
ALOGE("%s: RequestThread: Can't return input buffer for frame %d to"
" its stream:%s (%d)", __FUNCTION__,
frameNumber, strerror(-res), res);
}
} else {
ALOGW("%s: Input buffer should be NULL if there is no input"
" buffer sent in the request, skipping input buffer return.",
__FUNCTION__);
}
}
}
这里主要分析两个函数:
① returnOutputBuffers,将buffer归还给surface。该通路会执行Surface::queueBuffer函数,告诉BufferQueue该buffer已经填充完毕,然后通知消费者去消费。该过程在前面的博客《BufferQueueProducer和surface联系》中已经分析过了,这里不再赘述。
② sendCaptureResult,一路回调到 CameraDeviceCallback 中,也就是告知上层,该 request 已经处理完毕。
该流程比较简单,这里就不再展开分析了;如有遗漏,后期再补充。BYE~