API1+HAL3
frameworks\av\services\camera\libcameraservice\api1\Camera2Client.cpp
// Excerpt: the app-facing preview Surface is handed down to StreamingProcessor.
status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder,
const sp<Surface>& window) {
...
// Forward the Surface; the inlined effect inside StreamingProcessor is shown below.
res = mStreamingProcessor->setPreviewWindow(window);
===>(StreamingProcessor.cpp)
// StreamingProcessor just caches the Surface until the preview stream is created.
mPreviewWindow = window;
<===
...
}
前文Camera 初始化(Preview) 一(Framework->HAL3)提到相机预览会调用到下面函数
frameworks\av\services\camera\libcameraservice\api1\client2\StreamingProcessor.cpp
// Excerpt: called during preview (re)configuration. On first use it creates the
// preview output stream, passing the cached mPreviewWindow (the app's Surface)
// down to Camera3Device::createStream.
// NOTE(review): fixed mojibake in the signature — "¶ms" was a mangled
// "&params" (the HTML entity &para; leaked into the paste).
status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
...
if (mPreviewStreamId == NO_STREAM) {
// No preview stream yet: create one backed by mPreviewWindow.
res = device->createStream(mPreviewWindow,
params.previewWidth, params.previewHeight,
CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, HAL_DATASPACE_UNKNOWN,
CAMERA3_STREAM_ROTATION_0, &mPreviewStreamId, String8());
if (res != OK) {
ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
}
}
这里Camera3Device在创建Stream的时候将Surface传递给了stream,在后面的分析开始前,先贴张图便于理解
其中生产者是相机(通过Surface写入数据),消费者是SurfaceFlinger(取出并合成显示)。
frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
// Single-surface convenience overload: validates the Surface and delegates to
// the multi-consumer createStream() with a one-element surface list.
status_t Camera3Device::createStream(sp<Surface> consumer,
        uint32_t width, uint32_t height, int format,
        android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
        const String8& physicalCameraId,
        std::vector<int> *surfaceIds, int streamSetId, bool isShared, uint64_t consumerUsage) {
    ATRACE_CALL();

    // Reject a null output surface up front.
    if (consumer == nullptr) {
        ALOGE("%s: consumer must not be null", __FUNCTION__);
        return BAD_VALUE;
    }

    // Wrap the single Surface in a one-element list; this path never uses a
    // deferred consumer.
    std::vector<sp<Surface>> surfaceList{consumer};
    return createStream(surfaceList, /*hasDeferredConsumer*/ false, width, height,
            format, dataSpace, rotation, id, physicalCameraId, surfaceIds, streamSetId,
            isShared, consumerUsage);
}
上面先保存了当前的Surface到Vector的容器中,并将容器向下传递
// Excerpt: the real stream-creation overload. The Surface ends up stored inside
// the new Camera3OutputStream (member mConsumer), and each surface's id is
// reported back to the caller via surfaceIds.
status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
const String8& physicalCameraId,
std::vector<int> *surfaceIds, int streamSetId, bool isShared, uint64_t consumerUsage) {
...
} else {
// The newly created stream carries the surface
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, format, dataSpace, rotation,
mTimestampOffset, physicalCameraId, streamSetId);
===> Camera3OutputStream.cpp
// Constructor initializer list: the surface is saved in mConsumer.
mConsumer(consumer),
<===
}
// Record the id of each surface
size_t consumerCount = consumers.size();
for (size_t i = 0; i < consumerCount; i++) {
int id = newStream->getSurfaceId(consumers[i]);
if (id < 0) {
SET_ERR_L("Invalid surface id");
return BAD_VALUE;
}
if (surfaceIds != nullptr) {
surfaceIds->push_back(id);
}
}
...
}
如上,我们在Camera3OutputStream的成员变量mConsumer中保存了surface。
之后,我们看下这个buffer是怎么填充的。前文Camera 初始化(Preview) 一(Framework->HAL3)的最后提到,框架层不断地下发preview request,会调用到prepareHalRequests
从而为HAL3准备buffer
frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
// Excerpt: for each queued capture request, populate the HAL request with one
// camera3_stream_buffer per output stream, then ask each stream for a real
// gralloc buffer via getBuffer().
status_t Camera3Device::RequestThread::prepareHalRequests() {
for (size_t i = 0; i < mNextRequests.size(); i++) {
auto& nextRequest = mNextRequests.editItemAt(i);
sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
camera3_capture_request_t* halRequest = &nextRequest.halRequest;
// outputBuffers is the container that holds this request's stream buffers
Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
...
/*
 * 1. Construct camera3_stream_buffer objects and insert them into outputBuffers
 * 2. Inserted starting at index 0; the count is
 *    captureRequest->mOutputStreams.size(), typically 1
 */
outputBuffers->insertAt(camera3_stream_buffer_t(), 0,captureRequest->mOutputStreams.size());
// halRequest->output_buffers points at the buffers inserted in the step above
halRequest->output_buffers = outputBuffers->array();
...
for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(j);
...
// Ask the output stream to fill this slot with an actual buffer
res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
captureRequest->mOutputSurfaces[j]);
...
}
...
}
frameworks\av\services\camera\libcameraservice\device3\Camera3Stream.cpp
// Excerpt: public getBuffer entry point — delegates to the subclass's
// getBufferLocked() (Camera3OutputStream, shown below).
// NOTE(review): fixed transcription typo — "es =" restored to "res =",
// matching the assignment in the upstream source.
status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer,
const std::vector<size_t>& surface_ids) {
...
res = getBufferLocked(buffer, surface_ids);
...
}
frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp
// Excerpt: dequeues an ANativeWindowBuffer from the preview Surface
// (mConsumer) and hands its native handle out to the HAL via the
// camera3_stream_buffer.
status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer,
const std::vector<size_t>&) {
ATRACE_CALL();
ANativeWindowBuffer* anb;
int fenceFd = -1;
status_t res;
// dequeueBuffer binds a buffer from the Surface to anb (ANativeWindowBuffer)
res = getBufferLockedCommon(&anb, &fenceFd);
===>
*anb = gb.get();
res = mConsumer->attachBuffer(*anb);
...
sp<ANativeWindow> currentConsumer = mConsumer;
// Here anb gets associated with the surface
res = currentConsumer->dequeueBuffer(currentConsumer.get(), anb, fenceFd);
<===
if (res != OK) {
return res;
}
/**
 * FenceFD now owned by HAL except in case of error,
 * in which case we reassign it to acquire_fence
 */
// Use the anb obtained above to populate the outgoing buffer
handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
/*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/true);
===>
buffer.stream = this;
buffer.buffer = handle; // this step is the key: the window buffer's handle goes to the HAL
buffer.acquire_fence = acquireFence;
buffer.release_fence = releaseFence;
buffer.status = status;
<===
return OK;
}
自此,我们终于将halRequest->output_buffers关联到了我们的预览窗口了
我们再来看HAL3
hardware\qcom\camera\qcamera2\hal3\QCamera3HWI.cpp
// Excerpt: HAL3 side. For each output buffer in the capture request, the
// owning channel imports (registers) the buffer: the gralloc fd is imported
// into ION and mmap'd, so the HAL writes directly into the window's memory.
int QCamera3HardwareInterface::processCaptureRequest(
camera3_capture_request_t *request,
List<InternalRequest> &internallyRequestedStreams)
{
...
for (size_t i = 0; i < request->num_output_buffers; i++) {
const camera3_stream_buffer_t& output = request->output_buffers[i];
QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
...
// channel->request() eventually registers the buffer (call chain inlined below)
rc = channel->request(output.buffer, frameNumber,
NULL, mParameters, indexUsed);
===>
registerBuffer(QCamera3Channel.cpp)
registerBuffer(QCamera3Mem.cpp)
===>
// Import the gralloc buffer's fd into ION...
mMemInfo[idx].main_ion_fd = open("/dev/ion", O_RDONLY);
ion_info_fd.fd = mPrivateHandle[idx]->fd;
if (ioctl(mMemInfo[idx].main_ion_fd,ION_IOC_IMPORT, &ion_info_fd) < 0) {
...
mMemInfo[idx].fd = mPrivateHandle[idx]->fd;
mMemInfo[idx].size = mPrivateHandle[idx]->size;
mMemInfo[idx].handle = ion_info_fd.handle;
// ...and map it into this process; mPtr[idx] is the CPU-visible address.
vaddr = mmap(NULL,
mMemInfo[idx].size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
mMemInfo[idx].fd, 0);
mPtr[idx] = vaddr;
<===
<===
...
}
如上我们将窗口的Buffer注册到了HAL3中
我们同时需要注意的是,V4L2向内核qbuf的时候用的也是这块内存。当我们start channel的时候,会调用到stream的MM_STREAM_EVT_GET_BUF分支;
hardware\qcom\camera\qcamera2\stack\mm-camera-interface\src\Mm_camera_stream.c
// Excerpt: reached from the stream's MM_STREAM_EVT_GET_BUF handling when a
// channel starts. The stream fetches its buffers from the owner
// (QCamera3Stream::getBufs) through the mem_vtbl callback.
// NOTE(review): fixed mojibake — "®_flags" was a mangled "&reg_flags"
// (the HTML entity &reg; leaked into the paste).
int32_t mm_stream_init_bufs(mm_stream_t * my_obj)
{
...
if (!my_obj->is_res_shared) {
rc = my_obj->mem_vtbl.get_bufs(&my_obj->frame_offset,
&my_obj->total_buf_cnt, &reg_flags, &my_obj->buf,
&my_obj->map_ops, my_obj->mem_vtbl.user_data);
===> QCamera3Stream.cpp
rc = stream->getBufs(offset, num_bufs, initial_reg_flag, bufs, ops_tbl);
<===
...
}
// Excerpt: supplies the stream's buffers to mm-camera-interface. Each valid
// buffer is mapped to the camera daemon, then an array of mm_camera_buf_def_t
// is allocated and filled via getBufDef().
// NOTE(review): the else-branch placement below looks mangled by the paste —
// in upstream the "bad index" error pairs with the BAD_INDEX check, not with
// valid(i); verify against the original QCamera3Stream.cpp.
int32_t QCamera3Stream::getBufs(cam_frame_len_offset_t *offset,
uint8_t *num_bufs,
uint8_t **initial_reg_flag,
mm_camera_buf_def_t **bufs,
mm_camera_map_unmap_ops_tbl_t *ops_tbl)
{
...
for (uint32_t i = 0; i < mNumBufs; i++) {
if (mStreamBufs->valid(i)) {
// Get the buffer size; returns mMemInfo[index].size
ssize_t bufSize = mStreamBufs->getSize(i);
if (BAD_INDEX != bufSize) {
// Map this buffer into the camera daemon process
rc = ops_tbl->map_ops(i, -1, mStreamBufs->getFd(i),
(size_t)bufSize, mStreamBufs->getPtr(i),
CAM_MAPPING_BUF_TYPE_STREAM_BUF,
ops_tbl->userdata);
...
}
} else {
LOGE("Failed to retrieve buffer size (bad index)");
return INVALID_OPERATION;
}
}
}
...
// Allocate the buf-def array
mBufDefs = (mm_camera_buf_def_t *)malloc(mNumBufs * sizeof(mm_camera_buf_def_t));
memset(mBufDefs, 0, mNumBufs * sizeof(mm_camera_buf_def_t));
for (uint32_t i = 0; i < mNumBufs; i++) {
if (mStreamBufs->valid(i)) {
// Fill in each buf def (fd, mapped pointer, plane layout)
mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[i], i);
}
}
*num_bufs = mNumBufs;
*initial_reg_flag = regFlags;
*bufs = mBufDefs;
return NO_ERROR;
}
继续
// Fills one mm_camera_buf_def_t from the imported buffer at `index`: the
// gralloc fd, its mmap'd address, and the per-plane layout from `offset`.
// These are the values later queued to the kernel via VIDIOC_QBUF.
int32_t QCamera3Memory::getBufDef(const cam_frame_len_offset_t &offset,
mm_camera_buf_def_t &bufDef, uint32_t index)
{
Mutex::Autolock lock(mLock);
if (!mBufferCount) {
LOGE("Memory not allocated");
return NO_INIT;
}
bufDef.fd = mMemInfo[index].fd; // key: the display buffer's fd
bufDef.frame_len = mMemInfo[index].size;
bufDef.mem_info = (void *)this;
bufDef.buffer = getPtrLocked(index); // key: the mmap'd address of that same buffer
bufDef.planes_buf.num_planes = (int8_t)offset.num_planes;
bufDef.buf_idx = (uint8_t)index;
/* Plane 0 needs to be set separately. Set other planes in a loop */
bufDef.planes_buf.planes[0].length = offset.mp[0].len;
bufDef.planes_buf.planes[0].m.userptr = (long unsigned int)mMemInfo[index].fd; // note: userptr carries the fd
bufDef.planes_buf.planes[0].data_offset = offset.mp[0].offset;
bufDef.planes_buf.planes[0].reserved[0] = 0;
for (int i = 1; i < bufDef.planes_buf.num_planes; i++) {
bufDef.planes_buf.planes[i].length = offset.mp[i].len;
// NOTE(review): mMemInfo[i].fd here vs mMemInfo[index].fd for plane 0 looks
// suspicious (plane index used as buffer index) — verify against upstream.
bufDef.planes_buf.planes[i].m.userptr = (long unsigned int)mMemInfo[i].fd;
bufDef.planes_buf.planes[i].data_offset = offset.mp[i].offset;
// reserved[0] accumulates the byte offset of this plane within the buffer.
bufDef.planes_buf.planes[i].reserved[0] =
bufDef.planes_buf.planes[i-1].reserved[0] +
bufDef.planes_buf.planes[i-1].length;
}
return NO_ERROR;
}
hardware\qcom\camera\qcamera2\hal3\QCamera3Mem.cpp
// Excerpt: returns the CPU address recorded at registration time
// (mPtr[idx] = mmap(...) in registerBuffer). The "Locked" suffix suggests
// the caller is expected to hold mLock — verify against the full source.
void *QCamera3GrallocMemory::getPtrLocked(uint32_t index)
{
...
return mPtr[index];
}
最后看V4L2是怎么qbuf的
hardware\qcom\camera\qcamera2\stack\mm-camera-interface\src\Mm_camera_stream.c
// Excerpt: queues a buffer to the kernel driver via V4L2. The memory type is
// V4L2_MEMORY_USERPTR, and the planes are the ones built in getBufDef() —
// i.e. the kernel captures straight into the window's gralloc memory.
int32_t mm_stream_qbuf(mm_stream_t *my_obj, mm_camera_buf_def_t *buf)
{
...
if (buf->buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
// "User buffer" container case: a single plane whose userptr carries the fd
memset(&planes, 0, sizeof(planes));
planes[0].length = my_obj->stream_info->user_buf_info.size;
planes[0].m.userptr = buf->fd;
length = 1;
} else {
// Normal case: reuse the per-plane layout filled in by getBufDef()
memcpy(planes, buf->planes_buf.planes, sizeof(planes));
length = buf->planes_buf.num_planes;
}
memset(&buffer, 0, sizeof(buffer));
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
buffer.memory = V4L2_MEMORY_USERPTR;
buffer.index = (__u32)buf->buf_idx;
buffer.m.planes = &planes[0];
buffer.length = (__u32)length;
...
// Hand the buffer to the driver
rc = ioctl(my_obj->fd, VIDIOC_QBUF, &buffer);
...
}