Camera Initialization (Preview), Part 3: The Preview Buffer Flow

API1+HAL3

frameworks\av\services\camera\libcameraservice\api1\Camera2Client.cpp

status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder,
        const sp<Surface>& window) {
        ...
        res = mStreamingProcessor->setPreviewWindow(window);
        	===>(StreamingProcessor.cpp)
        	mPreviewWindow = window;
        	<===
        ...
}

As mentioned in the earlier post "Camera Initialization (Preview), Part 1 (Framework->HAL3)", starting the preview ends up calling the function below:

frameworks\av\services\camera\libcameraservice\api1\client2\StreamingProcessor.cpp

status_t StreamingProcessor::updatePreviewStream(const Parameters &params) {
    ...
    if (mPreviewStreamId == NO_STREAM) {
        res = device->createStream(mPreviewWindow,
                params.previewWidth, params.previewHeight,
                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, HAL_DATASPACE_UNKNOWN,
                CAMERA3_STREAM_ROTATION_0, &mPreviewStreamId, String8());
        if (res != OK) {
            ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
                    __FUNCTION__, mId, strerror(-res), res);
            return res;
        }
    }
}

When Camera3Device creates the stream, it hands the Surface over to the stream. Before the analysis continues, the diagram below shows the model to keep in mind:

[Figure: BufferQueue model, with graphic buffers circulating between a producer and a consumer]

Here the producer side is the Surface, which the camera fills, and the consumer side is SurfaceFlinger.
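
The exchange between the two sides boils down to four calls: the producer dequeues a free buffer, fills it, and queues it back; the consumer acquires a queued buffer and releases it when done. The toy C++ sketch below illustrates just that handshake; it is a conceptual stand-in, not the real android::BufferQueue API:

#include <array>
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>
#include <queue>

// Toy stand-in for a graphic buffer slot.
struct Slot { std::array<uint8_t, 4096> pixels; };

class ToyBufferQueue {
public:
    // Producer: take a free slot (blocks until one is available).
    int dequeueBuffer() {
        std::unique_lock<std::mutex> lk(mMutex);
        mCond.wait(lk, [this] { return !mFree.empty(); });
        int idx = mFree.front(); mFree.pop();
        return idx;
    }
    // Producer: hand a filled slot over to the consumer side.
    void queueBuffer(int idx) {
        std::lock_guard<std::mutex> lk(mMutex);
        mQueued.push(idx);
        mCond.notify_all();
    }
    // Consumer: take the oldest filled slot (blocks until one is queued).
    int acquireBuffer() {
        std::unique_lock<std::mutex> lk(mMutex);
        mCond.wait(lk, [this] { return !mQueued.empty(); });
        int idx = mQueued.front(); mQueued.pop();
        return idx;
    }
    // Consumer: return the slot to the free list for reuse.
    void releaseBuffer(int idx) {
        std::lock_guard<std::mutex> lk(mMutex);
        mFree.push(idx);
        mCond.notify_all();
    }

    Slot slots[3];  // triple buffering, as the real queue typically uses

private:
    std::mutex mMutex;
    std::condition_variable mCond;
    std::queue<int> mFree{std::deque<int>{0, 1, 2}};
    std::queue<int> mQueued;
};

In the real pipeline, Camera3OutputStream plays the producer role against the Surface, and SurfaceFlinger plays the consumer role on the other end of the BufferQueue.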

frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp

status_t Camera3Device::createStream(sp<Surface> consumer,
            uint32_t width, uint32_t height, int format,
            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
            const String8& physicalCameraId,
            std::vector<int> *surfaceIds, int streamSetId, bool isShared, uint64_t consumerUsage) {
    ATRACE_CALL();

    if (consumer == nullptr) {
        ALOGE("%s: consumer must not be null", __FUNCTION__);
        return BAD_VALUE;
    }

    std::vector<sp<Surface>> consumers;
    consumers.push_back(consumer);

    return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
            format, dataSpace, rotation, id, physicalCameraId, surfaceIds, streamSetId,
            isShared, consumerUsage);
}

Above, the current Surface is first stored in a vector of consumers, and that container is passed down:

status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
        bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
        android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
        const String8& physicalCameraId,
        std::vector<int> *surfaceIds, int streamSetId, bool isShared, uint64_t consumerUsage) {

		...
	} else {
			//the newly created stream carries the surface
		    newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
            	width, height, format, dataSpace, rotation,
            	mTimestampOffset, physicalCameraId, streamSetId);
            ===> Camera3OutputStream.cpp
            	mConsumer(consumer),
            <===
          
    }
	//record each surface's id
    size_t consumerCount = consumers.size();
    for (size_t i = 0; i < consumerCount; i++) {
        int id = newStream->getSurfaceId(consumers[i]);
        if (id < 0) {
            SET_ERR_L("Invalid surface id");
            return BAD_VALUE;
        }
        if (surfaceIds != nullptr) {
            surfaceIds->push_back(id);
        }
    }
    ...
}

As shown above, the Surface is saved in Camera3OutputStream's member variable mConsumer.

Next, let's see how this buffer gets filled. As the end of the earlier post "Camera Initialization (Preview), Part 1 (Framework->HAL3)" showed, the framework keeps submitting preview requests, and each one reaches prepareHalRequests, which prepares the buffers for HAL3.

frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp

status_t Camera3Device::RequestThread::prepareHalRequests() {

    for (size_t i = 0; i < mNextRequests.size(); i++) {
        auto& nextRequest = mNextRequests.editItemAt(i);
        sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
        camera3_capture_request_t* halRequest = &nextRequest.halRequest;
        //outputBuffers is the container that holds this request's stream buffers
        Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
		...
		/*
		 * Insert default-constructed camera3_stream_buffer_t entries into the
		 * outputBuffers container at index 0; the count is
		 * captureRequest->mOutputStreams.size(), usually 1.
		 */
		outputBuffers->insertAt(camera3_stream_buffer_t(), 0, captureRequest->mOutputStreams.size());
		//halRequest->output_buffers points at the entries just inserted
        halRequest->output_buffers = outputBuffers->array();
		...
		for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
            sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(j);
            ...
            
            res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
                    captureRequest->mOutputSurfaces[j]);
            ...
       }
       ...
}
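
Note how outputBuffers and halRequest->output_buffers relate: output_buffers is simply a raw pointer into the Vector's backing array. Below is a minimal sketch of the same layout using the HAL3 types from hardware/camera3.h; the stream and handle parameters stand in for what the excerpt obtains from mOutputStreams and getBuffer:

#include <vector>
#include <hardware/camera3.h>

// Sketch: assemble a capture request whose output_buffers points into a
// vector, mirroring what prepareHalRequests does with nextRequest.outputBuffers.
camera3_capture_request_t buildRequest(
        uint32_t frameNumber,
        camera3_stream_t *stream,                          // assumed: a configured stream
        buffer_handle_t *handle,                           // assumed: already dequeued
        std::vector<camera3_stream_buffer_t> &outputBuffers) {
    camera3_stream_buffer_t buf = {};
    buf.stream = stream;
    buf.buffer = handle;               // gralloc handle from dequeueBuffer
    buf.acquire_fence = -1;            // no fences in this sketch
    buf.release_fence = -1;
    buf.status = CAMERA3_BUFFER_STATUS_OK;
    outputBuffers.push_back(buf);

    camera3_capture_request_t request = {};
    request.frame_number = frameNumber;
    request.num_output_buffers = static_cast<uint32_t>(outputBuffers.size());
    request.output_buffers = outputBuffers.data();   // raw pointer into the vector
    return request;
}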

frameworks\av\services\camera\libcameraservice\device3\Camera3Stream.cpp

status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer,
        const std::vector<size_t>& surface_ids) {
	...
	res = getBufferLocked(buffer, surface_ids);
	...
}

frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp

status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer,
        const std::vector<size_t>&) {
    ATRACE_CALL();

    ANativeWindowBuffer* anb;
    int fenceFd = -1;

    status_t res;
    //getBufferLockedCommon calls dequeueBuffer, binding a buffer from the Surface to anb (an ANativeWindowBuffer*)
    res = getBufferLockedCommon(&anb, &fenceFd);
    	===>
    	*anb = gb.get();
    	res = mConsumer->attachBuffer(*anb);
    	...
    	sp<ANativeWindow> currentConsumer = mConsumer;
    	//here anb is bound to a buffer dequeued from the surface
    	res = currentConsumer->dequeueBuffer(currentConsumer.get(), anb, fenceFd);
    	<===
    if (res != OK) {
        return res;
    }

    /**
     * FenceFD now owned by HAL except in case of error,
     * in which case we reassign it to acquire_fence
     */
    //build the camera3_stream_buffer from the anb obtained above
    handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
                        /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/true);
                        ===>
                        buffer.stream = this;
    					buffer.buffer = handle; // the key step: the gralloc handle goes to the HAL
					    buffer.acquire_fence = acquireFence;
    					buffer.release_fence = releaseFence;
    					buffer.status = status;
                        <===

    return OK;
}

At this point, halRequest->output_buffers is finally wired up to our preview window.
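
As an aside, applications can drive the very same producer-side cycle through the NDK: ANativeWindow_lock dequeues a buffer for CPU writing and ANativeWindow_unlockAndPost queues it back toward SurfaceFlinger. A minimal sketch, assuming the window comes from ANativeWindow_fromSurface elsewhere:

#include <android/native_window.h>
#include <cstdint>
#include <cstring>

// Sketch: dequeue, fill, and queue one frame, the way a CPU producer would.
bool pushGrayFrame(ANativeWindow *window) {
    ANativeWindow_setBuffersGeometry(window, 640, 480, WINDOW_FORMAT_RGBA_8888);

    ANativeWindow_Buffer buffer;
    if (ANativeWindow_lock(window, &buffer, /*inOutDirtyBounds*/ nullptr) != 0)
        return false;  // dequeue failed

    // buffer.bits is CPU-writable; buffer.stride is in pixels, not bytes.
    uint8_t *row = static_cast<uint8_t *>(buffer.bits);
    for (int32_t y = 0; y < buffer.height; y++) {
        memset(row, 0x80, buffer.width * 4);  // RGBA_8888 = 4 bytes/pixel
        row += buffer.stride * 4;
    }

    // Queue the buffer back so the consumer (SurfaceFlinger) can latch it.
    return ANativeWindow_unlockAndPost(window) == 0;
}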

Now let's look at the HAL3 side.

hardware\qcom\camera\qcamera2\hal3\QCamera3HWI.cpp

int QCamera3HardwareInterface::processCaptureRequest(
                    camera3_capture_request_t *request,
                    List<InternalRequest> &internallyRequestedStreams)
{
					...
					for (size_t i = 0; i < request->num_output_buffers; i++) {
						const camera3_stream_buffer_t& output = request->output_buffers[i];
						QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
						...
                    	rc = channel->request(output.buffer, frameNumber,
                            	NULL, mParameters, indexUsed);
                            	===>
                            	registerBuffer(QCamera3Channel.cpp)
                            		registerBuffer(QCamera3Mem.cpp)
                            			===>
                            			mMemInfo[idx].main_ion_fd = open("/dev/ion", O_RDONLY);
                            			ion_info_fd.fd = mPrivateHandle[idx]->fd;
                            			if (ioctl(mMemInfo[idx].main_ion_fd,ION_IOC_IMPORT, &ion_info_fd) < 0) {
                            			...
                            			mMemInfo[idx].fd = mPrivateHandle[idx]->fd;
                            			mMemInfo[idx].size = mPrivateHandle[idx]->size;
    									mMemInfo[idx].handle = ion_info_fd.handle;
    									vaddr = mmap(NULL,
											            mMemInfo[idx].size,
											            PROT_READ | PROT_WRITE,
											            MAP_SHARED,
											            mMemInfo[idx].fd, 0);
										mPtr[idx] = vaddr;	           
                            			<===
                            		
                            	<===
                    ...
}
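
The registerBuffer chain above reduces to three steps: open an ION client, import the buffer's shared fd, then mmap it for a CPU view. Below is a minimal sketch of that sequence against the legacy ION ioctl interface the excerpt uses; error cleanup is trimmed, buffer_fd and size are assumed to come from the private gralloc handle, and a real implementation must keep the client fd and handle around for a later ION_IOC_FREE:

#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>   // legacy ION interface (pre-4.12 kernels)

// Sketch: import a shared dma-buf fd into ION and map it, as registerBuffer does.
void *import_and_map(int buffer_fd, size_t size) {
    int ion_dev = open("/dev/ion", O_RDONLY);
    if (ion_dev < 0)
        return NULL;

    // Import turns the shared fd into an ION handle owned by this client.
    struct ion_fd_data fd_data;
    fd_data.fd = buffer_fd;
    if (ioctl(ion_dev, ION_IOC_IMPORT, &fd_data) < 0) {
        close(ion_dev);
        return NULL;
    }

    // Map the same underlying pages into this process.
    void *vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       buffer_fd, 0);
    return (vaddr == MAP_FAILED) ? NULL : vaddr;
}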

As shown above, the window's buffers are now registered with HAL3.
Note that V4L2 queues (QBUF) this very same memory into the kernel. When the channel is started, the stream's MM_STREAM_EVT_GET_BUF branch is taken:
hardware\qcom\camera\qcamera2\stack\mm-camera-interface\src\mm_camera_stream.c

int32_t mm_stream_init_bufs(mm_stream_t * my_obj)
{
	...
	if (!my_obj->is_res_shared) {
		rc = my_obj->mem_vtbl.get_bufs(&my_obj->frame_offset,
	                &my_obj->total_buf_cnt, &reg_flags, &my_obj->buf,
	                &my_obj->map_ops, my_obj->mem_vtbl.user_data);
	         ===> QCamera3Stream.cpp
	         rc = stream->getBufs(offset, num_bufs, initial_reg_flag, bufs, ops_tbl);
	         <===
	...
}

int32_t QCamera3Stream::getBufs(cam_frame_len_offset_t *offset,
                     uint8_t *num_bufs,
                     uint8_t **initial_reg_flag,
                     mm_camera_buf_def_t **bufs,
                     mm_camera_map_unmap_ops_tbl_t *ops_tbl)
{
		...
    for (uint32_t i = 0; i < mNumBufs; i++) {
        if (mStreamBufs->valid(i)) {
            //get the buffer size; returns mMemInfo[index].size
            ssize_t bufSize = mStreamBufs->getSize(i);
            
            if (BAD_INDEX != bufSize) {
                //map this buffer into the camera daemon process
                rc = ops_tbl->map_ops(i, -1, mStreamBufs->getFd(i),
                        (size_t)bufSize, mStreamBufs->getPtr(i),
                        CAM_MAPPING_BUF_TYPE_STREAM_BUF,
                        ops_tbl->userdata);
                if (rc < 0) {
                    ...
                }
            } else {
                LOGE("Failed to retrieve buffer size (bad index)");
                return INVALID_OPERATION;
            }
        }
    }
    ...
    //allocate the array of buffer definitions
    mBufDefs = (mm_camera_buf_def_t *)malloc(mNumBufs * sizeof(mm_camera_buf_def_t));  
    memset(mBufDefs, 0, mNumBufs * sizeof(mm_camera_buf_def_t));
    for (uint32_t i = 0; i < mNumBufs; i++) {
        if (mStreamBufs->valid(i)) {
        	//fill in each buffer definition
            mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[i], i);
        }
    }
	*num_bufs = mNumBufs;
	*initial_reg_flag = regFlags;
	*bufs = mBufDefs;
	return NO_ERROR;
}
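
One detail worth pausing on: map_ops does not copy pixel data. It ships the buffer's fd to the mm-camera daemon over a Unix domain socket so the daemon can mmap the same pages itself. The standard kernel mechanism for moving an fd between processes is SCM_RIGHTS ancillary data; the sketch below shows the sending side, assuming an already-connected socket (send_fd is my name for the helper, not the mm-camera-interface API):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

// Sketch: send one open fd to a peer over a connected AF_UNIX socket.
// The kernel duplicates the fd into the receiving process; both ends
// then reference the same underlying buffer.
int send_fd(int sock, int fd_to_send) {
    char payload = 'B';                        // at least one byte of real data
    struct iovec iov = { &payload, 1 };

    char cmsg_buf[CMSG_SPACE(sizeof(int))];
    struct msghdr msg;
    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = cmsg_buf;
    msg.msg_controllen = sizeof(cmsg_buf);

    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;              // ancillary data carries an fd
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

    return (sendmsg(sock, &msg, 0) < 0) ? -1 : 0;
}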

Continuing into getBufDef:

hardware\qcom\camera\qcamera2\hal3\QCamera3Mem.cpp

int32_t QCamera3Memory::getBufDef(const cam_frame_len_offset_t &offset,
        mm_camera_buf_def_t &bufDef, uint32_t index)
{
    Mutex::Autolock lock(mLock);

    if (!mBufferCount) {
        LOGE("Memory not allocated");
        return NO_INIT;
    }

    bufDef.fd = mMemInfo[index].fd; // key: the shared buffer's fd
    bufDef.frame_len = mMemInfo[index].size;
    bufDef.mem_info = (void *)this;
    bufDef.buffer = getPtrLocked(index); // key: the mmap'ed virtual address (mPtr[index])
    bufDef.planes_buf.num_planes = (int8_t)offset.num_planes;
    bufDef.buf_idx = (uint8_t)index;

    /* Plane 0 needs to be set separately. Set other planes in a loop */
    bufDef.planes_buf.planes[0].length = offset.mp[0].len;
    bufDef.planes_buf.planes[0].m.userptr = (long unsigned int)mMemInfo[index].fd; // note: the fd, not a pointer, goes into userptr
    bufDef.planes_buf.planes[0].data_offset = offset.mp[0].offset;
    bufDef.planes_buf.planes[0].reserved[0] = 0;
    for (int i = 1; i < bufDef.planes_buf.num_planes; i++) {
         bufDef.planes_buf.planes[i].length = offset.mp[i].len;
         bufDef.planes_buf.planes[i].m.userptr = (long unsigned int)mMemInfo[i].fd;
         bufDef.planes_buf.planes[i].data_offset = offset.mp[i].offset;
         bufDef.planes_buf.planes[i].reserved[0] =
                 bufDef.planes_buf.planes[i-1].reserved[0] +
                 bufDef.planes_buf.planes[i-1].length;
    }

    return NO_ERROR;
}

hardware\qcom\camera\qcamera2\hal3\QCamera3Mem.cpp

void *QCamera3GrallocMemory::getPtrLocked(uint32_t index)
{
	...
    return mPtr[index];
}

Finally, let's see how V4L2 queues (QBUF) the buffer:

hardware\qcom\camera\qcamera2\stack\mm-camera-interface\src\mm_camera_stream.c

int32_t mm_stream_qbuf(mm_stream_t *my_obj, mm_camera_buf_def_t *buf)
{
	...
    if (buf->buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
        memset(&planes, 0, sizeof(planes));
        planes[0].length = my_obj->stream_info->user_buf_info.size;
        planes[0].m.userptr = buf->fd;
        length = 1;
    } else {
        memcpy(planes, buf->planes_buf.planes, sizeof(planes));
        length = buf->planes_buf.num_planes;
    }

    memset(&buffer, 0, sizeof(buffer));
    buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    buffer.memory = V4L2_MEMORY_USERPTR;
    buffer.index = (__u32)buf->buf_idx;
    buffer.m.planes = &planes[0];
    buffer.length = (__u32)length;
    ...
    rc = ioctl(my_obj->fd, VIDIOC_QBUF, &buffer);
    ...
}
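
For completeness, the other half of the cycle: once the sensor has filled the buffer, the HAL takes it back with VIDIOC_DQBUF, and the frame then travels up to be queued onto the Surface. A minimal sketch of that dequeue, matching the multiplanar USERPTR setup above (streaming is assumed to already be on):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// Sketch: block until the driver hands back a filled buffer,
// the counterpart of the VIDIOC_QBUF call in mm_stream_qbuf.
int dequeue_frame(int v4l2_fd, uint32_t num_planes, uint32_t *out_index) {
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buffer;
    memset(planes, 0, sizeof(planes));
    memset(&buffer, 0, sizeof(buffer));

    buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    buffer.memory = V4L2_MEMORY_USERPTR;
    buffer.m.planes = planes;
    buffer.length = num_planes;

    // Blocks until a frame is ready (EAGAIN on a nonblocking fd).
    if (ioctl(v4l2_fd, VIDIOC_DQBUF, &buffer) < 0)
        return -1;

    *out_index = buffer.index;   // which mm_camera_buf_def_t received the frame
    return 0;
}

With QBUF/DQBUF on the kernel side and dequeueBuffer/queueBuffer on the Surface side, the same gralloc memory makes the whole trip from the sensor to SurfaceFlinger without a copy; that is exactly the point of the registration dance traced above.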