Android源码版本Version:4.2.2; 硬件平台 全志A31
step1:之前在讲到CameraService处的setPreviewWindow中传入一个窗口给HAL
- // Hands the app-supplied preview window (a Surface) down to the camera HAL.
- // The HAL never sees the Surface directly: it receives &mHalPreviewWindow.nw,
- // a preview_stream_ops callback table whose static callbacks route back to
- // this object through the `user` pointer stored below.
- // Returns the HAL's status, or INVALID_OPERATION when the HAL does not
- // implement set_preview_window.
- status_t setPreviewWindow(const sp<ANativeWindow>& buf)
- {
- ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
-
- if (mDevice->ops->set_preview_window) {
- // Keep a strong reference to the window; the HAL only gets callbacks.
- mPreviewWindow = buf;
- // Store `this` so the static __* callbacks can recover the
- // CameraHardwareInterface (and thus mPreviewWindow) from the HAL side.
- mHalPreviewWindow.user = this;
- ALOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p", __FUNCTION__,
- &mHalPreviewWindow, mHalPreviewWindow.user);
- // Pass 0 (no window) when buf is NULL, e.g. when preview is torn down.
- return mDevice->ops->set_preview_window(mDevice,
- buf.get() ? &mHalPreviewWindow.nw : 0);
- }
- return INVALID_OPERATION;
- }
传入给HAL的参数buf为一个Surface,还有一个变量是关于预览窗口的数据流操作nw
- // Wrapper handed to the HAL: `nw` is the preview_stream_ops function table
- // (dequeue/enqueue/lock/... callbacks) and `user` points back to the owning
- // CameraHardwareInterface so those callbacks can reach the real ANativeWindow.
- struct camera_preview_window {
- struct preview_stream_ops nw;
- void *user;
- };
该变量的初始化如下:这些函数接口看上去很熟悉,的确在SurfaceFlinger中,客户端的Surface也就是通过这些接口来向SurfaceFlinger申请图形缓存并处理图形缓存显示的,只是之前的操作都交给了OpenGL ES的eglSwapBuffers()来对这个本地窗口进行如下的dequeueBuffer和enqueueBuffer的操作而已。而在Camera的预览中,这些操作将手动完成。
- // Fills mHalPreviewWindow.nw with the static __* trampolines. Each trampoline
- // unpacks `user` back into the CameraHardwareInterface and forwards the call
- // to the real ANativeWindow (the app's Surface), so the HAL can drive the
- // window without ever depending on C++ framework types.
- void initHalPreviewWindow()
- {
- mHalPreviewWindow.nw.cancel_buffer = __cancel_buffer;
- mHalPreviewWindow.nw.lock_buffer = __lock_buffer;
- mHalPreviewWindow.nw.dequeue_buffer = __dequeue_buffer;
- mHalPreviewWindow.nw.enqueue_buffer = __enqueue_buffer;
- mHalPreviewWindow.nw.set_buffer_count = __set_buffer_count;
- mHalPreviewWindow.nw.set_buffers_geometry = __set_buffers_geometry;
- mHalPreviewWindow.nw.set_crop = __set_crop;
- mHalPreviewWindow.nw.set_timestamp = __set_timestamp;
- mHalPreviewWindow.nw.set_usage = __set_usage;
- mHalPreviewWindow.nw.set_swap_interval = __set_swap_interval;
-
- mHalPreviewWindow.nw.get_min_undequeued_buffer_count =
- __get_min_undequeued_buffer_count;
- }
step2.继续前面的preview的处理操作,在CameraService处的CameraClient已经调用了CameraHardwareInterface的startPreview函数,实际就是操作HAL处的Camera设备如下
- // Thin pass-through: asks the HAL to start the preview stream.
- // Returns INVALID_OPERATION when the HAL does not implement start_preview.
- status_t startPreview()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->start_preview)
- return mDevice->ops->start_preview(mDevice);
- return INVALID_OPERATION;
- }
step3.进入HAL来看Preview的处理
-
- // (excerpt) HAL-side preview start: the essential step is delegating to the
- // V4L2 device, which begins the actual video capture stream.
- status_t CameraHardware::doStartPreview(){
- ...........
- res = camera_dev->startDevice(mCaptureWidth, mCaptureHeight, org_fmt, video_hint);
- ......
- }
调用V4L2设备来启动视频流的采集,startDevice()函数更好的解释了预览的启动也就是视频采集的启动。
- // Starts V4L2 capture at the given resolution/pixel format, following the
- // canonical V4L2 streaming sequence: set params -> request buffers ->
- // query/mmap buffers -> STREAMON. Returns NO_ERROR on success, EINVAL when
- // the device is not connected or already started.
- status_t V4L2CameraDevice::startDevice(int width,
- int height,
- uint32_t pix_fmt,
- bool video_hint)
- {
- LOGD("%s, wxh: %dx%d, fmt: %d", __FUNCTION__, width, height, pix_fmt);
-
- Mutex::Autolock locker(&mObjectLock);
-
- if (!isConnected())
- {
- LOGE("%s: camera device is not connected.", __FUNCTION__);
- return EINVAL;
- }
-
- if (isStarted())
- {
- LOGE("%s: camera device is already started.", __FUNCTION__);
- return EINVAL;
- }
-
-
- mVideoFormat = pix_fmt;
- mCurrentV4l2buf = NULL;
-
- mVideoHint = video_hint;
- mCanBeDisconnected = false;
-
-
-
- // Capture parameters (frame rate etc.), then resolution / pixel format.
- v4l2setCaptureParams();
-
-
- CHECK_NO_ERROR(v4l2SetVideoParams(width, height, pix_fmt));
-
-
- // Normal still capture needs only one buffer; streaming uses NB_BUFFER.
- // The driver may grant fewer buffers than requested, hence the out-param.
- int buf_cnt = (mTakePictureState == TAKE_PICTURE_NORMAL) ? 1 : NB_BUFFER;
- CHECK_NO_ERROR(v4l2ReqBufs(&buf_cnt));
- mBufferCnt = buf_cnt;
-
-
- // Query each kernel buffer and mmap it into this process.
- CHECK_NO_ERROR(v4l2QueryBuf());
-
-
- // VIDIOC_STREAMON: the driver starts filling the queued buffers.
- CHECK_NO_ERROR(v4l2StartStreaming());
-
- mCameraDeviceState = STATE_STARTED;
-
- // Throttle intervals in microseconds: 10 fps continuous picture,
- // 15 fps face detection, 24 fps preview.
- mContinuousPictureAfter = 1000000 / 10;
- mFaceDectectAfter = 1000000 / 15;
- mPreviewAfter = 1000000 / 24;
-
- return NO_ERROR;
- }
这个是完全参考了V4L2的视频采集处理流程:
1.v4l2setCaptureParams()设置采集的相关参数,v4l2SetVideoParams()设置图像的宽高和像素格式;
2.v4l2ReqBufs()向内核申请图像缓存,v4l2QueryBuf()获取内核图像缓存的信息,并将所有的内核图像缓存映射到当前的进程中来,方便用户空间的处理;
3.v4l2StartStreaming():开启V4L2的视频采集流程。
step4: 图像采集线程bool V4L2CameraDevice::captureThread();
该函数的内容比较复杂,但核心是 ret = getPreviewFrame(&buf),获取当前一帧图像:
- // Dequeues one filled capture buffer from the driver (VIDIOC_DQBUF) into
- // *buf. Blocks until a frame is available. Returns OK on success; on failure
- // logs the errno and returns __LINE__ as a non-zero error marker.
- int V4L2CameraDevice::getPreviewFrame(v4l2_buffer *buf)
- {
- int ret = UNKNOWN_ERROR;
-
- // Must match the type/memory used at VIDIOC_REQBUFS time (mmap streaming).
- buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf->memory = V4L2_MEMORY_MMAP;
-
- ret = ioctl(mCameraFd, VIDIOC_DQBUF, buf);
- if (ret < 0)
- {
- LOGW("GetPreviewFrame: VIDIOC_DQBUF Failed, %s", strerror(errno));
- return __LINE__;
- }
-
- return OK;
- }
调用了典型的VIDIOC_DQBUF命令,出列一帧图形缓存,提取到用户空间供显示。
当前的平台通过定义一个V4L2BUF_t结构体来表示当前采集到的一帧图像,分别记录到Y和C所在的物理地址和用户空间的虚拟地址。虚拟地址是对内核采集缓存的映射
- // Platform descriptor for one captured frame. For a YUV planar frame the
- // Y and C (chroma) planes are tracked by both their physical addresses
- // (for hardware such as the display/encoder) and their user-space virtual
- // addresses (mmap'ed views of the kernel capture buffers).
- typedef struct V4L2BUF_t
- {
- unsigned int addrPhyY;
- unsigned int addrPhyC;
- unsigned int addrVirY;
- unsigned int addrVirC;
- unsigned int width;
- unsigned int height;
- int index; // V4L2 buffer index this frame came from
- long long timeStamp;
- RECT_t crop_rect;
- int format;
- void* overlay_info;
-
- // Optional embedded thumbnail and its per-consumer usage flags.
-
- unsigned char isThumbAvailable;
- unsigned char thumbUsedForPreview;
- unsigned char thumbUsedForPhoto;
- unsigned char thumbUsedForVideo;
- unsigned int thumbAddrPhyY;
- unsigned int thumbAddrVirY;
- unsigned int thumbWidth;
- unsigned int thumbHeight;
- RECT_t thumb_crop_rect;
- int thumbFormat;
-
- int refCnt; // reference count; buffer is re-queued when it drops to zero
- unsigned int bytesused;
- }V4L2BUF_t;
来看看该结构体的初始化代码:
- // (excerpt) Fill a V4L2BUF_t for the frame just dequeued in `buf`.
- V4L2BUF_t v4l2_buf;
- if (mVideoFormat != V4L2_PIX_FMT_YUYV
- && mCaptureFormat == V4L2_PIX_FMT_YUYV)
- {
- // Capture is YUYV but output format differs: use the converted
- // intermediate buffers, indexed by the same V4L2 buffer index.
- v4l2_buf.addrPhyY = mVideoBuffer.buf_phy_addr[buf.index];
- v4l2_buf.addrVirY = mVideoBuffer.buf_vir_addr[buf.index];
- }
- else
- {
- // Direct path: physical address is carried in m.offset (masked to the
- // platform's physical range); virtual address is the mmap'ed view.
- v4l2_buf.addrPhyY = buf.m.offset & 0x0fffffff;
- v4l2_buf.addrVirY = (unsigned int)mMapMem.mem[buf.index];
- }
- v4l2_buf.index = buf.index;
- v4l2_buf.timeStamp = mCurFrameTimestamp;
- v4l2_buf.width = mFrameWidth;
- v4l2_buf.height = mFrameHeight;
- v4l2_buf.crop_rect.left = mRectCrop.left;
- v4l2_buf.crop_rect.top = mRectCrop.top;
- // mRectCrop stores inclusive edges, hence the +1 to get width/height.
- v4l2_buf.crop_rect.width = mRectCrop.right - mRectCrop.left + 1;
- v4l2_buf.crop_rect.height = mRectCrop.bottom - mRectCrop.top + 1;
- v4l2_buf.format = mVideoFormat;
addrPhyY/addrPhyC和addrVirY/addrVirC分别记录Y和C所在的物理地址和用户空间的虚拟地址。而这个地址都是通过当前Buf的index直接设置的,为什么?因为内核的图像缓存区的mmap操作将每一个缓存,以其Index分别逐一的映射到了用户空间,并记录缓存的物理和虚拟地址,而这主要是方便后续图像的显示而已。
step5:bool V4L2CameraDevice::previewThread()//预览线程
获得了一帧数据必须通知预览线程进行图像的显示,采集线程和显示线程之间通过pthread_cond_wait(&mPreviewCond, &mPreviewMutex);条件变量进行线程间的等待与唤醒(两个线程同属一个进程)。
- // Preview thread loop body. Pops one frame from the preview queue; if none
- // is available, blocks on the condition variable until the capture thread
- // signals a new frame. For each frame it (1) hands the data to the callback
- // notifier (app callbacks / recording) and (2) at the preview frame-rate
- // cadence, sends it to the preview window for display. Returns true to keep
- // the thread loop running.
- bool V4L2CameraDevice::previewThread()
- {
- V4L2BUF_t * pbuf = (V4L2BUF_t *)OSAL_Dequeue(&mQueueBufferPreview);
- if (pbuf == NULL)
- {
- // Queue empty: wait for the capture thread to enqueue and signal.
- pthread_mutex_lock(&mPreviewMutex);
- pthread_cond_wait(&mPreviewCond, &mPreviewMutex);
- pthread_mutex_unlock(&mPreviewMutex);
- return true;
- }
-
- Mutex::Autolock locker(&mObjectLock);
- // The device may have been stopped between enqueue and here; the mmap'ed
- // memory would then already be released, so just drop the frame.
- if (mMapMem.mem[pbuf->index] == NULL
- || pbuf->addrPhyY == 0)
- {
- LOGV("preview buffer have been released...");
- return true;
- }
-
- // Deliver the frame to upper-layer consumers (callbacks / encoder).
- mCallbackNotifier->onNextFrameAvailable((void*)pbuf, mUseHwEncoder);
-
- // Throttle display to the preview rate (see mPreviewAfter).
- if (isPreviewTime())
- {
- mPreviewWindow->onNextFrameAvailable((void*)pbuf);
- }
-
-
- // Give the buffer back so the driver can refill it (VIDIOC_QBUF path).
- releasePreviewFrame(pbuf->index);
-
- return true;
- }
预览线程主要做了两件事,一是完成图像缓存数据的回调供最上层的使用;另一件当然是送显。
step6:预览线程如何显示?
- // (excerpt) Pushes one captured frame into the ANativeWindow:
- // set geometry -> dequeue a gralloc buffer -> lock & fill it with the frame
- // data -> enqueue it for SurfaceFlinger to composite and display.
- bool PreviewWindow::onNextFrameAvailable(const void* frame)
- {
- int res;
- Mutex::Autolock locker(&mObjectLock);
-
- V4L2BUF_t * pv4l2_buf = (V4L2BUF_t *)frame;
- ......
- // Declare the size/format of the buffers we are about to dequeue.
- res = mPreviewWindow->set_buffers_geometry(mPreviewWindow,
- mPreviewFrameWidth,
- mPreviewFrameHeight,
- format);
- ......
- // Get an empty graphic buffer from the window's BufferQueue.
- res = mPreviewWindow->dequeue_buffer(mPreviewWindow, &buffer, &stride);
- ..................
- // Map it for CPU writing so the frame data can be copied in.
- res = grbuffer_mapper.lock(*buffer, GRALLOC_USAGE_SW_WRITE_OFTEN, rect, &img);
- .............
- // Queue the filled buffer back for composition/display.
- mPreviewWindow->enqueue_buffer(mPreviewWindow, buffer);
- ............
- }
上述代码实现了本地窗口图像向SurfaceFlinger的投递,为何这么说,看下面的分析:
1.PreviewWindow类里的mPreviewWindow成员变量是什么?
这个是从应用端的setPreviewDisplay()设置过来的,传入到HAL的地方在CameraHardwareInterface的initialize函数里:
- return mDevice->ops->set_preview_window(mDevice,
- buf.get() ? &mHalPreviewWindow.nw : 0);
- }
nw的操作在step1里面已经有说明了,初始化相关的一些操作。
2.以dequeue_buffer为例:
- // preview_stream_ops trampoline: recovers the ANativeWindow from `w` (via
- // the `user` back-pointer, see anw()) and dequeues one graphic buffer from
- // it. On success fills *buffer with the gralloc handle and *stride with the
- // buffer's row stride; returns 0. Otherwise returns the window's error code.
- static int __dequeue_buffer(struct preview_stream_ops* w,
- buffer_handle_t** buffer, int *stride)
- {
- int rc;
- ANativeWindow *a = anw(w);
- ANativeWindowBuffer* anb;
- rc = native_window_dequeue_buffer_and_wait(a, &anb);
- if (!rc) {
- *buffer = &anb->handle;
- *stride = anb->stride;
- }
- return rc;
- }
调用到本地的窗口,通过w获得ANativeWindow对象,来看看该宏的实现:
- // Converts the opaque `user` pointer stored in camera_preview_window back
- // into the CameraHardwareInterface and returns its preview ANativeWindow
- // (the Surface set by setPreviewWindow).
- static ANativeWindow *__to_anw(void *user)
- {
- CameraHardwareInterface *__this =
- reinterpret_cast<CameraHardwareInterface *>(user);
- return __this->mPreviewWindow.get();
- }
- // `n` is the preview_stream_ops* the HAL hands back; it is the first member
- // of camera_preview_window, so the cast recovers the enclosing struct.
- #define anw(n) __to_anw(((struct camera_preview_window *)n)->user)
首先获取user对象为CameraHardwareInterface对象,通过它获得之前初始化的Surface对象即成员变量mPreviewWindow(属于本地窗口ANativeWindow类)。
3.本地窗口的操作
- // Convenience wrapper: dequeues a buffer from the native window using the
- // legacy (pre-fence) API, which blocks until the buffer is ready for use.
- static inline int native_window_dequeue_buffer_and_wait(ANativeWindow *anw,
- struct ANativeWindowBuffer** anb) {
- return anw->dequeueBuffer_DEPRECATED(anw, anb);
- }
上述的过程其实是调用应用层创建的Surface对象,该对象已经完全打包传递给了CameraService,来进行绘图和渲染的处理。如下所示:BpCamera
-
- // Binder proxy (BpCamera) side: flattens the app's Surface into a Parcel and
- // sends it across to CameraService, which reconstructs an equivalent Surface
- // in its own process. Returns the remote call's status.
- status_t setPreviewDisplay(const sp<Surface>& surface)
- {
- ALOGV("setPreviewDisplay");
- Parcel data, reply;
- data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
- Surface::writeToParcel(surface, &data);
- remote()->transact(SET_PREVIEW_DISPLAY, data, &reply);
- return reply.readInt32();
- }
BnCamera处,内部实现了新建一个CameraService处的Surface,但是都是用客户端处的参数来初始化的。即两者在不同进程中,但所包含的信息完全一样。
- // Binder stub (BnCamera) side: unflattens the Surface from the Parcel —
- // this constructs a new Surface in the CameraService process carrying the
- // same ISurfaceTexture binder — and forwards it to setPreviewDisplay().
- case SET_PREVIEW_DISPLAY: {
- ALOGV("SET_PREVIEW_DISPLAY");
- CHECK_INTERFACE(ICamera, data, reply);
- sp<Surface> surface = Surface::readFromParcel(data);
- reply->writeInt32(setPreviewDisplay(surface));
- return NO_ERROR;
- } break;
- // Unparceling constructor: rebuilds a Surface from the binder handles the
- // client wrote. The key piece is the ISurfaceTexture (BufferQueue producer)
- // binder — it is what lets this process dequeue/queue graphic buffers
- // against the same BufferQueue the client's Surface used.
- Surface::Surface(const Parcel& parcel, const sp<IBinder>& ref)
- : SurfaceTextureClient()
- {
- mSurface = interface_cast<ISurface>(ref);
- sp<IBinder> st_binder(parcel.readStrongBinder());
- sp<ISurfaceTexture> st;
- if (st_binder != NULL) {
- st = interface_cast<ISurfaceTexture>(st_binder);
- } else if (mSurface != NULL) {
- // Fall back to asking the ISurface for its texture interface.
- st = mSurface->getSurfaceTexture();
- }
-
- mIdentity = parcel.readInt32();
- init(st);
- }
这里的Surface建立是通过mSurface来完成和SurfaceFlinger的通信的,因为之前Camera客户端处的Surface是和SurfaceFlinger进行Binder通信,现在要将原先的Bpxxx相关的写入到CameraService进一步和SurfaceFlinger做后续的Binder通信处理,如queueBuffer()处理中和SurfaceFlinger的BufferQueue的通信等。
4.故anw->dequeueBuffer的函数就和之前的从Android Bootanimation理解SurfaceFlinger的客户端建立完全对应起来,而且完全一样,只是Bootanimation进程创建的Surface交给OpenGL ES来进行底层的比如dequeue(缓存申请,填充当前的buffer)和enqueue(入列渲染)的绘图操作而已,见Android4.2.2 SurfaceFlinger之图形缓存区申请与分配dequeueBuffer一文。具体的绘图就不在这里说明了。通过该方法已经和SurfaceFlinger建立起连接,最终交由其进行显示。