http://blog.csdn.net/wsb1321/article/details/21975951
接着上一篇:
Camera显示之app实现简单camera
mCamera.setPreviewDisplay(mSurfaceHolder);函数往下分析。
一.调用关系图:
二.1.mCamera为:android.hardware.Camera。
最终:
- public final void setPreviewDisplay(SurfaceHolder holder) throws IOException {
- if (holder != null) {
- setPreviewDisplay(holder.getSurface());
- } else {
- setPreviewDisplay((Surface)null);
- }
- }
- private native final void setPreviewDisplay(Surface surface) throws IOException;
2.调用到jni层:
- static void android_hardware_Camera_setPreviewDisplay(JNIEnv *env, jobject thiz, jobject jSurface)
- {
- ALOGV("setPreviewDisplay");
- sp<Camera> camera = get_native_camera(env, thiz, NULL);//这里是拿到一个和CameraService通信的客户端。目的是和CameraService进行通信
- if (camera == 0) return;
- sp<Surface> surface = NULL;
- if (jSurface != NULL) {
- surface = reinterpret_cast<Surface*>(env->GetIntField(jSurface, fields.surface));//将java层的Surface转化成native的Surface对象指针。
- }
- if (camera->setPreviewDisplay(surface) != NO_ERROR) {//通过CameraService的客户端最终和CameraService进行通信。
- jniThrowException(env, "java/io/IOException", "setPreviewDisplay failed");
- }
- }
我们来关注下:camera->setPreviewDisplay(surface)中的camera是如何获取的?
1).jni层
- // connect to camera service
- static void android_hardware_Camera_native_setup(JNIEnv *env, jobject thiz,
- jobject weak_this, jint cameraId)//在Cameraopen的时候会调用
- {
- sp<Camera> camera = Camera::connect(cameraId); //调用Camera的静态方法连接获取的。
- if (camera == NULL) {
- jniThrowRuntimeException(env, "Fail to connect to camera service");
- return;
- }
- // make sure camera hardware is alive
- if (camera->getStatus() != NO_ERROR) {
- jniThrowRuntimeException(env, "Camera initialization failed");
- return;
- }
- jclass clazz = env->GetObjectClass(thiz);
- if (clazz == NULL) {
- jniThrowRuntimeException(env, "Can't find android/hardware/Camera");
- return;
- }
- // We use a weak reference so the Camera object can be garbage collected.
- // The reference is only used as a proxy for callbacks.
- //!++
- #ifdef MTK_CAMERA_BSP_SUPPORT
- sp<JNICameraContext> context = new MtkJNICameraContext(env, weak_this, clazz, camera);//将camera通过MtkJNICameraContext保存到这个实例, 要用的时候直接通过这个类实例获取。
- #else
- sp<JNICameraContext> context = new JNICameraContext(env, weak_this, clazz, camera);//Android原生的也是这个设计思路。
2).Camera.cpp
继续1)中:sp<Camera> camera = Camera::connect(cameraId);
- sp<Camera> Camera::connect(int cameraId)
- {
- ALOGV("connect");
- sp<Camera> c = new Camera();
- const sp<ICameraService>& cs = getCameraService();//获取CameraService实例指针:
- if (cs != 0) {
- c->mCamera = cs->connect(c, cameraId);//调用CameraService的connect的方法。
- }
- if (c->mCamera != 0) {
- c->mCamera->asBinder()->linkToDeath(c);
- c->mStatus = NO_ERROR;
- } else {
- c.clear();
- }
- return c;
- }
3). CameraService.cpp
由于此方法代码较多, 我们只关注部分关键点:
- sp<ICamera> CameraService::connect(
- const sp<ICameraClient>& cameraClient, int cameraId) {
- #ifdef MTK_CAMERAPROFILE_SUPPORT
- initCameraProfile();
- AutoCPTLog cptlog(Event_CS_connect);
- #endif
- int callingPid = getCallingPid();
- LOG1("CameraService::connect E (pid %d, id %d)", callingPid, cameraId);
- .............................
- ............................
- int deviceVersion;
- if (mModule->common.module_api_version == CAMERA_MODULE_API_VERSION_2_0) {
- deviceVersion = info.device_version;
- } else {
- deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
- }
- switch(deviceVersion) {//从这个里面可以看到client是CameraClient
- case CAMERA_DEVICE_API_VERSION_1_0:
- client = new CameraClient(this, cameraClient, cameraId,
- info.facing, callingPid, getpid());//Android原生设计。
- break;
- case CAMERA_DEVICE_API_VERSION_2_0://这里应该是MTK自己进行了扩展。
- client = new Camera2Client(this, cameraClient, cameraId,
- info.facing, callingPid, getpid());
- break;
- default:
- ALOGE("Unknown camera device HAL version: %d", deviceVersion);
- return NULL;
- }
- .....................................................
- ....................................................
- mClient[cameraId] = client;
- LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId, getpid());
- return client;//最终返回的是client, 也即是CameraClient。
- }
通过以上的调用关系, 可以知道camera->setPreviewDisplay(surface)调用到了CameraClient中的对应的方法, 注意这里已经是两个不同的进程了(一个是app进程, 一个是CameraService所在的mediaserver进程), 在jni层通过Camera.cpp里面实现的客户端通过Binder机制连接到CameraService端, 后面的通信都是通过Binder来进行,而不是直接调用。
3.CameraService端:
继续2中的camera->setPreviewDisplay(surface):
可以知道最终会通过Binder调用到CameraClient端。
- // set the Surface that the preview will use
- status_t CameraClient::setPreviewDisplay(const sp<Surface>& surface) {
- #ifdef MTK_CAMERAPROFILE_SUPPORT
- AutoCPTLog cptlog(Event_CS_setPreviewDisplay);
- #endif
- LOG1("setPreviewDisplay(%p) (pid %d)", surface.get(), getCallingPid());
- sp<IBinder> binder(surface != 0 ? surface->asBinder() : 0);
- sp<ANativeWindow> window(surface);//将Surface与ANativeWindow绑定。
- return setPreviewWindow(binder, window);
- }
- status_t CameraClient::setPreviewWindow(const sp<IBinder>& binder,
- const sp<ANativeWindow>& window) {
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
- // return if no change in surface.
- if (binder == mSurface) {
- return NO_ERROR;
- }
- if (window != 0) {
- result = native_window_api_connect(window.get(), NATIVE_WINDOW_API_CAMERA);
- if (result != NO_ERROR) {
- ALOGE("native_window_api_connect failed: %s (%d)", strerror(-result),
- result);
- return result;
- }
- }
- // If preview has been already started, register preview buffers now.
- if (mHardware->previewEnabled()) {
- if (window != 0) {
- native_window_set_scaling_mode(window.get(),
- NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
- native_window_set_buffers_transform(window.get(), mOrientation);
- result = mHardware->setPreviewWindow(window);
- }
- }
- //!++
- else if ( window == 0 ) {
- result = mHardware->setPreviewWindow(window);//将window设置到hal层, Android代码架构真正的实现就止于此,hal层的东西就看具体厂家根据自身情况进行实现了。
- }
- //!--
- if (result == NO_ERROR) {
- // Everything has succeeded. Disconnect the old window and remember the
- // new window.
- disconnectWindow(mPreviewWindow);
- mSurface = binder;
- mPreviewWindow = window;
- } else {
- // Something went wrong after we connected to the new window, so
- // disconnect here.
- disconnectWindow(window);
- }
- return result;
- }
三. 关键点, 这里jni层后涉及到camera所在的app进程和CameraService所在mediaserver两个不同的进程, 他们之间会通过Binder进行通信。 对于这部分, 后面会继续和大家分享。
在CameraService的下一层就是hal层了, 这部分是各个厂家根据自己芯片的特色进行设计构建的。 所以这下面的实现肯定多种多样, 但是可以从去学习代码架构, 看看一帧数据是如何一步一步的进行显示的。
通过上面我们可以看到, 对于显示部分, 我们其实只是设置了一个surface, 在中间层, 又将它和ANativeWindow绑定, 最后完全交给hal层去实现。
后面的分析, 我会以ktm平台的实现去分析。
本篇接着上一篇:
Camera显示之Framework层设置显示窗口
话说上一篇说道
else if ( window == 0 ) {
result = mHardware->setPreviewWindow(window);//将window设置到hal层, Android代码架构真正的实现就止于此,hal层的东西就看具体厂家根据自身情况进行实现了。 }
那究竟mHardware是如何和hal联系起来的的呢?
1.在CameraClient.cpp中:
- status_t CameraClient::initialize(camera_module_t *module) {
- int callingPid = getCallingPid();
- LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId);
- char camera_device_name[10];
- status_t res;
- snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
- mHardware = new CameraHardwareInterface(camera_device_name);//<span style="color:#FF0000;">注意到此处。</span>
- res = mHardware->initialize(&module->common);//<span style="color:#FF0000;">注意此处</span>
- if (res != OK) {
- ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- mHardware.clear();
- return NO_INIT;
- }
- mHardware->setCallbacks(notifyCallback,
- dataCallback,
- dataCallbackTimestamp,
- (void *)mCameraId);
- // Enable zoom, error, focus, and metadata messages by default
- enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
- CAMERA_MSG_PREVIEW_METADATA | CAMERA_MSG_FOCUS_MOVE);
- //!++
- #ifdef MTK_CAMERA_BSP_SUPPORT
- // Enable MTK-extended messages by default
- enableMsgType(MTK_CAMERA_MSG_EXT_NOTIFY | MTK_CAMERA_MSG_EXT_DATA);
- #endif
- //!--
- LOG1("CameraClient::initialize X (pid %d, id %d)", callingPid, mCameraId);
- return OK;
- }
从代码片段:
- mHardware = new CameraHardwareInterface(camera_device_name);//注意到此处。
mHardware 定义是 CameraHardwareInterface, 他也是Android的通用接口。 各个厂家提供的功能都要通过CameraHardwareInterface适配向CameraService提供硬件操作接口。
这篇的主题就是主要分享CameraHardwareInterface如何进行适配的。
2. 接着1中的代码片段:
- res = mHardware->initialize(&module->common);//涉及到module,module即为CameraClient::initialize(camera_module_t *module)传进来的参数, 为一个结构体变量的指针。
- CameraClient::initialize(camera_module_t *module)调用的地方为CameraService中connect camera的时候调用:
- sp<ICamera> CameraService::connect(
- const sp<ICameraClient>& cameraClient, int cameraId) {
- #ifdef MTK_CAMERAPROFILE_SUPPORT
- initCameraProfile();
- AutoCPTLog cptlog(Event_CS_connect);
- #endif
- int callingPid = getCallingPid();
- LOG1("CameraService::connect E (pid %d, id %d)", callingPid, cameraId);
- if (!mModule) {
- ALOGE("Camera HAL module not loaded");
- ...........................
- ............................
- #endif
- if (client->initialize(mModule) != OK) {//在这里调用CameraClient的initialize, 而传入的参数为mModule。
- #ifdef MTK_CAMERAPROFILE_SUPPORT
- CPTLogStr(Event_CS_newCamHwIF, CPTFlagEnd, "new CameraHardwareInterface failed");
- #endif
- #ifdef MTK_CAMERA_BSP_SUPPORT
所以这里我们就关注下mModule这成员, mModule的定义:
- Mutex mSoundLock;
- sp<MediaPlayer> mSoundPlayer[NUM_SOUNDS];
- int mSoundRef; // reference count (release all MediaPlayer when 0)
- camera_module_t *mModule;//
为一个camera_module_t结构体变量的指针。
mModule最先被使用到的地方是在CameraService的onFirstRef()函数中, 在这里主要是初始化了mModule的一些变量。 至于onFirstRef何时调用, 后续进行相关的分享, 这里大家只要记住, 这个是和sp相关的, 并且在构建sp的时候就会调用。 可以参考这位的博客:http://blog.csdn.net/gzzaigcnforever/article/details/20649781
- void CameraService::onFirstRef()
- {
- BnCameraService::onFirstRef();
- if (hw_get_module(CAMERA_HARDWARE_MODULE_ID,//这个定义为"camera"
- (const hw_module_t **)&mModule) < 0) {//<span style="color:#FF0000;">注意这个函数调用</span>
- ALOGE("Could not load camera HAL module");
- mNumberOfCameras = 0;
- }
- else {
- mNumberOfCameras = mModule->get_number_of_cameras();
- if (mNumberOfCameras > MAX_CAMERAS) {
- ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).",
- mNumberOfCameras, MAX_CAMERAS);
- mNumberOfCameras = MAX_CAMERAS;
- }
- for (int i = 0; i < mNumberOfCameras; i++) {
- setCameraFree(i);
- }
- }
- }
- /** Base path of the hal modules */
- #define HAL_LIBRARY_PATH1 "/system/lib/hw"
- #define HAL_LIBRARY_PATH2 "/vendor/lib/hw"
- #define HAL_LIBRARY_PATH3 "/system/lib"
- int hw_get_module(const char *id, const struct hw_module_t **module)
- {
- return hw_get_module_by_class(id, NULL, module);
- }
- int hw_get_module_by_class(const char *class_id, const char *inst,
- const struct hw_module_t **module)
- {
- int status;
- int i;
- const struct hw_module_t *hmi = NULL;
- char prop[PATH_MAX];
- char path[PATH_MAX];
- char name[PATH_MAX];
- if (inst)
- snprintf(name, PATH_MAX, "%s.%s", class_id, inst);//class_id为camera, inst为null, 所以现在name=“camera”
- else
- strlcpy(name, class_id, PATH_MAX);
- /*
- * Here we rely on the fact that calling dlopen multiple times on
- * the same .so will simply increment a refcount (and not load
- * a new copy of the library).
- * We also assume that dlopen() is thread-safe.
- */
- /* Loop through the configuration variants looking for a module */
- for (i=0 ; i<HAL_VARIANT_KEYS_COUNT+1 ; i++) {
- if (i < HAL_VARIANT_KEYS_COUNT) {
- if (property_get(variant_keys[i], prop, NULL) == 0) {
- continue;
- }
- snprintf(path, sizeof(path), "%s/%s.%s.so",
- HAL_LIBRARY_PATH2, name, prop);//path=/vendor/lib/hw/camera.**.so, 根据属性的配置值生成文件名。
- if (access(path, R_OK) == 0) break;//判断是否有读文件权限。
- snprintf(path, sizeof(path), "%s/%s.%s.so",//path=/system/lib/hw/camera.**.so
- HAL_LIBRARY_PATH1, name, prop);
- if (access(path, R_OK) == 0) break;
- snprintf(path, sizeof(path), "%s/%s.%s.so",
- HAL_LIBRARY_PATH3, name, prop);//path=/system/lib/camera.**.so
- if (access(path, R_OK) == 0) break;
- } else {
- snprintf(path, sizeof(path), "%s/%s.default.so",
- HAL_LIBRARY_PATH1, name);//path=/system/lib/hw/camera.default.so
- if (access(path, R_OK) == 0) break;
- snprintf(path, sizeof(path), "%s/%s.default.so",//path=/system/lib/camera.default.so
- HAL_LIBRARY_PATH3, name);
- if (access(path, R_OK) == 0) break;
- }
- }
- status = -ENOENT;
- if (i < HAL_VARIANT_KEYS_COUNT+1) {
- /* load the module, if this fails, we're doomed, and we should not try
- * to load a different variant. */
- status = load(class_id, path, module);//动态加载动态库。
- }
- return status;
- }
上面的思路就是:
遍历
- #define HAL_LIBRARY_PATH1 "/system/lib/hw"
- #define HAL_LIBRARY_PATH2 "/vendor/lib/hw"
- #define HAL_LIBRARY_PATH3 "/system/lib"
继续看看:load(class_id, path, module);:
- static int load(const char *id,
- const char *path,
- const struct hw_module_t **pHmi)
- {
- int status;
- void *handle;
- struct hw_module_t *hmi;
- /*
- * load the symbols resolving undefined symbols before
- * dlopen returns. Since RTLD_GLOBAL is not or'd in with
- * RTLD_NOW the external symbols will not be global
- */
- handle = dlopen(path, RTLD_NOW);
- if (handle == NULL) {
- char const *err_str = dlerror();
- ALOGE("load: module=%s\n%s", path, err_str?err_str:"unknown");
- status = -EINVAL;
- goto done;
- }
- /* Get the address of the struct hal_module_info. */
- const char *sym = HAL_MODULE_INFO_SYM_AS_STR;
- hmi = (struct hw_module_t *)dlsym(handle, sym);//关注这两句
- if (hmi == NULL) {
- ALOGE("load: couldn't find symbol %s", sym);
- status = -EINVAL;
- goto done;
- }
- /* Check that the id matches */
- if (strcmp(id, hmi->id) != 0) {
- ALOGE("load: id=%s != hmi->id=%s", id, hmi->id);
- status = -EINVAL;
- goto done;
- }
- hmi->dso = handle;
- /* success */
- status = 0;
- done:
- if (status != 0) {
- hmi = NULL;
- if (handle != NULL) {
- dlclose(handle);
- handle = NULL;
- }
- } else {
- ALOGV("loaded HAL id=%s path=%s hmi=%p handle=%p",
- id, path, *pHmi, handle);
- }
- *pHmi = hmi;
- return status;
- }
关注这两句:
- const char *sym = HAL_MODULE_INFO_SYM_AS_STR;
- hmi = (struct hw_module_t *)dlsym(handle, sym);//关注这两句
- HAL_MODULE_INFO_SYM_AS_STR:
- /**
- * Name of the hal_module_info
- */
- #define HAL_MODULE_INFO_SYM HMI
- /**
- * Name of the hal_module_info as a string
- */
- #define HAL_MODULE_INFO_SYM_AS_STR "HMI"
- static
- camera_module
- instantiate_camera_module()
- {
- CAM_LOGD("[%s]", __FUNCTION__);
- //
- // (1) Prepare One-shot init.
- MtkCamUtils::Property::clear();
- // (2)
- camera_module module = {
- common: {
- tag: HARDWARE_MODULE_TAG,
- module_api_version: 1,
- hal_api_version: 0,
- id: CAMERA_HARDWARE_MODULE_ID,
- name: "MTK Camera Module",
- author: "MTK",
- methods: CamDeviceManager::get_module_methods(),
- dso: NULL,
- reserved: {0},
- },
- get_number_of_cameras: CamDeviceManager::get_number_of_cameras,
- get_camera_info: CamDeviceManager::get_camera_info,
- };
- return module;
- }
- /*******************************************************************************
- * Implementation of camera_module
- *******************************************************************************/
- camera_module HAL_MODULE_INFO_SYM = instantiate_camera_module();
上面这个代码片段就是mtk实现的, 结合上边, 可以得到*pHmi指向了module这个结构体。 也即是说最后将*pHmi指向这里的module。 进一步回到上面, 就是CameraService中的mModule指向了这里的module。所以说后面的引用大概是CameraService中通过mModule->common.methods这种方式或者mModule->get_number_of_cameras实现到MTK的hal层的调用。 这样就将Android原生CameraService通过CameraHardwareInterface连接到MTK实现的Hal层, 由CamDeviceManager来起承上启下的作用。
3.继续2关注到CamDeviceManager::get_module_methods()这个函数:
- hw_module_methods_t*
- CamDeviceManager::
- get_module_methods()
- {
- static
- hw_module_methods_t
- _methods =
- {
- open: CamDeviceManager::open_device
- };
- return &_methods;
- }
呵呵, 可以看到通过mModule->common.methods->open可以引用到CamDeviceManager::open_device。
通过名字可以猜测到这个方法应该是在Camera启动的时候会去调用。
所以我们看看何时调用CamDeviceManager::open_device这个方法:
回到CameraService:CameraClient:
- status_t CameraClient::initialize(camera_module_t *module) {
- int callingPid = getCallingPid();
- LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId);
- char camera_device_name[10];
- status_t res;
- snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
- mHardware = new CameraHardwareInterface(camera_device_name);
- res = mHardware->initialize(&module->common);//这里初始化了, 并且传入的module->common
回到CameraHardwareInterface:
- status_t initialize(hw_module_t *module)
- {
- ALOGI("Opening camera %s", mName.string());
- int rc = module->methods->open(module, mName.string(),
- (hw_device_t **)&mDevice);//这里进行了打开camera的操作, 这里调用到的已经是MTK hal层的方法了, 注意最后一个参数。
- if (rc != OK) {
- ALOGE("Could not open camera %s: %d", mName.string(), rc);
- return rc;
- }
- initHalPreviewWindow();
- return rc;
- }
关注到CamDeviceManager::open_device
- int
- CamDeviceManager::
- open_device(const hw_module_t* module, const char* name, hw_device_t** device)
- {
- return CamDeviceManager::getInstance().openDevice(module, name, device);
- }
- int
- CamDeviceManager::
- openDevice(const hw_module_t* module, const char* name, hw_device_t** device)
- {
- int err = OK;
- //
- ICamDevice* pdev = NULL;
- int32_t i4OpenId = 0;
- //
- Mutex::Autolock lock(mMtxOpenLock);
- //
- MY_LOGI("+ mi4OpenNum(%d), mi4DeviceNum(%d)", mi4OpenNum, mi4DeviceNum);
- if (name != NULL)
- {
- i4OpenId = ::atoi(name);
- //
- if ( DevMetaInfo::queryNumberOfDevice() < i4OpenId )
- {
- err = -EINVAL;
- goto lbExit;
- }
- //
- if ( MAX_SIMUL_CAMERAS_SUPPORTED <= mi4OpenNum )
- {
- MY_LOGW("open number(%d) >= maximum number(%d)", mi4OpenNum, MAX_SIMUL_CAMERAS_SUPPORTED);
- MY_LOGE("does not support multi-open");
- err = -ENOMEM;
- goto lbExit;
- }
- //
- pdev = createIDevice(
- i4OpenId,
- *get_hw_device(),
- module
- );//注意此处, 进行camDevice的创建
- //
- if ( ! pdev )
- {
- MY_LOGE("camera device allocation fail: pdev(0)");
- err = -ENOMEM;
- goto lbExit;
- }
- *device = pdev->get_hw_device();//此处将CamDevice的指针赋给传进来的形参, 最终是CameraHardwareInterface中的mDevice指向了CamDevice。
- //
- mi4OpenNum++;
- }
- lbExit:
- if ( OK != err )
- {
- if ( pdev )
- {
- destroyDevice(pdev);
- pdev = NULL;
- }
- //
- *device = NULL;
- }
- MY_LOGI("- mi4OpenNum(%d)", mi4OpenNum);
- return err;
- }
4.继续往下关注到
- pdev = createIDevice(
- i4OpenId,
- *get_hw_device(),
- module
- );
- static
- ICamDevice*
- createIDevice(
- int32_t const i4DevOpenId,
- hw_device_t const& hwdevice,
- hw_module_t const*const hwmodule
- )
- {
- g_s8ClientAppMode = queryClientAppMode();
- //
- MY_LOGI("+ tid:%d OpenID:%d ClientAppMode:%s", ::gettid(), i4DevOpenId, g_s8ClientAppMode.string());
- //
- ICamDevice* pdev = NSCamDevice::createDevice(g_s8ClientAppMode, i4DevOpenId);//pdev 指向的就是ICamDevice的一个对象
- //
- if ( pdev != 0 )
- {
- pdev->incStrong(pdev);
- //
- hw_device_t* hwdev = pdev->get_hw_device();//
- *hwdev = hwdevice;
- hwdev->module = const_cast<hw_module_t*>(hwmodule);
- //
- if ( ! pdev->init() )//在这里初始化了ICamDevice
- {
- MY_LOGE("fail to initialize a newly-created instance");
- pdev->uninit();
- pdev = NULL;
- }
- }
- //
- MY_LOGI("- created instance=%p", &(*pdev));
- return pdev;//返回创建的ICamDevice。
- }
现在可以得出pdev即是指向ICamDevice对象
注意到ICamDevice对象的构造函数:
- ICamDevice::
- ICamDevice()
- : camera_device_t()
- , RefBase()
- , mDevOps()
- //
- , mMtxLock()
- //
- {
- MY_LOGD("ctor");
- ::memset(static_cast<camera_device_t*>(this), 0, sizeof(camera_device_t));
- this->priv = this;
- this->ops = &mDevOps;//ops指向了mDevOps
- mDevOps = gCameraDevOps;//mDevOps为gCameraDevOps指向的结构体
- }
- gCameraDevOps:
- static camera_device_ops_t const gCameraDevOps = {
- set_preview_window: camera_set_preview_window,
- set_callbacks: camera_set_callbacks,
- enable_msg_type: camera_enable_msg_type,
- disable_msg_type: camera_disable_msg_type,
- msg_type_enabled: camera_msg_type_enabled,
- start_preview: camera_start_preview,
- stop_preview: camera_stop_preview,
- preview_enabled: camera_preview_enabled,
- store_meta_data_in_buffers: camera_store_meta_data_in_buffers,
- start_recording: camera_start_recording,
- stop_recording: camera_stop_recording,
- recording_enabled: camera_recording_enabled,
- release_recording_frame: camera_release_recording_frame,
- auto_focus: camera_auto_focus,
- cancel_auto_focus: camera_cancel_auto_focus,
- take_picture: camera_take_picture,
- cancel_picture: camera_cancel_picture,
- set_parameters: camera_set_parameters,
- get_parameters: camera_get_parameters,
- put_parameters: camera_put_parameters,
- send_command: camera_send_command,
- release: camera_release,
- dump: camera_dump,
- };
所以在CameraHardwareInterface中通过:
mDevice->ops->set_preview_window(mDevice, 0)类似的方法就可以调用到ICamDevice中对应的方法了。
5. 我们回到Camera显示相关的东西,
在CameraClient中//!++
else if ( window == 0 ) {
result = mHardware->setPreviewWindow(window);
}
进而在CameraHardwareInterface中:
- /** Set the ANativeWindow to which preview frames are sent */
- status_t setPreviewWindow(const sp<ANativeWindow>& buf)
- {
- ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
- if (mDevice->ops->set_preview_window) {
- //!++
- if ( buf == 0 ) {
- ALOGD("set_preview_window(0) before mPreviewWindow = 0");
- mDevice->ops->set_preview_window(mDevice, 0);//直接调用了ICamDevice的相关的方法。
- mPreviewWindow = 0;
- return OK;
- }
- //!--
- mPreviewWindow = buf;
- mHalPreviewWindow.user = this;
- ALOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p", __FUNCTION__,
- &mHalPreviewWindow, mHalPreviewWindow.user);
- return mDevice->ops->set_preview_window(mDevice,
- buf.get() ? &mHalPreviewWindow.nw : 0);
- }
- return INVALID_OPERATION;
- }
5.说到这里, CameraService::CameraClient--->CameraHardwareInterface-->CamDeviceManager-->ICamDevice这一条完整的路线非常清楚。
具体思路就是:
a.CameraHardwareInterface是Android原生定义的和硬件hal层连接的适配接口。各个厂家根据需要去具体实现这些接口,并具体实现底层的相关功能。
b.为了代码通用性和模块的分离性, 对hal层模块的实现封装成动态库(so), CameraService根据需要动态加载hal层的库。
c.CamDeviceManager是Hal层的一个入口类, 从CameraService打开关闭camera的时候都是通过它进行总的安排。
d.hal层下具体的实现都是不断的适配CameraHardwareInterface向上提供的接口的一个过程。
附上以一个打开Camera的流程图供参考:
接着上一篇:
Camera显示之Hal层的适配(一)
一.基本关系
1.先来看看KTM hal层大概类图关系:
大概类图关系就是这样, 其中和显示相关的类图关系如红线所圈区域。
可以猜测到 与显示相关的逻辑处理应该都会在DisplayClient这个类去实现。
以后app下达有关预览显示相关的命令在hal层基本上都是沿这一条线进行传递, 不过从1中我们可以看到CamDevice还有一些衍生类, 这些都是mtk为不同设备做的一些定制, 主要的路径还是如上图所示。
二.接着之前的在CameraClient中的代码:
- //!++
- else if ( window == 0 ) {
- result = mHardware->setPreviewWindow(window);
- }
1.setPreviewWindow(window)通过CameraHardwareInterface适配:
- mDevice->ops->set_preview_window(mDevice,
- buf.get() ? &mHalPreviewWindow.nw : 0);
来实现向hal层下达命令和设置参数。
在这里我们发现传入的是mHalPreviewWindow.nw, 而不是我们之前所讲述的ANativeWindow 这是因为mHalPreviewWindow.nw将ANativeWindow的一些流的操作进行封装, 使之操作更加简便。
mHalPreviewWindow.nw的定义:
- struct camera_preview_window {
- struct preview_stream_ops nw;
- void *user;
- };
mHalPreviewWindow.nw就是结构体struct preview_stream_ops:
- typedef struct preview_stream_ops {
- int (*dequeue_buffer)(struct preview_stream_ops* w,
- buffer_handle_t** buffer, int *stride);
- int (*enqueue_buffer)(struct preview_stream_ops* w,
- buffer_handle_t* buffer);
- int (*cancel_buffer)(struct preview_stream_ops* w,
- buffer_handle_t* buffer);
- int (*set_buffer_count)(struct preview_stream_ops* w, int count);
- int (*set_buffers_geometry)(struct preview_stream_ops* pw,
- int w, int h, int format);
- int (*set_crop)(struct preview_stream_ops *w,
- int left, int top, int right, int bottom);
- int (*set_usage)(struct preview_stream_ops* w, int usage);
- int (*set_swap_interval)(struct preview_stream_ops *w, int interval);
- int (*get_min_undequeued_buffer_count)(const struct preview_stream_ops *w,
- int *count);
- int (*lock_buffer)(struct preview_stream_ops* w,
- buffer_handle_t* buffer);
- // Timestamps are measured in nanoseconds, and must be comparable
- // and monotonically increasing between two frames in the same
- // preview stream. They do not need to be comparable between
- // consecutive or parallel preview streams, cameras, or app runs.
- int (*set_timestamp)(struct preview_stream_ops *w, int64_t timestamp);
对显示流的操作都是通过这些函数实现的, 而mHalPreviewWindow中实现了具体操作的方法, 在这些方法的实现中完成对ANativeWindow的操作。 而在hal端就是通过mHalPreviewWindow.nw进行对ANativeWindow的具体操作。
基本类图关系:
2.继续1中的:
- mDevice->ops->set_preview_window(mDevice,
- buf.get() ? &mHalPreviewWindow.nw : 0);
我们已经知道了mHalPreviewWindow.nw为传入的一个重要参数, 其类型为preview_stream_ops。
继续看看set_preview_window这个方法。 我们由上篇文章知道ops是ICamDevice的一个成员gCameraDevOps, 类型为camera_device_ops_t:
可以看到:
- static camera_device_ops_t const gCameraDevOps = {
- set_preview_window: camera_set_preview_window,
- set_callbacks: camera_set_callbacks,
- enable_msg_type: camera_enable_msg_type,
- disable_msg_type: camera_disable_msg_type,
- msg_type_enabled: camera_msg_type_enabled,
- start_preview: camera_start_preview,
- stop_preview: camera_stop_preview,
- preview_enabled: camera_preview_enabled,
- store_meta_data_in_buffers: camera_store_meta_data_in_buffers,
- start_recording: camera_start_recording,
- stop_recording: camera_stop_recording,
- recording_enabled: camera_recording_enabled,
- release_recording_frame: camera_release_recording_frame,
- auto_focus: camera_auto_focus,
- cancel_auto_focus: camera_cancel_auto_focus,
- take_picture: camera_take_picture,
- cancel_picture: camera_cancel_picture,
- set_parameters: camera_set_parameters,
- get_parameters: camera_get_parameters,
- put_parameters: camera_put_parameters,
- send_command: camera_send_command,
- release: camera_release,
- dump: camera_dump,
- };
gCameraDevOps 中的函数地址映射到ICamDevice中的函数实现。
所以 :ops->set_preview_window(mDevice, buf.get() ? &mHalPreviewWindow.nw : 0) 就对应到ICamDevice::camera_set_preview_window的方法调用。
- static int camera_set_preview_window(
- struct camera_device * device,
- struct preview_stream_ops *window
- )
- {
- int err = -EINVAL;
- //
- ICamDevice*const pDev = ICamDevice::getIDev(device);
- if ( pDev )
- {
- err = pDev->setPreviewWindow(window);
- }
- //
- return err;
- }
- static inline ICamDevice* getIDev(camera_device*const device)
- {
- return (NULL == device)
- ? NULL
- : reinterpret_cast<ICamDevice*>(device->priv);//得到device->priv
由上篇文章:
知道device->priv实际上是在创建实例的时候指向的自己:
- ICamDevice::
- ICamDevice()
- : camera_device_t()
- , RefBase()
- , mDevOps()
- //
- , mMtxLock()
- //
- {
- MY_LOGD("ctor");
- ::memset(static_cast<camera_device_t*>(this), 0, sizeof(camera_device_t));
- this->priv = this; //用priv指针保存自己。
- this->ops = &mDevOps;//ops指向了mDevOps
- mDevOps = gCameraDevOps;//mDevOps为gCameraDevOps指向的结构体
- }
继续回到pDev->setPreviewWindow(window);
在ICamDevice中没有对setPreviewWindow具体的实现,而是在其子类CamDevice对ICamDevice进行了具体的实现;
所以代码定位到CamDevice:
- status_t
- CamDevice::
- setPreviewWindow(preview_stream_ops* window)
- {
- MY_LOGI("+ window(%p)", window);
- //
- status_t status = initDisplayClient(window);//开始初始化DisplayClient
- if ( OK == status && previewEnabled() && mpDisplayClient != 0 )
- {
- status = enableDisplayClient();//使能DisplayClient端
- }
- //
- return status;
- }
- status_t
- CamDevice::
- initDisplayClient(preview_stream_ops* window)
- {
- #if '1'!=MTKCAM_HAVE_DISPLAY_CLIENT
- #warning "Not Build Display Client"
- MY_LOGD("Not Build Display Client");
- ..............
- .............
- // [3.1] create a Display Client.
- mpDisplayClient = IDisplayClient::createInstance();
- if ( mpDisplayClient == 0 )
- {
- MY_LOGE("Cannot create mpDisplayClient");
- status = NO_MEMORY;
- goto lbExit;
- }
- // [3.2] initialize the newly-created Display Client.
- if ( ! mpDisplayClient->init() )
- {
- MY_LOGE("mpDisplayClient init() failed");
- mpDisplayClient->uninit();
- mpDisplayClient.clear();
- status = NO_MEMORY;
- goto lbExit;
- }
- // [3.3] set preview_stream_ops & related window info.
- if ( ! mpDisplayClient->setWindow(window, previewSize.width, previewSize.height, queryDisplayBufCount()) )//绑定window
- {
- status = INVALID_OPERATION;
- goto lbExit;
- }
- // [3.4] set Image Buffer Provider Client if it exist.
- if ( mpCamAdapter != 0 && ! mpDisplayClient->setImgBufProviderClient(mpCamAdapter) )//重要! 设置流数据的Buffer提供者。
- {
- status = INVALID_OPERATION;
- goto lbExit;
- }
- ..................
- ..................
- status_t
- CamDevice::
- enableDisplayClient()
- {
- status_t status = OK;
- Size previewSize;
- //
- // [1] Get preview size.
- if ( ! queryPreviewSize(previewSize.width, previewSize.height) )
- {
- MY_LOGE("queryPreviewSize");
- status = DEAD_OBJECT;
- goto lbExit;
- }
- //
- // [2] Enable
- if ( ! mpDisplayClient->enableDisplay(previewSize.width, previewSize.height, queryDisplayBufCount(), mpCamAdapter) )//设置了预览数据的尺寸和Buffer提供者相关的数据
- {
- MY_LOGE("mpDisplayClient(%p)->enableDisplay()", mpDisplayClient.get());
- status = INVALID_OPERATION;
- goto lbExit;
- }
- //
- status = OK;
- lbExit:
- return status;
- }
3.定位到DisplayClient中:
- enableDisplay(
- int32_t const i4Width,
- int32_t const i4Height,
- int32_t const i4BufCount,
- sp<IImgBufProviderClient>const& rpClient
- )
- {
- bool ret = false;
- preview_stream_ops* pStreamOps = mpStreamOps;
- //
- // [1] Re-configurate this instance if any setting changes.
- if ( ! checkConfig(i4Width, i4Height, i4BufCount, rpClient) )
- {
- MY_LOGW("<Config Change> Uninit the current DisplayClient(%p) and re-config...", this);
- //
- // [.1] uninitialize
- uninit();
- //
- // [.2] initialize
- if ( ! init() )
- {
- MY_LOGE("re-init() failed");
- goto lbExit;
- }
- //
- // [.3] set related window info.
- if ( ! setWindow(pStreamOps, i4Width, i4Height, i4BufCount) )//window的尺寸和预览数据的大小一致
- {
- goto lbExit;
- }
- //
- // [.4] set Image Buffer Provider Client.
- if ( ! setImgBufProviderClient(rpClient) )//Buffer的数据提供者为mpCamAdapter, 就是CamAdapter, 后面的预览数据源都是通过它来提供。
- {
- goto lbExit;
- }
- }
- //
- // [2] Enable.
- if ( ! enableDisplay() )//开始进行数据的获取和显示
- {
- goto lbExit;
- }
- //
- ret = true;
- lbExit:
- return ret;
- }
先来看看第一个关键函数:setWindow(pStreamOps, i4Width, i4Height, i4BufCount)
- bool
- DisplayClient::
- setWindow(
- preview_stream_ops*const window,
- int32_t const wndWidth,
- int32_t const wndHeight,
- int32_t const i4MaxImgBufCount
- )
- {
- MY_LOGI("+ window(%p), WxH=%dx%d, count(%d)", window, wndWidth, wndHeight, i4MaxImgBufCount);
- //
- if ( ! window )
- {
- MY_LOGE("NULL window passed into");
- return false;
- }
- //
- if ( 0 >= wndWidth || 0 >= wndHeight || 0 >= i4MaxImgBufCount )
- {
- MY_LOGE("bad arguments - WxH=%dx%d, count(%d)", wndWidth, wndHeight, i4MaxImgBufCount);
- return false;
- }
- //
- //
- Mutex::Autolock _l(mModuleMtx);
- return set_preview_stream_ops(window, wndWidth, wndHeight, i4MaxImgBufCount);//
- }
- bool
- DisplayClient::
- set_preview_stream_ops(
- preview_stream_ops*const window,
- int32_t const wndWidth,
- int32_t const wndHeight,
- int32_t const i4MaxImgBufCount
- )
- {
- CamProfile profile(__FUNCTION__, "DisplayClient");
- //
- bool ret = false;
- status_t err = 0;
- int32_t min_undequeued_buf_count = 0;
- //
- // (2) Check
- if ( ! mStreamBufList.empty() )
- {
- MY_LOGE(
- "locked buffer count(%d)!=0, "
- "callers must return all dequeued buffers, "
- // "and then call cleanupQueue()"
- , mStreamBufList.size()
- );
- dumpDebug(mStreamBufList, __FUNCTION__);
- goto lbExit;
- }
- //
- // (3) Sava info.
- mpStreamImgInfo.clear();//mpStreamImgInfo封装了视频数据流的基本信息。
- mpStreamImgInfo = new ImgInfo(wndWidth, wndHeight, CAMERA_DISPLAY_FORMAT, CAMERA_DISPLAY_FORMAT_HAL, "Camera@Display");//设置了Stream的宽高和显示类型。
- mpStreamOps = window;//mpStreamOps保存了上层传进来的对象指针。后面就通过它和显示方进行交互。
- mi4MaxImgBufCount = i4MaxImgBufCount;
- ........................
- ........................
- err = mpStreamOps->set_buffer_count(mpStreamOps, mi4MaxImgBufCount+min_undequeued_buf_count);
- if ( err )
- {
- MY_LOGE("set_buffer_count failed: status[%s(%d)]", ::strerror(-err), -err);
- if ( ENODEV == err )
- {
- MY_LOGD("Preview surface abandoned!");
- mpStreamOps = NULL;
- }
- goto lbExit;
- }
- //
- // (4.4) Set window geometry
- err = mpStreamOps->set_buffers_geometry(//设置基本的流信息
- mpStreamOps,
- mpStreamImgInfo->mu4ImgWidth,
- mpStreamImgInfo->mu4ImgHeight,
- mpStreamImgInfo->mi4ImgFormat
- );
通过 上面的代码片段和分析, 确定了上层传递下来的对象指针保存在mpStreamOps, 与显示相关的交互都将通过mpStreamOps来进行操作。 而mpStreamImgInfo封装了流数据的大小和格式等。
再来看看第二个关键函数:setImgBufProviderClient(rpClient):
- bool
- DisplayClient::
- setImgBufProviderClient(sp<IImgBufProviderClient>const& rpClient)
- {
- bool ret = false;
- //
- MY_LOGD("+ ImgBufProviderClient(%p), mpImgBufQueue.get(%p)", rpClient.get(), mpImgBufQueue.get());
- //
- if ( rpClient == 0 )
- {
- MY_LOGE("NULL ImgBufProviderClient");
- mpImgBufPvdrClient = NULL;
- goto lbExit;
- }
- //
- if ( mpImgBufQueue != 0 )
- {
- if ( ! rpClient->onImgBufProviderCreated(mpImgBufQueue) )//通知Provider端(Buffer数据提供者端),我这边已经建好Buffer队列, 后面你就填充数据到对应的Buffer供我使用。
- {
- goto lbExit;
- }
- mpImgBufPvdrClient = rpClient;//用mpImgBufPvdrClient保存provider的对象指针, 方便使用。
- }
- //
- ret = true;
- lbExit:
- MY_LOGD("-");
- return ret;
- };
再来看看第三个关键函数 enableDisplay() :
- bool
- DisplayClient::
- enableDisplay()
- {
- bool ret = false;
- //
- // (1) Lock
- Mutex::Autolock _l(mModuleMtx);
- //
- MY_LOGD("+ isDisplayEnabled(%d), mpDisplayThread.get(%p)", isDisplayEnabled(), mpDisplayThread.get());
- //
- // (2) Check to see if it has been enabled.
- if ( isDisplayEnabled() )
- {
- MY_LOGD("Display is already enabled");
- ret = true;
- goto lbExit;
- }
- //
- // (3) Check to see if thread is alive.
- if ( mpDisplayThread == 0 )
- {
- MY_LOGE("NULL mpDisplayThread");
- goto lbExit;
- }
- //
- // (4) Enable the flag.
- ::android_atomic_write(1, &mIsDisplayEnabled);
- //
- // (5) Post a command to wake up the thread.
- mpDisplayThread->postCommand(Command(Command::eID_WAKEUP));//通知获取数据的线程开始运行
- //
- //
- ret = true;
- lbExit:
- MY_LOGD("- ret(%d)", ret);
- return ret;
- }
- bool
- DisplayThread::
- threadLoop()
- {
- Command cmd;
- if ( getCommand(cmd) )
- {
- switch (cmd.eId)
- {
- case Command::eID_EXIT:
- MY_LOGD("Command::%s", cmd.name());
- break;
- //
- case Command::eID_WAKEUP://对应上面发送的命令
- default:
- if ( mpThreadHandler != 0 )
- {
- mpThreadHandler->onThreadLoop(cmd);//注意此处, mpThreadHandler就是DisplayClient(它继承了IDisplayThreadHandler),
- }
- else
- {
- MY_LOGE("cannot handle cmd(%s) due to mpThreadHandler==NULL", cmd.name());
- }
- break;
- }
- }
- //
- MY_LOGD("- mpThreadHandler.get(%p)", mpThreadHandler.get());
- return true;
- }
- bool
- DisplayClient::
- onThreadLoop(Command const& rCmd)
- {
- // (0) lock Processor.
- sp<IImgBufQueue> pImgBufQueue;
- {
- Mutex::Autolock _l(mModuleMtx);
- pImgBufQueue = mpImgBufQueue;
- if ( pImgBufQueue == 0 || ! isDisplayEnabled() )//判断显示相关的初始化是否完成和启动
- {
- MY_LOGW("pImgBufQueue.get(%p), isDisplayEnabled(%d)", pImgBufQueue.get(), isDisplayEnabled());
- return true;
- }
- }
- // (1) Prepare all TODO buffers.
- if ( ! prepareAllTodoBuffers(pImgBufQueue) )//为pImgBufQueue添加空Buffer。
- {
- return true;
- }
- // (2) Start
- if ( ! pImgBufQueue->startProcessor() )//开始获取数据
- {
- return true;
- }
- //
- {
- Mutex::Autolock _l(mStateMutex);
- mState = eState_Loop;
- mStateCond.broadcast();
- }
- //
- // (3) Do until disabled.
- while ( 1 )//进入无限循环
- {
- // (.1)
- waitAndHandleReturnBuffers(pImgBufQueue);//等待pImgBufQueue中的数据,并送到显示端显示
- // (.2) break if disabled.
- if ( ! isDisplayEnabled() )
- {
- MY_LOGI("Display disabled");
- break;
- }
- // (.3) re-prepare all TODO buffers, if possible,
- // since some DONE/CANCEL buffers return.
- prepareAllTodoBuffers(pImgBufQueue);//又重新准备Buffer。
- }
- //
- // (4) Stop
- pImgBufQueue->pauseProcessor();
- pImgBufQueue->flushProcessor();
- pImgBufQueue->stopProcessor();//停止数据获取
- //
- // (5) Cancel all un-returned buffers.
- cancelAllUnreturnBuffers();//没有来得及显示的数据, 也取消掉。
- //
- {
- Mutex::Autolock _l(mStateMutex);
- mState = eState_Suspend;
- mStateCond.broadcast();
- }
- //
- return true;
- }
上边这个代码片段对预览数据的处理就在waitAndHandleReturnBuffers(pImgBufQueue);中。
4.对waitAndHandleReturnBuffers(pImgBufQueue);进行分析:
- bool
- DisplayClient::
- waitAndHandleReturnBuffers(sp<IImgBufQueue>const& rpBufQueue)
- {
- bool ret = false;
- Vector<ImgBufQueNode> vQueNode;
- //
- MY_LOGD_IF((1<=miLogLevel), "+");
- //
- // (1) deque buffers from processor.
- rpBufQueue->dequeProcessor(vQueNode);//从provider端(数据提供端)获取一个填充数据了的Buffer。
- if ( vQueNode.empty() ) {
- MY_LOGW("vQueNode.empty()");
- goto lbExit;
- }
- // (2) handle buffers dequed from processor.
- ret = handleReturnBuffers(vQueNode);//处理填充了数据的这个Buffer中的数据。
- lbExit:
- //
- MY_LOGD_IF((2<=miLogLevel), "- ret(%d)", ret);
- return ret;
- }
看看handleReturnBuffers函数:
- bool
- DisplayClient::
- handleReturnBuffers(Vector<ImgBufQueNode>const& rvQueNode)
- {
- /*
- * Notes:
- * For 30 fps, we just enque (display) the latest frame,
- * and cancel the others.
- * For frame rate > 30 fps, we should judge the timestamp here or source.
- */
- // (1) determine the latest DONE buffer index to display; otherwise CANCEL.
- int32_t idxToDisp = 0;
- for ( idxToDisp = rvQueNode.size()-1; idxToDisp >= 0; idxToDisp--)
- {
- if ( rvQueNode[idxToDisp].isDONE() )
- break;
- }
- if ( rvQueNode.size() > 1 )
- {
- MY_LOGW("(%d) display frame count > 1 --> select %d to display", rvQueNode.size(), idxToDisp);
- }
- //
- // Show Time duration.
- if ( 0 <= idxToDisp )
- {
- nsecs_t const _timestamp1 = rvQueNode[idxToDisp].getImgBuf()->getTimestamp();
- mProfile_buffer_timestamp.pulse(_timestamp1);
- nsecs_t const _msDuration_buffer_timestamp = ::ns2ms(mProfile_buffer_timestamp.getDuration());
- mProfile_buffer_timestamp.reset(_timestamp1);
- //
- mProfile_dequeProcessor.pulse();
- nsecs_t const _msDuration_dequeProcessor = ::ns2ms(mProfile_dequeProcessor.getDuration());
- mProfile_dequeProcessor.reset();
- //
- MY_LOGD_IF(
- (1<=miLogLevel), "+ %s(%lld) %s(%lld)",
- (_msDuration_buffer_timestamp < 0 ) ? "time inversion!" : "", _msDuration_buffer_timestamp,
- (_msDuration_dequeProcessor > 34) ? "34ms < Duration" : "", _msDuration_dequeProcessor
- );
- }
- //
- // (2) Lock
- Mutex::Autolock _l(mModuleMtx);
- //
- // (3) Remove from List and enquePrvOps/cancelPrvOps, one by one.
- int32_t const queSize = rvQueNode.size();
- for (int32_t i = 0; i < queSize; i++)
- {
- sp<IImgBuf>const& rpQueImgBuf = rvQueNode[i].getImgBuf(); // ImgBuf in Queue.
- sp<StreamImgBuf>const pStreamImgBuf = *mStreamBufList.begin(); // ImgBuf in List.
- // (.1) Check valid pointers to image buffers in Queue & List
- if ( rpQueImgBuf == 0 || pStreamImgBuf == 0 )
- {
- MY_LOGW("Bad ImgBuf:(Que[%d], List.begin)=(%p, %p)", i, rpQueImgBuf.get(), pStreamImgBuf.get());
- continue;
- }
- // (.2) Check the equality of image buffers between Queue & List.
- if ( rpQueImgBuf->getVirAddr() != pStreamImgBuf->getVirAddr() )
- {
- MY_LOGW("Bad address in ImgBuf:(Que[%d], List.begin)=(%p, %p)", i, rpQueImgBuf->getVirAddr(), pStreamImgBuf->getVirAddr());
- continue;
- }
- // (.3) Every check is ok. Now remove the node from the list.
- mStreamBufList.erase(mStreamBufList.begin());//经过检查返回的这一帧数据的Buffer是DisplayClient端分配和提供的。
- //
- // (.4) enquePrvOps/cancelPrvOps
- if ( i == idxToDisp ) {
- MY_LOGD_IF(
- (1<=miLogLevel),
- "Show frame:%d %d [ion:%d %p/%d %lld]",
- i, rvQueNode[i].getStatus(), pStreamImgBuf->getIonFd(),
- pStreamImgBuf->getVirAddr(), pStreamImgBuf->getBufSize(), pStreamImgBuf->getTimestamp()
- );
- //
- if(mpExtImgProc != NULL)
- {
- if(mpExtImgProc->getImgMask() & ExtImgProc::BufType_Display)
- {
- IExtImgProc::ImgInfo img;
- //
- img.bufType = ExtImgProc::BufType_Display;
- img.format = pStreamImgBuf->getImgFormat();
- img.width = pStreamImgBuf->getImgWidth();
- img.height = pStreamImgBuf->getImgHeight();
- img.stride[0] = pStreamImgBuf->getImgWidthStride(0);
- img.stride[1] = pStreamImgBuf->getImgWidthStride(1);
- img.stride[2] = pStreamImgBuf->getImgWidthStride(2);
- img.virtAddr = (MUINT32)(pStreamImgBuf->getVirAddr());
- img.bufSize = pStreamImgBuf->getBufSize();
- //
- mpExtImgProc->doImgProc(img);
- }
- }
- //
- enquePrvOps(pStreamImgBuf);//送入显示端显示
- }
- else {
- MY_LOGW(
- "Drop frame:%d %d [ion:%d %p/%d %lld]",
- i, rvQueNode[i].getStatus(), pStreamImgBuf->getIonFd(),
- pStreamImgBuf->getVirAddr(), pStreamImgBuf->getBufSize(), pStreamImgBuf->getTimestamp()
- );
- cancelPrvOps(pStreamImgBuf);
- }
- }
- //
- MY_LOGD_IF((1<=miLogLevel), "-");
- return true;
- }
- void
- DisplayClient::
- enquePrvOps(sp<StreamImgBuf>const& rpImgBuf)
- {
- mProfile_enquePrvOps.pulse();
- if ( mProfile_enquePrvOps.getDuration() >= ::s2ns(2) ) {
- mProfile_enquePrvOps.updateFps();
- mProfile_enquePrvOps.showFps();
- mProfile_enquePrvOps.reset();
- }
- //
- status_t err = 0;
- //
- CamProfile profile(__FUNCTION__, "DisplayClient");
- profile.print_overtime(
- ((1<=miLogLevel) ? 0 : 1000),
- "+ locked buffer count(%d), rpImgBuf(%p,%p), Timestamp(%lld)",
- mStreamBufList.size(), rpImgBuf.get(), rpImgBuf->getVirAddr(), rpImgBuf->getTimestamp()
- );
- //
- // [1] unlock buffer before sending to display
- GraphicBufferMapper::get().unlock(rpImgBuf->getBufHndl());
- profile.print_overtime(1, "GraphicBufferMapper::unlock");
- //
- // [2] Dump image if wanted.
- dumpImgBuf_If(rpImgBuf);
- //
- // [3] set timestamp.
- err = mpStreamOps->set_timestamp(mpStreamOps, rpImgBuf->getTimestamp());
- profile.print_overtime(2, "mpStreamOps->set_timestamp, Timestamp(%lld)", rpImgBuf->getTimestamp());
- if ( err )
- {
- MY_LOGE(
- "mpStreamOps->set_timestamp failed: status[%s(%d)], rpImgBuf(%p), Timestamp(%lld)",
- ::strerror(-err), -err, rpImgBuf.get(), rpImgBuf->getTimestamp()
- );
- }
- //
- // [4] set gralloc buffer type & dirty
- ::gralloc_extra_setBufParameter(
- rpImgBuf->getBufHndl(),
- GRALLOC_EXTRA_MASK_TYPE | GRALLOC_EXTRA_MASK_DIRTY,
- GRALLOC_EXTRA_BIT_TYPE_CAMERA | GRALLOC_EXTRA_BIT_DIRTY
- );
- //
- // [5] unlocks and post the buffer to display.
- err = mpStreamOps->enqueue_buffer(mpStreamOps, rpImgBuf->getBufHndlPtr());//注意这里可以看到最终是通过mpStreamOps送入送给显示端显示的。
- profile.print_overtime(10, "mpStreamOps->enqueue_buffer, Timestamp(%lld)", rpImgBuf->getTimestamp());
- if ( err )
- {
- MY_LOGE(
- "mpStreamOps->enqueue_buffer failed: status[%s(%d)], rpImgBuf(%p,%p)",
- ::strerror(-err), -err, rpImgBuf.get(), rpImgBuf->getVirAddr()
- );
- }
- }
从上面的代码片段, 可以看到从显示数据最终是通过mpStreamOps(CameraHardwareInterface中传下来的的mHalPreviewWindow.nw)来进行处理的。
至此预览数据就算完全交给了ANativeWindow进行显示。
但是预览数据究竟是怎样从Provider端来的, 我们也提到在DisplayClient也会去分配一些buffer, 这些Buffer又是如何管理的。 后续会接着分享。