我今天才正式开始学习 Android 原生开发。由于新建项目时默认使用的就是 Jetpack Compose 框架,我便直接用 Jetpack Compose 来开发应用,但在做人脸识别时遇到了很多问题。
我在用Compose框架开发相机相关应用时,发现设备无法拍照,而在模拟器中是正常可以拍照的。
处理这个问题时正赶上周末加班,设备厂商没有上班,导致这两天问题一直没有解决。直到厂家技术人员上班,我才得到如下回复:
因为 860 设备使用的是 USB 摄像头,与 Android 标准 Camera 接口(该接口通常面向手机上常见的 MIPI 摄像头)融合得不够全面
骁龙相机不要用,检测摄像头之类的采用整机测试的主、副摄像头测试
App人脸识别也只会用到Camera的预览,不会用到拍照
预览图像用于人脸识别,哪怕你要保存现场照片,也是直接预览数据保存,不用拍照
知道原因后,厂家给我的demo是用传统的写法,而且依赖的各种库版本相对旧,我一时半会儿无法应用到项目之中,又耗时一天才找到解决办法。
直到我找到了这个开源项目:https://github.com/YanneckReiss/JetpackComposeMLKitTutorial
核心代码
我只列出我核心修改的代码,其他代码都是来源于以上项目中的。
TextRecognitionAnalyzer.kt
package de.yanneckreiss.mlkittutorial.ui.camera
import android.graphics.Bitmap
import android.media.Image
import android.util.Base64
import androidx.annotation.OptIn
import androidx.camera.core.ExperimentalGetImage
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageProxy
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.SupervisorJob
import kotlinx.coroutines.delay
import kotlinx.coroutines.launch
import okhttp3.MediaType.Companion.toMediaTypeOrNull
import okhttp3.OkHttpClient
import okhttp3.Request
import okhttp3.RequestBody
import org.json.JSONObject
import java.io.ByteArrayOutputStream
import java.nio.ByteBuffer
/**
 * CameraX [ImageAnalysis.Analyzer] that sends preview frames to Baidu's
 * face-search REST API and reports the raw JSON response through
 * [onDetectedTextUpdated].
 *
 * Fixes over the original:
 * - The constructor callback is now actually invoked with the API response
 *   (previously the result was only printed and the callback was dead code).
 * - Both OkHttp responses are closed via `use { }`; leaking response bodies
 *   exhausts OkHttp's connection pool.
 * - The OAuth access token is fetched once and cached instead of being
 *   requested over the network for every analyzed frame.
 */
class TextRecognitionAnalyzer(
    private val onDetectedTextUpdated: (String) -> Unit
) : ImageAnalysis.Analyzer {

    private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO + SupervisorJob())

    // Baidu access tokens are long-lived (~30 days); cache after first fetch.
    // @Volatile so a token written by one analysis coroutine is visible to the next.
    @Volatile
    private var cachedAccessToken: String? = null

    @OptIn(ExperimentalGetImage::class)
    override fun analyze(imageProxy: ImageProxy) {
        scope.launch {
            // Convert the preview frame to Base64 — face recognition works on
            // preview data only; no still-capture is involved (see vendor note).
            val bitmap: Bitmap = imageProxy.toBitmap()
            val imageBytes = ByteArrayOutputStream()
            bitmap.compress(Bitmap.CompressFormat.PNG, 100, imageBytes)
            val base64String = Base64.encodeToString(imageBytes.toByteArray(), Base64.DEFAULT)

            val response = requestFaceRecognition(base64String, getAccessToken())
            // Deliver the recognition result to the caller.
            onDetectedTextUpdated(response)

            // Throttle: hold this frame open so the remote API is hit at most
            // once per THROTTLE_TIMEOUT_MS (backpressure via the unclosed proxy).
            delay(THROTTLE_TIMEOUT_MS)
        }.invokeOnCompletion { exception ->
            exception?.printStackTrace()
            // Always close the proxy — even on failure — or the camera pipeline stalls.
            imageProxy.close()
        }
    }

    /**
     * Returns the cached OAuth access token, fetching it from Baidu's token
     * endpoint on first use.
     *
     * @throws org.json.JSONException if the endpoint response has no "access_token"
     *         field (e.g. invalid API_KEY / SECRET_KEY).
     */
    private fun getAccessToken(): String {
        cachedAccessToken?.let { return it }
        val mediaType = "application/x-www-form-urlencoded".toMediaTypeOrNull()
        val body = RequestBody.create(
            mediaType,
            "grant_type=client_credentials&client_id=$API_KEY&client_secret=$SECRET_KEY"
        )
        val request = Request.Builder()
            .url("https://aip.baidubce.com/oauth/2.0/token")
            .post(body)
            .addHeader("Content-Type", "application/x-www-form-urlencoded")
            .build()
        // use { } guarantees the response body is closed (OkHttp requirement).
        return HTTP_CLIENT.newCall(request).execute().use { response ->
            JSONObject(response.body?.string() ?: "")
                .getString("access_token")
                .also { cachedAccessToken = it }
        }
    }

    /**
     * Calls Baidu's face-search (v3) API with a Base64-encoded image and
     * returns the raw JSON response body ("" if the body is absent).
     */
    private fun requestFaceRecognition(base64Image: String, accessToken: String): String {
        val mediaType = "application/json".toMediaTypeOrNull()
        val json = JSONObject().apply {
            put("group_id_list", "pay")
            put("image", base64Image)
            put("image_type", "BASE64")
        }
        val body = RequestBody.create(mediaType, json.toString())
        val request = Request.Builder()
            .url("https://aip.baidubce.com/rest/2.0/face/v3/search?access_token=$accessToken")
            .post(body)
            .addHeader("Content-Type", "application/json")
            .build()
        return HTTP_CLIENT.newCall(request).execute().use { response ->
            response.body?.string() ?: ""
        }
    }

    companion object {
        const val THROTTLE_TIMEOUT_MS = 5_000L
        const val API_KEY = "xxxxxxxx"
        const val SECRET_KEY = "xxxxxxxxxxxx"
        val HTTP_CLIENT: OkHttpClient = OkHttpClient.Builder().build()
    }
}