本文学习tee_supplicant的相关内容。以下是杂记,仅用作学习记录。学习代码均取自github和《手机安全和可信应用开发指南》(帅峰云,黄腾,宋洋)
tee-supplicant的入口main函数为
/*
 * Entry point of tee-supplicant: open the supplicant TEE device,
 * prepare the local filesystem backend, then serve RPC requests
 * from the kernel until one of them fails.
 */
int main(int argc, char *argv[])
{
	struct thread_arg arg = { .fd = -1 };
	int rc;

	rc = pthread_mutex_init(&arg.mutex, NULL);
	if (rc) {
		EMSG("pthread_mutex_init: %s", strerror(rc));
		EMSG("terminating...");
		exit(EXIT_FAILURE);
	}

	if (argc > 2)
		return usage();

	if (argc == 2) {
		/* Device path given on the command line, e.g. /dev/teepriv0;
		 * open it and fetch its generic capabilities. */
		arg.fd = open_dev(argv[1], &arg.gen_caps);
		if (arg.fd < 0) {
			EMSG("failed to open \"%s\"", argv[1]);
			exit(EXIT_FAILURE);
		}
	} else {
		/* No path given: probe for a supplicant-capable device. */
		arg.fd = get_dev_fd(&arg.gen_caps);
		if (arg.fd < 0) {
			EMSG("failed to find an OP-TEE supplicant device");
			exit(EXIT_FAILURE);
		}
	}

	/* Create the secure-storage directory (e.g. /data/tee). */
	if (tee_supp_fs_init() != 0) {
		EMSG("error tee_supp_fs_init");
		exit(EXIT_FAILURE);
	}

	/* Main service loop: each iteration handles one RPC request. */
	for (;;) {
		if (arg.abort)
			break;
		if (!process_one_request(&arg))
			arg.abort = true;
	}

	close(arg.fd);

	/* Reaching here means request processing failed. */
	return EXIT_FAILURE;
}
/*
 * Fetch one RPC request from the kernel, dispatch it to the matching
 * handler and write the result back.
 *
 * Returns true when the request was handled and the response written,
 * false on a receive/parse/write failure (caller aborts the loop).
 *
 * Fix: the original text contained mojibake "¶ms" (HTML-entity
 * corruption of "&params") in the find_params() call, which does not
 * compile; restored to "&params".
 */
static bool process_one_request(struct thread_arg *arg)
{
	union tee_rpc_invoke request;
	size_t num_params;
	size_t num_meta;
	struct tee_ioctl_param *params;
	uint32_t func;
	uint32_t ret;

	DMSG("looping");
	memset(&request, 0, sizeof(request));
	request.recv.num_params = RPC_NUM_PARAMS;

	/* Let it be known that we can deal with meta parameters */
	params = (struct tee_ioctl_param *)(&request.send + 1);
	params->attr = TEE_IOCTL_PARAM_ATTR_META;

	num_waiters_inc(arg);

	/* Block until the kernel hands us a request; arg->fd is the
	 * open supplicant device (e.g. /dev/teepriv0). */
	if (!read_request(arg->fd, &request))
		return false;

	/* Unpack the received buffer into func, num_params and params. */
	if (!find_params(&request, &func, &num_params, &params, &num_meta))
		return false;

	/* With meta parameters present the request may be served
	 * asynchronously: make sure another waiter thread exists. */
	if (num_meta && !num_waiters_dec(arg) && !spawn_thread(arg))
		return false;

	switch (func) {
	case OPTEE_MSG_RPC_CMD_LOAD_TA:
		ret = load_ta(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_FS:
		ret = tee_supp_fs_process(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_RPMB:
		ret = process_rpmb(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
		ret = process_alloc(arg, num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_FREE:
		ret = process_free(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_GPROF:
		ret = gprof_process(num_params, params);
		break;
	case OPTEE_MSG_RPC_CMD_SOCKET:
		ret = tee_socket_process(num_params, params);
		break;
	default:
		EMSG("Cmd [0x%" PRIx32 "] not supported", func);
		/* Not supported. */
		ret = TEEC_ERROR_NOT_SUPPORTED;
		break;
	}

	request.send.ret = ret;

	/* Report the result back to the kernel; this eventually reaches
	 * optee_supp_send() in the driver. */
	return write_response(arg->fd, &request);
}
static bool read_request(int fd, union tee_rpc_invoke *request)
{
struct tee_ioctl_buf_data data;
data.buf_ptr = (uintptr_t)request;
data.buf_len = sizeof(*request);
if (ioctl(fd, TEE_IOC_SUPPL_RECV, &data)) { //从kernel获取request
EMSG("TEE_IOC_SUPPL_RECV: %s", strerror(errno));
return false;
}
return true;
}
/**
 * optee_supp_send() - send result of request from supplicant
 * @ctx:	context sending result
 * @ret:	return value of request
 * @num_params:	number of parameters returned
 * @param:	returned parameters
 *
 * Returns 0 on success or <0 on failure.
 */
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	size_t n;
	size_t num_meta;

	/* Look up the in-flight request this response belongs to. */
	mutex_lock(&supp->mutex);
	req = supp_pop_req(supp, num_params, param, &num_meta);
	mutex_unlock(&supp->mutex);

	if (IS_ERR(req)) {
		/* Something is wrong, let supplicant restart. */
		return PTR_ERR(req);
	}

	/* Update out and in/out parameters */
	for (n = 0; n < req->num_params; n++) {
		struct tee_param *p = req->param + n;

		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			p->u.value.a = param[n + num_meta].u.value.a;
			p->u.value.b = param[n + num_meta].u.value.b;
			p->u.value.c = param[n + num_meta].u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			p->u.memref.size = param[n + num_meta].u.memref.size;
			break;
		default:
			break;
		}
	}
	req->ret = ret;

	/* Let the requesting thread continue: wakes the thread blocked
	 * in wait_for_completion(&req->c) (optee_supp_thrd_req) so it
	 * can leave its wait loop and consume the result. */
	complete(&req->c);

	return 0;
}
下面可以转到kernel中查看对TEE_IOC_SUPPL_RECV的处理。经过一些参数的处理后,最终调用的是optee_supp_recv。
/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:	context receiving the request
 * @func:	requested function in supplicant
 * @num_params:	number of elements allocated in @param, updated with number
 *		used elements
 * @param:	space for parameters for @func
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = NULL;
	int id;
	size_t num_meta;
	int rc;

	rc = supp_check_recv_params(*num_params, param, &num_meta);
	if (rc)
		return rc;

	while (true) {
		mutex_lock(&supp->mutex);
		req = supp_pop_entry(supp, *num_params - num_meta, &id);
		mutex_unlock(&supp->mutex);

		if (req) {
			if (IS_ERR(req))
				return PTR_ERR(req);
			break;
		}

		/*
		 * If we didn't get a request we'll block in
		 * wait_for_completion() to avoid needless spinning.
		 *
		 * This is where supplicant will be hanging most of
		 * the time, let's make this interruptible so we
		 * can easily restart supplicant if needed.
		 */
		/* Woken via complete(&supp->reqs_c) when a new request is
		 * queued; then loop around and try to pop it again. */
		if (wait_for_completion_interruptible(&supp->reqs_c))
			return -ERESTARTSYS;
	}

	if (num_meta) {
		/*
		 * tee-supplicant supports meta parameters -> requests can be
		 * processed asynchronously.
		 */
		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
		param->u.value.a = id;
		param->u.value.b = 0;
		param->u.value.c = 0;
	} else {
		mutex_lock(&supp->mutex);
		supp->req_id = id;
		mutex_unlock(&supp->mutex);
	}

	*func = req->func;
	*num_params = req->num_params + num_meta;
	memcpy(param + num_meta, req->param,
	       sizeof(struct tee_param) * req->num_params);

	return 0;
}
/*
 * Pop the oldest queued request off supp->reqs and register it in the
 * IDR so the later response can be matched back to it.
 * Returns NULL when the queue is empty, an ERR_PTR on a protocol or
 * allocation error, or the request to hand to the supplicant.
 */
static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
					     int num_params, int *id)
{
	struct optee_supp_req *req;

	if (supp->req_id != -1) {
		/*
		 * Supplicant should not mix synchronous and asynchronous
		 * requests.
		 */
		return ERR_PTR(-EINVAL);
	}

	/* No pending request: return directly. */
	if (list_empty(&supp->reqs))
		return NULL;

	/* Take the first entry of the supp->reqs list for processing. */
	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);

	if (num_params < req->num_params) {
		/* Not enough room for parameters */
		return ERR_PTR(-EINVAL);
	}

	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
	if (*id < 0)
		return ERR_PTR(-ENOMEM);

	list_del(&req->link);
	req->busy = true;

	return req;
}
向supp->reqs链表添加请求是在optee_do_call_with_arg函数的调用路径中完成的,具体流程如下
/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does and SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 *
 * Fix: the original text contained mojibake "¶m" (HTML-entity
 * corruption of "&param") in the reg_pair_from_64() and
 * optee_handle_rpc() calls, which does not compile; restored to
 * "&param...".
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);

	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee_bm_timestamp();

		/* Trigger one invocation of the TEE-side code via SMC. */
		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);

		optee_bm_timestamp();

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			/* The TEE side requested an RPC: hand it off to
			 * optee_handle_rpc() and resume the call after. */
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			ret = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);

	/*
	 * We're done with our thread in secure world, if there's any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}
//最终会调用到optee_supp_thrd_req,向supp->reqs链表尾部(list_add_tail)添加一个新请求
/**
 * optee_supp_thrd_req() - request service from supplicant
 * @ctx:	context doing the request
 * @func:	function requested
 * @num_params:	number of elements in @param array
 * @param:	parameters for function
 *
 * Returns result of operation to be passed to secure world
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
	bool interruptable;
	u32 ret;

	if (!req)
		return TEEC_ERROR_OUT_OF_MEMORY;

	init_completion(&req->c);
	req->func = func;
	req->num_params = num_params;
	req->param = param;

	/* Insert the request in the request list */
	/* Append the new RPC request to the tail of supp->reqs. */
	mutex_lock(&supp->mutex);
	list_add_tail(&req->link, &supp->reqs);
	mutex_unlock(&supp->mutex);

	/* Tell an eventual waiter there's a new request */
	/* Wakes the wait_for_completion_interruptible(&supp->reqs_c) in
	 * optee_supp_recv(). */
	complete(&supp->reqs_c);

	/*
	 * Wait for supplicant to process and return result, once we've
	 * returned from wait_for_completion(&req->c) successfully we have
	 * exclusive access again.
	 */
	/* Blocks until the request is done: tee-supplicant's
	 * write_response() eventually leads to complete(&req->c). */
	while (wait_for_completion_interruptible(&req->c)) {
		mutex_lock(&supp->mutex);
		interruptable = !supp->ctx;
		if (interruptable) {
			/*
			 * There's no supplicant available and since the
			 * supp->mutex currently is held none can
			 * become available until the mutex released
			 * again.
			 *
			 * Interrupting an RPC to supplicant is only
			 * allowed as a way of slightly improving the user
			 * experience in case the supplicant hasn't been
			 * started yet. During normal operation the supplicant
			 * will serve all requests in a timely manner and
			 * interrupting then wouldn't make sense.
			 */
			interruptable = !req->busy;
			if (!req->busy)
				list_del(&req->link);
		}
		mutex_unlock(&supp->mutex);

		if (interruptable) {
			req->ret = TEEC_ERROR_COMMUNICATION;
			break;
		}
	}

	ret = req->ret;
	kfree(req);

	return ret;
}
下面讨论一下关于锁,队列等的问题
kernel space的optee_supp的结构体描述了tee-supplicant从kernel读取request时所对应的结构,此结构对应的全局变量只有一个,也就是tee-supplicant只有一个,并且只能串行的一个一个的处理request。
/**
 * struct optee_supp - supplicant synchronization struct
 * @ctx:	the context of current connected supplicant.
 *		if !NULL the supplicant device is available for use,
 *		else busy
 * @mutex:	held while accessing content of this struct
 * @req_id:	current request id if supplicant is doing synchronous
 *		communication, else -1
 * @reqs:	queued request not yet retrieved by supplicant
 * @idr:	IDR holding all requests currently being processed
 *		by supplicant
 * @reqs_c:	completion used by supplicant when waiting for a
 *		request to be queued.
 */
struct optee_supp {
	/* Serializes access to this struct */
	struct mutex mutex;
	struct tee_context *ctx;
	/* Id of the request currently being processed synchronously. */
	int req_id;
	/* Pending requests; entries are handed to userspace once popped. */
	struct list_head reqs;
	/* IDR of requests currently in flight in the supplicant. */
	struct idr idr;
	/* The supplicant blocks on this while waiting; completing it
	 * after queuing a new request ends the wait loop. */
	struct completion reqs_c;
};
/*
 * struct completion - structure used to maintain state for a "completion"
 *
 * This is the opaque structure used to maintain the state for a "completion".
 * Completions currently use a FIFO to queue threads that have to wait for
 * the "completion" event.
 *
 * See also:  complete(), wait_for_completion() (and friends _timeout,
 * _interruptible, _interruptible_timeout, and _killable), init_completion(),
 * reinit_completion(), and macros DECLARE_COMPLETION(),
 * DECLARE_COMPLETION_ONSTACK().
 */
struct completion {
	/* Count of complete() events not yet consumed by a waiter. */
	unsigned int done;
	/* Queue of tasks blocked in wait_for_completion*(). */
	wait_queue_head_t wait;
};
因此需要保证所有的请求都进队列,然后按顺序一个一个处理。发送request进队列的函数会在while循环中等待request处理完成;request进队列以后会通知tee-supplicant结束等待并开始处理request;处理完成后,通过写response回kernel,通知等待request处理结果的函数退出等待,并使用处理后的结果。