对于Binder驱动,可以一个个函数进行拆分学习,以管窥豹的方式进行理解。 以下函数分析都是基于驱动binder.c文件。
1.binder_get_ref_for_node(target_proc,binder_node),该函数用来为Binder实体节点创建一个Binder引用节点,target_proc代表binder引用所在的进程。比如client获取service的Binder引用,那么client进程就拥有该Binder引用。
/*
 * Look up -- or create, if absent -- the binder_ref that @proc holds for
 * the binder node @node.  A binder_ref is a per-process handle to a
 * binder_node (which usually lives in another process); e.g. when a client
 * obtains a service's binder, the client's binder_proc owns the ref.
 * Returns the existing or newly created ref, or NULL if allocation fails.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
struct binder_node *node)
{
struct rb_node *n;
struct rb_node **p = &proc->refs_by_node.rb_node; /* refs_by_node: rb-tree of all refs owned by this process, keyed by node pointer */
struct rb_node *parent = NULL;
struct binder_ref *ref, *new_ref;
while (*p) { /* first walk the tree: does this process already hold a ref to this node? */
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
if (node < ref->node)
p = &(*p)->rb_left;
else if (node > ref->node)
p = &(*p)->rb_right;
else
return ref; /* already present -- reuse the existing ref */
}
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
new_ref->debug_id = ++binder_last_id;
new_ref->proc = proc; /* a ref records both the process that owns it and the binder node it points at */
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
/*
 * Choose the handle (desc) value that will be returned to the owning
 * process.  desc 0 is reserved in every process for the context manager
 * (service manager) node; all other handles start at 1 and count upward.
 * The scan over the desc-ordered tree below settles on the smallest value
 * (at or above the starting value) not already in use, so the same binder
 * node can end up with different handle values in different processes.
 */
new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (ref->desc > new_ref->desc)
break; /* gap found -- current new_ref->desc is free */
new_ref->desc = ref->desc + 1;
}
/* insert new_ref into the desc-keyed tree (proc->refs_by_desc) */
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
if (new_ref->desc < ref->desc)
p = &(*p)->rb_left;
else if (new_ref->desc > ref->desc)
p = &(*p)->rb_right;
else
BUG(); /* desc was just chosen to be unused -- a duplicate means kernel state is corrupt */
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
if (node) {
/* chain the new ref onto the node's list so the node can find all its refs */
hlist_add_head(&new_ref->node_entry, &node->refs);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d new ref %d desc %d for node %d\n",
proc->pid, new_ref->debug_id, new_ref->desc,
node->debug_id);
} else {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d new ref %d desc %d for dead node\n",
proc->pid, new_ref->debug_id, new_ref->desc);
}
return new_ref;
}
2.命令解释
BC_ACQUIRE:增加handle的强引用计数
在client进程第一次拿到service的Binder引用时会向Binder驱动发送该命令;
在service将自己添加到service manager中时,service manager拿到service的handle时也会向Binder驱动发送该命令;
BC_RELEASE:减少handle的强引用计数
BC_INCREFS:增加handle的弱引用计数
BC_DECREFS:减少handle的弱引用计数
3.binder_transaction()函数分析
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
#ifdef BINDER_MONITOR
struct binder_transaction_log_entry log_entry;
unsigned int log_idx = -1;
if ((reply && (tr->data_size < (proc->buffer_size/16))) || log_disable)
e = &log_entry;
else
{
e = binder_transaction_log_add(&binder_transaction_log);
if (binder_transaction_log.next)
log_idx = binder_transaction_log.next - 1;
else
log_idx = binder_transaction_log.size - 1;
}
#else
e = binder_transaction_log_add(&binder_transaction_log);
#endif
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
#ifdef BINDER_MONITOR
e->code = tr->code;
/* fd 0 is also valid... set initial value to -1 */
e->fd = -1;
do_posix_clock_monotonic_gettime(&e->timestamp);
//monotonic_to_bootbased(&e->timestamp);
do_gettimeofday(&e->tv);
/* consider time zone. translate to android time */
e->tv.tv_sec -= (sys_tz.tz_minuteswest * 60);
#endif
if (reply) {
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
#ifdef BINDER_MONITOR
binder_cancel_bwdog(in_reply_to);
#endif
binder_set_nice(in_reply_to->saved_priority);
#ifdef RT_PRIO_INHERIT
if (rt_task(current) && (MAX_RT_PRIO != in_reply_to->saved_rt_prio) &&
!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
struct sched_param param = {
.sched_priority = in_reply_to->saved_rt_prio,
};
mt_sched_setscheduler_nocheck(current,
in_reply_to->saved_policy, &param);
#ifdef BINDER_MONITOR
if (log_disable & BINDER_RT_LOG_ENABLE)
{
pr_debug("reply reset %d sched_policy from %d to %d rt_prio from %d to %d\n",
proc->pid, in_reply_to->policy, in_reply_to->saved_policy,
in_reply_to->rt_prio, in_reply_to->saved_rt_prio);
}
#endif
}
#endif
if (in_reply_to->to_thread != thread) {
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
if (target_thread == NULL) {
#ifdef MTK_BINDER_DEBUG
binder_user_error("%d:%d got reply transaction "
"with bad transaction reply_from, "
"transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
#endif
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
proc->pid, thread->pid,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
#ifdef BINDER_MONITOR
e->service[0] = '\0';
#endif
} else { //client发起的transaction;
if (tr->target.handle) {
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle); //获取handle值对应的binder引用节点;
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
target_node = ref->node; //从binder引用节点中取出binder实体节点;
}