本文基于Android_9.0、kernel_3.18源码
上文Binder(五)服务注册流程-发送注册请求提到,AMS通过addService会将数据发送给binder驱动,然后创建binder_transaction,并唤醒servicemanager进程进行处理。在Binder(三)servicemanager初始化中我们知道,由于没有待处理的事务,servicemanager进程会进入阻塞等待状态。
请求处理
1、binder_thread_read
/*
 * Kernel read path executed in the servicemanager process.
 * Sleeps until work is queued, pops a BINDER_WORK_TRANSACTION, converts
 * the in-kernel binder_transaction into a binder_transaction_data, and
 * copies the BR_TRANSACTION command plus that struct to user space.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block){
...
// Block (freezable wait) until this thread has binder work to process
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
...
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo)) {
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
// Take the first pending work item off the process-wide todo list
w = list_first_entry(&proc->todo, struct binder_work,entry);
} else {}
...
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
// Recover the enclosing binder_transaction from its `work` member
t = container_of(w, struct binder_transaction, work);
} break;
...
}
// A target node means this is the request leg; here the target is
// the servicemanager's binder node
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
// User-space address of the binder entity; 0 for servicemanager
tr.target.ptr = target_node->ptr;
...
cmd = BR_TRANSACTION;
} else {}
// code = ADD_SERVICE_TRANSACTION for this flow
tr.code = t->code;
...
// Payload size in bytes
tr.data_size = t->buffer->data_size;
// Size of the offsets array (locations of flat_binder_objects)
tr.offsets_size = t->buffer->offsets_size;
// Start of the payload as seen from user space: mmap maps kernel and
// user virtual addresses onto the same physical pages, so adding
// user_buffer_offset translates the kernel address — no copy needed
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
// Write the command (BR_TRANSACTION) into the user-space read buffer
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
// Copy the binder_transaction_data to user space
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
...
// Unlink the work item we just handled
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
// Synchronous transaction: push it onto this thread's transaction
// stack so the later BC_REPLY can locate the originating thread
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
// Hand the transaction to the current (servicemanager) thread
t->to_thread = thread;
thread->transaction_stack = t;
} else {}
break;
}
...
return 0;
}
servicemanager被唤醒后,会进入while循环,BINDER_WORK_TRANSACTION会取出binder_transaction,然后将binder_transaction的数据赋值到binder_transaction_data中,再将指令和binder_transaction_data拷贝到用户空间。
关于t->buffer->data的赋值:
由于mmap将内核空间和用户空间地址都映射到同一个物理地址;而t->buffer->data表示内核虚拟地址,加上proc->user_buffer_offset用户进程地址的偏移,便可以得到在用户空间的地址。
2、binder_parse
servicemanager中,binder_thread_read调用完毕,会返回到binder_ioctl_write_read中,再返回到ioctl中,然后返回到binder_loop中;
/*
 * servicemanager's user-space main loop: read commands from the binder
 * driver via ioctl(BINDER_WRITE_READ) and dispatch them to binder_parse,
 * forwarding `func` (svcmgr_handler) as the transaction handler.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
void binder_loop(struct binder_state *bs, binder_handler func){
...
for (;;) {
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
...
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
...
}
}
/*
 * Walk the command stream [ptr, ptr+size) returned by the driver and
 * dispatch each BR_* command. For BR_TRANSACTION, builds binder_io views
 * over the payload, invokes `func` (svcmgr_handler), and either frees the
 * buffer (one-way) or sends the reply back to the driver.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func){
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
// Read the next command word
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
...
case BR_TRANSACTION: {
// The binder_transaction_data follows the command word in-place
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
...
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
// Initialize the reply buffer (256 bytes, 4 offset slots)
bio_init(&reply, rdata, sizeof(rdata), 4);
// Wrap the incoming transaction payload for reading
bio_init_from_txn(&msg, txn);
// Handle the message (svcmgr_handler in this flow)
res = func(bs, txn, &msg, &reply);
if (txn->flags & TF_ONE_WAY) {
// One-way call: nothing to reply, just release the buffer
binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
// Synchronous call: send the result back to the caller
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
}
ptr += sizeof(*txn);
break;
}
...
}
}
return r;
}
在binder_loop中,会通过binder_parse进行消息处理,这里的func是svcmgr_handler,是main函数调用binder_loop时传入的。在binder_parse中:
首先, 调用bio_init初始化reply;
然后, 调用svcmgr_handler处理消息;
最后, 调用binder_send_reply返回处理结果。
3、svcmgr_handler
// Java side (IBinder/IServiceManager): transaction codes start at
// FIRST_CALL_TRANSACTION, so ADD_SERVICE_TRANSACTION = 1 + 2 = 3
int FIRST_CALL_TRANSACTION = 0x00000001;
int ADD_SERVICE_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+2;
// Native side: the same codes as seen by servicemanager;
// SVC_MGR_ADD_SERVICE == 3 matches ADD_SERVICE_TRANSACTION above
enum {
/* Must match definitions in IBinder.h and IServiceManager.h */
PING_TRANSACTION = B_PACK_CHARS('_','P','N','G'),
SVC_MGR_GET_SERVICE = 1,
SVC_MGR_CHECK_SERVICE,
SVC_MGR_ADD_SERVICE,
SVC_MGR_LIST_SERVICES,
};
/*
 * servicemanager's transaction handler, invoked by binder_parse for each
 * BR_TRANSACTION. Dispatches on txn->code; for SVC_MGR_ADD_SERVICE it
 * extracts the service name and binder handle from `msg` and registers
 * the service via do_add_service. Writes a status word into `reply`.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply){
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
uint32_t dumpsys_priority;
...
switch(txn->code) {
...
case SVC_MGR_ADD_SERVICE:
// Service name (UTF-16) being registered
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
// Driver-assigned handle referencing the service's binder node
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
dumpsys_priority = bio_get_uint32(msg);
if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,
txn->sender_pid))
return -1;
break;
...
}
// Success: put status 0 into the reply
bio_put_uint32(reply, 0);
return 0;
}
代码会进到SVC_MGR_ADD_SERVICE分支:
首先, 通过bio_get_string16获取服务名称;
然后, 通过bio_get_ref获取handle;
最后, 通过do_add_service来添加服务。
4、do_add_service
/*
 * Register service `s` (length `len`) under binder handle `handle`.
 * Validates the arguments, checks the caller's permission, then either
 * updates an existing svcinfo or allocates and links a new one onto the
 * global svclist. Returns 0 on success, -1 on failure.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
int do_add_service(struct binder_state *bs, const uint16_t *s, size_t len, uint32_t handle,
uid_t uid, int allow_isolated, uint32_t dumpsys_priority, pid_t spid) {
struct svcinfo *si;
// Reject a null handle and empty/over-long names
if (!handle || (len == 0) || (len > 127))
return -1;
// Check that the caller is allowed to register this service
if (!svc_can_register(s, len, spid, uid)) {
...
return -1;
}
// Already registered: overwrite the stored handle
si = find_svc(s, len);
if (si) {
...
si->handle = handle;
} else {
// Not registered yet: allocate a new svcinfo
// (name is stored inline after the struct, hence the extra bytes)
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
...
// Push onto the head of the global service list
si->next = svclist;
svclist = si;
}
...
return 0;
}
首先, 检查handle是否有效,再通过svc_can_register检查权限;
然后, 判断是否已经注册过,如果注册过则更新handle;否则生成新的svcinfo。
5、binder_send_reply
结束svcmgr_handler,会返回binder_parse,在binder_parse执行binder_send_reply向binder驱动发送数据。
/*
 * Send the reply for a synchronous transaction back to the driver.
 * Packs two commands into one packed struct and writes them in a single
 * ioctl: BC_FREE_BUFFER (release the request's buffer, now fully read)
 * followed by BC_REPLY (the reply transaction itself).
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status){
// packed so the driver sees a contiguous cmd/arg command stream
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;
// Reply targets no node: the driver routes it via the transaction stack
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
...
binder_write(bs, &data, sizeof(data));
}
int binder_write(struct binder_state *bs, void *data, size_t len){
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
return res;
}
binder_send_reply向binder驱动发送两个指令:BC_FREE_BUFFER(数据读取完毕,释放缓冲)、BC_REPLY(回复数据,其实里边没有内容);然后通过binder_write发送,它会调用ioctl,由于read_size = 0,只触发写的操作,进入binder_thread_write中。
6、binder_transaction
在binder_thread_write会处理BC_FREE_BUFFER命令释放内存,然后再次处理BC_REPLY命令,执行到binder_transaction:
/*
 * Kernel handling of the BC_REPLY from servicemanager.
 * In the reply path: pops the original transaction off this thread's
 * transaction stack, uses its `from` field to find the waiting AMS
 * thread/process, queues a new BINDER_WORK_TRANSACTION on its todo list
 * and wakes it up.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply){
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
...
if (reply) {
// When AMS sent the request, the driver set t->from = AMS thread;
// when servicemanager read it, thread->transaction_stack = t.
// So the top of this thread's stack is the transaction we reply to.
in_reply_to = thread->transaction_stack;
...
thread->transaction_stack = in_reply_to->to_parent;
// Target thread is the original sender: the AMS thread
target_thread = in_reply_to->from;
...
// Target process is the AMS process
target_proc = target_thread->proc;
}
// Choose the target's todo list and wait queue
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {}
e->to_proc = target_proc->pid;
// Allocate the reply transaction
t = kzalloc(sizeof(*t), GFP_KERNEL);
...
// Allocate the "transaction complete" work item for the sender
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
...
// Only synchronous requests record a sender; a reply has no `from`
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
// Populate the binder_transaction
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
...
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
// target_node was never assigned above in the reply path, so it is
// NULL — the reader will recognize this as a BR_REPLY
t->buffer->target_node = target_node;
...
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
// Remove in_reply_to from the AMS thread's transaction stack
binder_pop_transaction(target_thread, in_reply_to);
}
...
// Queue the reply on the target (AMS) and the completion on ourselves
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
// Wake up the target (AMS) process
if (target_wait)
wake_up_interruptible(target_wait);
return;
...
}
这里执行的逻辑与Binder(五)服务注册流程-发送注册请求差不多,不同的是会进入reply中,找到目标进程和线程。并且执行binder_pop_transaction将in_reply_to从AMS的事务栈中删除;之后唤醒AMS进程。
7、BINDER_WORK_TRANSACTION_COMPLETE处理
servicemanager进程处理完上述逻辑后,会按如下调用链返回:binder_thread_write -> binder_ioctl_write_read -> binder_ioctl -> binder_write -> binder_send_reply -> binder_parse -> binder_loop;之后binder_loop会进入下一次循环:
/*
 * Back in servicemanager's read pass: its own todo list now holds the
 * BINDER_WORK_TRANSACTION_COMPLETE queued above, which is translated to
 * a BR_TRANSACTION_COMPLETE command for user space.
 * NOTE(review): fragmentary excerpt — the enclosing switch is elided.
 */
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block){
...
case BINDER_WORK_TRANSACTION_COMPLETE: {
// Tell user space its write was accepted and processed
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
...
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
...
}
/*
 * binder_parse again, this time handling the follow-up commands:
 * BR_NOOP and BR_TRANSACTION_COMPLETE are deliberately ignored —
 * servicemanager has nothing to do for them.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func){
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
...
switch(cmd) {
case BR_NOOP:
// Nothing to do
break;
case BR_TRANSACTION_COMPLETE:
// Write acknowledged by the driver; no action needed
break;
...
}
}
return r;
}
首先, 通过ioctl读取数据,进入到binder_thread_read中,由于thread->todo不为空,会将BINDER_WORK_TRANSACTION_COMPLETE取出来进行处理;
然后, 在case分支中回写BR_TRANSACTION_COMPLETE指令,通过调用链返回到binder_loop中;
最后, 在binder_loop中交由binder_parse处理,在binder_parse对于BR_TRANSACTION_COMPLETE没有进行任何处理。
处理完BINDER_WORK_TRANSACTION_COMPLETE,servicemanager进程会再次进入binder_loop,通过ioctl的read进行阻塞,等待下一次client的请求。
请求返回
由上文我们知道,servicemanager进程会在处理完返回内容后,唤醒AMS进程。在Binder(五)服务注册流程-发送注册请求中我们也知道AMS会通过wait_event_interruptible_exclusive阻塞等待servicemanager的反馈。
1、binder_thread_read
/*
 * The AMS side being woken up: the reply transaction queued by
 * servicemanager is popped from this thread's todo list. Because the
 * reply's buffer has no target_node, the command sent up is BR_REPLY.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block){
...
// AMS was blocked here waiting for servicemanager's reply
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
...
while (1) {
...
w = list_first_entry(&thread->todo, struct binder_work,entry);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
...
}
// target_node is NULL for a reply (never set in binder_transaction)
if (t->buffer->target_node) {
} else {
tr.target.ptr = 0;
tr.cookie = 0;
// No target node => this is the reply leg
cmd = BR_REPLY;
}
// Copy command and payload descriptor to user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
}
return 0;
}
在本文的开头,请求处理的时候已经对binder_thread_read进行了分析,它会将binder_transaction的数据赋值到binder_transaction_data中,再将指令和binder_transaction_data拷贝到用户空间;不同的是,这里的指令是BR_REPLY。
2、waitForResponse
/*
 * AMS user-space side (libbinder): loop talking to the driver until the
 * reply for the outstanding transaction arrives. On BR_REPLY, reads the
 * binder_transaction_data from mIn, hands the payload to `reply` via
 * ipcSetDataReference (which takes ownership and frees through
 * freeBuffer), then exits the loop.
 * NOTE(review): excerpt — "..." marks lines elided by the article author.
 */
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult){
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
...
// Read the next command returned by the driver
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
...
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
...
if (reply) {
// TF_STATUS_CODE clear => payload is real reply data
if ((tr.flags & TF_STATUS_CODE) == 0) {
// Hand the mmap'ed reply buffer to the Parcel;
// freeBuffer releases it when the Parcel is done
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
}
} else {
}
}
goto finish;
...
}
}
finish:
...
return err;
}
通过binder_thread_read读取到指令后,会通过ioctl->talkWithDriver->waitForResponse返回;在waitForResponse中,处理BR_REPLY,然后goto finish退出循环。
至此,整个服务注册流程完毕。