After Service Manager is woken up, it enters the while loop and starts processing transactions. Here wait_for_proc_work equals 1 and proc->todo is not empty, so the first work item is taken from the proc->todo list:
w = list_first_entry(&proc->todo, struct binder_work, entry);
From the description above, we know that this work item's type is BINDER_WORK_TRANSACTION, so the transaction item is obtained with:
t = container_of(w, struct binder_transaction, work);
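Here list_first_entry and container_of are standard kernel idioms: list_first_entry returns the structure that embeds the first node of a list, and it is itself built on container_of, which recovers the address of an enclosing structure from a pointer to one of its members. A minimal user-space sketch of the idea (a standalone demo with simplified structures, not the driver code):

#include <stdio.h>
#include <stddef.h>

/* Simplified container_of: subtract the member's offset within the
 * enclosing type from the member's address. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct binder_work { int dummy; };          /* stands in for the real struct */

struct binder_transaction {
    int code;
    struct binder_work work;                /* embedded, as in the driver */
};

int main(void)
{
    struct binder_transaction t = { .code = 42 };
    struct binder_work *w = &t.work;        /* what the todo list hands us */

    /* Recover the enclosing transaction from the embedded work item. */
    struct binder_transaction *back =
        container_of(w, struct binder_transaction, work);

    printf("code = %d\n", back->code);      /* prints: code = 42 */
    return 0;
}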
Next, the data in transaction item t is copied into the local variable struct binder_transaction_data tr:
if (t->buffer->target_node) {
	struct binder_node *target_node = t->buffer->target_node;
	tr.target.ptr = target_node->ptr;
	tr.cookie = target_node->cookie;
	......
	cmd = BR_TRANSACTION;
} else {
	......
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;
if (t->from) {
	struct task_struct *sender = t->from->proc->tsk;
	tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
} else {
	tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
Here is a crucial point, the essence of the Binder inter-process communication mechanism:
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
The address that t->buffer->data points to is in kernel space. The data now has to be returned to the Service Manager process's user space, and user space cannot access kernel-space data, so some handling is needed here. How? When learning object-oriented languages, we distinguish between deep and shallow copies of objects: a deep copy allocates a new block of memory and moves the original object's contents into it, while a shallow copy allocates no new space, only a reference that points to the original object. The Binder mechanism uses something like a shallow copy: it arranges a user-space virtual address so that this user-space virtual address and the kernel-space virtual address t->buffer->data both point to the same physical address. How can a user-space virtual address and a kernel-space virtual address point to the same physical address at the same time? See the earlier article 浅谈Service Manager成为Android进程间通信(IPC)机制Binder守护进程之路, which describes this in detail. Here, adding the offset proc->user_buffer_offset to t->buffer->data yields the user-space virtual address corresponding to t->buffer->data. After adjusting tr.data.ptr.buffer, do not forget to adjust tr.data.ptr.offsets along with it.
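To make this concrete, here is a stand-alone user-space sketch (an illustration of the idea, not driver code): two mmap mappings of the same shared-memory pages stand in for the kernel-space and user-space views of the binder buffer, and the fixed difference between the two mappings plays the role of proc->user_buffer_offset.

#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)                        /* compile with: cc demo.c -lrt */
{
    int fd = shm_open("/offset_demo", O_CREAT | O_RDWR, 0600);
    ftruncate(fd, 4096);

    /* Two independent virtual mappings backed by the same physical pages. */
    char *kernel_view = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    char *user_view   = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    /* The analogue of proc->user_buffer_offset: a constant delta. */
    ptrdiff_t user_buffer_offset = user_view - kernel_view;

    strcpy(kernel_view, "written through the \"kernel\" view");

    /* No copy of the payload: just translate the address. */
    printf("read through the \"user\" view: %s\n",
           kernel_view + user_buffer_offset);

    munmap(kernel_view, 4096);
    munmap(user_view, 4096);
    shm_unlink("/offset_demo");
    close(fd);
    return 0;
}

Writes made through one view are immediately visible through the other, which is why the driver only ever copies the transaction payload once, into this shared buffer.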
Next the contents of tr are copied out to the buffer passed in from user space; the pointer ptr points into that user buffer:
if (put_user(cmd, (uint32_t __user *)ptr))
	return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
	return -EFAULT;
ptr += sizeof(tr);
As this shows, the contents that tr.data.ptr.buffer and tr.data.ptr.offsets point to are only shallow-copied.
Finally, since this transaction has been handled, it must be removed from the todo list:
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
	t->to_parent = thread->transaction_stack;
	t->to_thread = thread;
	thread->transaction_stack = t;
} else {
	t->buffer->transaction = NULL;
	kfree(t);
	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
}
Note that cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY) is true here. This means that although the driver has finished processing this transaction, it must still wait for a confirmation from Service Manager once it is done, that is, it needs to wait for a reply, so the current transaction t is placed at the head of thread->transaction_stack:
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
If cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY) were false, no reply would be needed and the transaction t would simply be deleted.
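In other words, thread->transaction_stack is a singly-linked stack threaded through the to_parent field: a transaction is pushed when BR_TRANSACTION is delivered and popped when the corresponding reply comes back. A minimal sketch of that discipline (field names follow the driver; everything else is simplified):

#include <stdio.h>

struct binder_transaction {
    int code;
    struct binder_transaction *to_parent;   /* link to the previous stack top */
};

/* Push: the same statements as in binder_thread_read above
 * (to_thread omitted for brevity). */
static void push(struct binder_transaction **stack, struct binder_transaction *t)
{
    t->to_parent = *stack;
    *stack = t;
}

/* Pop: what the reply path conceptually does when the reply arrives. */
static struct binder_transaction *pop(struct binder_transaction **stack)
{
    struct binder_transaction *t = *stack;
    if (t)
        *stack = t->to_parent;
    return t;
}

int main(void)
{
    struct binder_transaction *stack = NULL;
    struct binder_transaction t1 = { .code = 1 }, t2 = { .code = 2 };

    push(&stack, &t1);
    push(&stack, &t2);                       /* a nested transaction sits on top */

    printf("reply for %d first\n", pop(&stack)->code);   /* 2: LIFO unwinding */
    printf("then for %d\n", pop(&stack)->code);          /* 1 */
    return 0;
}

The LIFO ordering is what lets Binder support nested synchronous calls: the reply that arrives next must belong to the transaction most recently pushed.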
The while loop finally exits through a break, and control returns to the binder_ioctl function:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	......

	switch (cmd) {
	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		......
		if (bwr.read_size > 0) {
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		......
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	......
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	......
	return ret;
}
After returning from binder_thread_read, the function checks whether proc->todo still has pending work; if so, it wakes up the threads sleeping on the proc->wait queue to handle it. Finally, the contents of the local variable struct binder_write_read bwr are copied back to the buffer passed in from user space, and the function returns.
This returns us to the binder_loop function in frameworks/base/cmds/servicemanager/binder.c:
void binder_loop(struct binder_state *bs, binder_handler func)
{
	int res;
	struct binder_write_read bwr;
	unsigned readbuf[32];

	bwr.write_size = 0;
	bwr.write_consumed = 0;
	bwr.write_buffer = 0;

	readbuf[0] = BC_ENTER_LOOPER;
	binder_write(bs, readbuf, sizeof(unsigned));

	for (;;) {
		bwr.read_size = sizeof(readbuf);
		bwr.read_consumed = 0;
		bwr.read_buffer = (unsigned) readbuf;

		res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

		if (res < 0) {
			LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
			break;
		}

		res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
		if (res == 0) {
			LOGE("binder_loop: unexpected reply?!\n");
			break;
		}
		if (res < 0) {
			LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
			break;
		}
	}
}
The returned data is placed in readbuf, and binder_parse is then called to parse it:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
	int r = 1;
	uint32_t *end = ptr + (size / 4);

	while (ptr < end) {
		uint32_t cmd = *ptr++;
		switch(cmd) {
		......
		case BR_TRANSACTION: {
			struct binder_txn *txn = (void *) ptr;
			if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
				LOGE("parse: txn too small!\n");
				return -1;
			}
			binder_dump_txn(txn);
			if (func) {
				unsigned rdata[256/4];
				struct binder_io msg;
				struct binder_io reply;
				int res;

				bio_init(&reply, rdata, sizeof(rdata), 4);
				bio_init_from_txn(&msg, txn);
				res = func(bs, txn, &msg, &reply);
				binder_send_reply(bs, &reply, txn->data, res);
			}
			ptr += sizeof(*txn) / sizeof(uint32_t);
			break;
		}
		......
		default:
			LOGE("parse: OOPS %d\n", cmd);
			return -1;
		}
	}

	return r;
}
First, the data read from the Binder driver is cast to a struct binder_txn structure and saved in the local variable txn. struct binder_txn is defined in frameworks/base/cmds/servicemanager/binder.h:
struct binder_txn
{
	void *target;
	void *cookie;
	uint32_t code;
	uint32_t flags;

	uint32_t sender_pid;
	uint32_t sender_euid;

	uint32_t data_size;
	uint32_t offs_size;
	void *data;
	void *offs;
};
The function also uses another data structure, struct binder_io, likewise defined in frameworks/base/cmds/servicemanager/binder.h:
struct binder_io
{
	char *data;            /* pointer to read/write from */
	uint32_t *offs;        /* array of offsets */
	uint32_t data_avail;   /* bytes available in data buffer */
	uint32_t offs_avail;   /* entries available in offsets array */

	char *data0;           /* start of data buffer */
	uint32_t *offs0;       /* start of offsets buffer */
	uint32_t flags;
	uint32_t unused;
};
Reading on, the function calls bio_init to initialize the reply variable:
void bio_init(struct binder_io *bio, void *data,
              uint32_t maxdata, uint32_t maxoffs)
{
	uint32_t n = maxoffs * sizeof(uint32_t);

	if (n > maxdata) {
		bio->flags = BIO_F_OVERFLOW;
		bio->data_avail = 0;
		bio->offs_avail = 0;
		return;
	}

	bio->data = bio->data0 = data + n;
	bio->offs = bio->offs0 = data;
	bio->data_avail = maxdata - n;
	bio->offs_avail = maxoffs;
	bio->flags = 0;
}
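So bio_init carves one flat buffer into two regions: an offsets array of maxoffs 32-bit slots at the front, and the data area immediately behind it. A quick stand-alone check of that layout for the rdata buffer used in binder_parse (demo only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned rdata[256/4];                      /* the buffer binder_parse passes in */
    uint32_t maxdata = sizeof(rdata);           /* 256 bytes */
    uint32_t maxoffs = 4;
    uint32_t n = maxoffs * sizeof(uint32_t);    /* 16 bytes reserved for offsets */

    /* What bio_init(&reply, rdata, sizeof(rdata), 4) computes: */
    uint32_t *offs = (uint32_t *)rdata;         /* offs0: offsets at the front */
    char *data = (char *)rdata + n;             /* data0: payload area after them */

    printf("offsets: %u bytes at %p\n", n, (void *)offs);
    printf("data:    %u bytes at %p\n", maxdata - n, (void *)data);
    return 0;
}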
It then calls bio_init_from_txn to initialize the msg variable:
void bio_init_from_txn(struct binder_io *bio, struct binder_txn *txn)
{
	bio->data = bio->data0 = txn->data;
	bio->offs = bio->offs0 = txn->offs;
	bio->data_avail = txn->data_size;
	bio->offs_avail = txn->offs_size / 4;
	bio->flags = BIO_F_SHARED;
}
Note that msg is set up to point straight into the transaction data that still lives in the kernel-mapped buffer (hence the BIO_F_SHARED flag), while reply is backed by the local rdata array. Finally, the function that does the real processing is the function pointer func passed in as a parameter, which here is the svcmgr_handler function defined in frameworks/base/cmds/servicemanager/service_manager.c:
int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
	struct svcinfo *si;
	uint16_t *s;
	unsigned len;
	void *ptr;
	uint32_t strict_policy;

	if (txn->target != svcmgr_handle)
		return -1;

	// Equivalent to Parcel::enforceInterface(), reading the RPC
	// header with the strict mode policy mask and the interface name.
	// Note that we ignore the strict_policy and don't propagate it
	// further (since we do no outbound RPCs anyway).
	strict_policy = bio_get_uint32(msg);
	s = bio_get_string16(msg, &len);
	if ((len != (sizeof(svcmgr_id) / 2)) ||
	    memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
		fprintf(stderr, "invalid id %s\n", str8(s));
		return -1;
	}

	switch(txn->code) {
	......
	case SVC_MGR_ADD_SERVICE:
		s = bio_get_string16(msg, &len);
		ptr = bio_get_ref(msg);
		if (do_add_service(bs, s, len, ptr, txn->sender_euid))
			return -1;
		break;
	......
	}

	bio_put_uint32(reply, 0);
	return 0;
}
Recall that in BpServiceManager::addService, the parameters written for the Binder driver were:
writeInt32(IPCThreadState::self()->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
writeString16("android.os.IServiceManager");
writeString16("media.player");
writeStrongBinder(new MediaPlayerService());
The statements here:
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
s = bio_get_string16(msg, &len);
ptr = bio_get_ref(msg);
read them back out in the same order. Here we only need to look at the implementation of bio_get_ref. First, the definition of the struct binder_object data structure:
struct binder_object
{
	uint32_t type;
	uint32_t flags;
	void *pointer;
	void *cookie;
};
This struct in fact corresponds to struct flat_binder_object.
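For comparison, struct flat_binder_object is defined in the kernel's binder.h roughly as follows (quoted from the Android 2.x-era header; the binder/handle union lines up with the pointer field above):

struct flat_binder_object {
	/* 8 bytes for large_flat_header. */
	unsigned long		type;
	unsigned long		flags;

	/* 8 bytes of data. */
	union {
		void		*binder;	/* local object */
		signed long	handle;		/* remote object */
	};

	/* extra data associated with local object */
	void			*cookie;
};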
Now the implementation of bio_get_ref:
void *bio_get_ref(struct binder_io *bio)
{
	struct binder_object *obj;

	obj = _bio_get_obj(bio);
	if (!obj)
		return 0;

	if (obj->type == BINDER_TYPE_HANDLE)
		return obj->pointer;

	return 0;
}
We won't step into _bio_get_obj here (a paraphrase follows below); its job is to fetch from the binder_io the first binder_object that has not yet been retrieved. In this scenario, that is the flat_binder_object we passed in at the very beginning to represent MediaPlayerService. The original flat_binder_object had type BINDER_TYPE_BINDER, with binder holding the address of a weak reference to MediaPlayerService. As mentioned earlier, inside the Binder driver this flat_binder_object's type is changed to BINDER_TYPE_HANDLE and its handle is replaced with a handle value. That handle value is exactly what obj->pointer holds here.
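The core of _bio_get_obj can be paraphrased as follows (a sketch based on the structures shown above, not a verbatim quote): it succeeds only if the current read position is one of the positions recorded in the offs array, that is, only if the driver really placed an object there.

/* Paraphrase of _bio_get_obj: return the binder_object at the current
 * read position if that position appears in the offsets array, and
 * consume it; otherwise flag an overflow. */
static struct binder_object *_bio_get_obj(struct binder_io *bio)
{
    unsigned n;
    unsigned off = bio->data - bio->data0;      /* current position in the data */

    for (n = 0; n < bio->offs_avail; n++) {
        if (bio->offs[n] == off) {
            struct binder_object *obj = (struct binder_object *)bio->data;
            bio->data += sizeof(*obj);          /* what bio_get() does internally */
            bio->data_avail -= sizeof(*obj);
            return obj;
        }
    }

    bio->data_avail = 0;
    bio->flags |= BIO_F_OVERFLOW;
    return 0;
}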
Back in svcmgr_handler, do_add_service is called for further processing:
int do_add_service(struct binder_state *bs,
                   uint16_t *s, unsigned len,
                   void *ptr, unsigned uid)
{
	struct svcinfo *si;
//	LOGI("add_service('%s',%p) uid=%d\n", str8(s), ptr, uid);

	if (!ptr || (len == 0) || (len > 127))
		return -1;

	if (!svc_can_register(uid, s)) {
		LOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",
		     str8(s), ptr, uid);
		return -1;
	}

	si = find_svc(s, len);
	if (si) {
		if (si->ptr) {
			LOGE("add_service('%s',%p) uid=%d - ALREADY REGISTERED\n",
			     str8(s), ptr, uid);
			return -1;
		}
		si->ptr = ptr;
	} else {
		si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
		if (!si) {
			LOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",
			     str8(s), ptr, uid);
			return -1;
		}
		si->ptr = ptr;
		si->len = len;
		memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
		si->name[len] = '\0';
		si->death.func = svcinfo_death;
		si->death.ptr = si;
		si->next = svclist;
		svclist = si;
	}

	binder_acquire(bs, ptr);
	binder_link_to_death(bs, ptr, &si->death);
	return 0;
}
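For reference, the struct svcinfo records chained together on svclist are defined in service_manager.c approximately as follows (quoted from the same source file; the flexible name array at the end is why the malloc above allocates sizeof(*si) plus room for the name):

struct svcinfo
{
	struct svcinfo *next;
	void *ptr;                  /* the handle of the service's binder object */
	struct binder_death death;
	unsigned len;
	uint16_t name[0];           /* UTF-16 service name, stored inline */
};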