A simple understanding of Android binder, part 1

Binder is responsible for communication between processes (services and clients). Processes of course cannot talk to each other directly, but every process can open the same device node, and the device behind it presents the same view to all of them; this is the most basic principle of binder. What remains is to design a protocol that lets a client accurately find a service and pass data to it. Android does it like this: a servicemanager is started first; every service (which is itself a client from servicemanager's point of view) talks to servicemanager to register itself (i.e. writes in its name and a corresponding handle/task id); a client then asks servicemanager, by name, for the service's details.
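To make the lookup half of this protocol concrete, here is a simplified sketch of the client side, based on the test client in frameworks/native/cmds/servicemanager/bctest.c of this era. The bio_* helpers, binder_call, and the SVC_MGR_* codes come from the same directory's binder.c and binder.h; treat this as an illustration of the protocol rather than exact current code:

/* Ask servicemanager (target) to resolve a service name to a binder handle. */
void *svcmgr_lookup(struct binder_state *bs, void *target, const char *name)
{
    void *ptr;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);            /* strict-mode header */
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, name);     /* the name the service registered under */

    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
        return 0;

    ptr = bio_get_ref(&reply);          /* the handle servicemanager hands back */
    if (ptr)
        binder_acquire(bs, ptr);

    binder_done(bs, &msg, &reply);
    return ptr;
}

Registration (svcmgr_publish in the same file) is symmetric: it sends SVC_MGR_ADD_SERVICE with the name plus an object reference instead of querying one.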

Chapter 1: Getting started

frameworks/native/cmds/servicemanager

This is Android's servicemanager, which manages the various services.


//service_manager.c
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;

    /*
       Opens the binder device: bs->fd = open("/dev/binder", O_RDWR);
       and mmaps a 128*1024-byte buffer.
    */
    bs = binder_open(128*1024);

    /* Registers the open fd as the context manager:
       ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0); */
    if (binder_become_context_manager(bs)) {
        /* This is the first ioctl call; in the driver its main job is
           binder_context_mgr_node = binder_new_node(proc, NULL, NULL); */
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler);
    return 0;
}
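For reference, the binder_open and binder_become_context_manager helpers used above live in the same directory's binder.c and look roughly like this (slightly abridged; details vary a little across AOSP versions):

struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return 0;
    }

    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open device (%s)\n", strerror(errno));
        goto fail_open;
    }

    /* map mapsize bytes of the device; the driver wires this area up as
       the buffer it copies incoming transactions into */
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n", strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}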

Now let's walk through what these calls do, step by step.

First: opening the device.

open("/dev/binder", O_RDWR);

What exactly does this do down in the driver?

In the driver, drivers/staging/android/binder.c:

static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
             current->group_leader->pid, current->pid);

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);
    mutex_lock(&binder_lock);
    binder_stats_created(BINDER_STAT_PROC); /* binder_stats.obj_created[type]++;
                                               binder_stats is a global counter of
                                               created objects, used for bookkeeping */
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc;
    mutex_unlock(&binder_lock);

    if (binder_debugfs_dir_entry_proc) {
        char strbuf[11];
        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
            binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
    }

    return 0;
}

This function mainly kzallocs a struct binder_proc and records the calling task and its pid. binder_proc is fairly involved, and its fields won't all make sense yet; we'll have to read on:


struct binder_proc {
    struct hlist_node proc_node;      /* link in the global binder_procs list */
    struct rb_root threads;           /* this process's binder_threads, keyed by pid */
    struct rb_root nodes;             /* binder nodes this process owns */
    struct rb_root refs_by_desc;      /* refs to other processes' nodes, by handle */
    struct rb_root refs_by_node;      /* the same refs, keyed by node address */
    int pid;
    struct vm_area_struct *vma;
    struct task_struct *tsk;
    struct files_struct *files;
    struct hlist_node deferred_work_node;
    int deferred_work;
    void *buffer;                     /* kernel address of the mmap'ed buffer */
    ptrdiff_t user_buffer_offset;     /* offset between user and kernel addresses */

    struct list_head buffers;
    struct rb_root free_buffers;
    struct rb_root allocated_buffers;
    size_t free_async_space;

    struct page **pages;
    size_t buffer_size;
    uint32_t buffer_free;
    struct list_head todo;            /* work queued for the process as a whole */
    wait_queue_head_t wait;           /* where idle threads sleep waiting for work */
    struct binder_stats stats;
    struct list_head delivered_death;
    int max_threads;
    int requested_threads;
    int requested_threads_started;
    int ready_threads;
    long default_priority;
    struct dentry *debugfs_entry;
};

Let's keep going. Back in servicemanager:

binder_loop(bs, svcmgr_handler);
This enters the main loop:

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
It first writes data to the driver; the data takes the form of a struct binder_write_read:

int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr, "binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}
The only meaningful payload is readbuf[0] = BC_ENTER_LOOPER, sent via the BINDER_WRITE_READ ioctl.
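For reference, the structure handed to the BINDER_WRITE_READ ioctl is defined in this era's binder header roughly as follows; the write half carries commands from user space to the driver, the read half carries the driver's replies back:

struct binder_write_read {
    signed long write_size;      /* bytes available in the write buffer */
    signed long write_consumed;  /* bytes consumed by the driver */
    unsigned long write_buffer;
    signed long read_size;       /* bytes available in the read buffer */
    signed long read_consumed;   /* bytes consumed by the driver */
    unsigned long read_buffer;
};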

Good. Now for the main event: what exactly does BINDER_WRITE_READ do inside the driver?


static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{

......

    struct binder_thread *thread;

    thread = binder_get_thread(proc);

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;

    while (*p) { /* On the first ioctl *p is NULL, because open() never touched
                    this tree, so the loop is skipped; this is the second ioctl,
                    so we enter the loop. When would the tree ever hold more than
                    one node? It's unclear whether servicemanager has more than
                    one thread, but a process like mediaserver certainly can. */
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);

        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    if (*p == NULL) {
        thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        if (thread == NULL)
            return NULL;
        binder_stats_created(BINDER_STAT_THREAD);
        thread->proc = proc;
        thread->pid = current->pid;
        init_waitqueue_head(&thread->wait);
        INIT_LIST_HEAD(&thread->todo);
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
        thread->return_error = BR_OK;
        thread->return_error2 = BR_OK;
    }
    return thread;
}
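For context, the binder_thread being allocated here is declared in the same driver file, roughly as:

struct binder_thread {
    struct binder_proc *proc;     /* owning process */
    struct rb_node rb_node;       /* link in proc->threads, keyed by pid */
    int pid;
    int looper;                   /* BINDER_LOOPER_STATE_* flags */
    struct binder_transaction *transaction_stack;
    struct list_head todo;        /* work queued for this specific thread */
    uint32_t return_error;        /* pending error to report back in the read buffer */
    uint32_t return_error2;
    wait_queue_head_t wait;       /* where this thread sleeps waiting for its work */
    struct binder_stats stats;
};

So the driver keeps one binder_thread per kernel thread that has touched the fd, found by walking proc->threads with the thread's pid as the key.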

case BINDER_WRITE_READ: {

    ...

    if (bwr.write_size > 0) { /* from binder_loop above, write_size == sizeof(unsigned) */
        ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer,
                                  bwr.write_size, &bwr.write_consumed);
    }

Now let's look at binder_thread_write, substituting the actual arguments. By the time binder_write issues the ioctl, the fields are:

    bwr.write_size = sizeof(unsigned);
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) readbuf;   /* readbuf[0] == BC_ENTER_LOOPER */

so the call is effectively:

binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, sizeof(unsigned), &bwr.write_consumed /* == 0 */)

if (get_user(cmd, (uint32_t __user *)ptr)) /* ptr = buffer + *consumed; */

This fetches the 32-bit command at the start of write_buffer, which here is readbuf[0], i.e. BC_ENTER_LOOPER.

Next:

if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
    binder_stats.bc[_IOC_NR(cmd)]++;
    proc->stats.bc[_IOC_NR(cmd)]++;
    thread->stats.bc[_IOC_NR(cmd)]++;
}

These lines are pure bookkeeping: they increment a counter for this command code globally, per process, and per thread. The totals are exposed through binder's debugfs entries (e.g. /sys/kernel/debug/binder/stats) for debugging.

Then:

case BC_ENTER_LOOPER:

    thread->looper |= BINDER_LOOPER_STATE_ENTERED;

which marks this thread as having entered the looper.

and then:

*consumed = ptr - buffer;

Since len == sizeof(unsigned), the while loop exits and binder_thread_write is done.
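For completeness, the full BC_ENTER_LOOPER case in binder_thread_write reads roughly as follows (details may differ slightly between kernel versions); note that it sets BINDER_LOOPER_STATE_ENTERED, and flags the thread invalid if it had already registered via BC_REGISTER_LOOPER:

case BC_ENTER_LOOPER:
    binder_debug(BINDER_DEBUG_THREADS,
             "binder: %d:%d BC_ENTER_LOOPER\n",
             proc->pid, thread->pid);
    /* a thread must not mix BC_REGISTER_LOOPER and BC_ENTER_LOOPER */
    if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
        thread->looper |= BINDER_LOOPER_STATE_INVALID;
        binder_user_error("binder: %d:%d ERROR:"
            " BC_ENTER_LOOPER called after "
            "BC_REGISTER_LOOPER\n",
            proc->pid, thread->pid);
    }
    thread->looper |= BINDER_LOOPER_STATE_ENTERED;
    break;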


Back in:

case BINDER_WRITE_READ: {

....

the next step is:

if (bwr.read_size > 0) { /* read_size == 0 here, so nothing happens */
    ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer,
                             bwr.read_size, &bwr.read_consumed,
                             filp->f_flags & O_NONBLOCK);
}

OK, control now returns to servicemanager's binder_loop; this time around:

for (;;) {
    bwr.read_size = sizeof(readbuf);
    bwr.read_consumed = 0;
    bwr.read_buffer = (unsigned) readbuf;

    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

We are back in the driver, but this time write_size == 0 and read_size == sizeof(readbuf), i.e. 128 bytes (32 unsigned words).

So back to the driver we go; this time binder_thread_write is skipped and we enter binder_thread_read instead:

static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  void  __user *buffer, int size,
                  signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

.....

        if (put_user(BR_NOOP, (uint32_t __user *)ptr)) /* first, write a BR_NOOP
                command back to user space, to be consumed by servicemanager's
                binder_loop/binder_parse */

retry:
    wait_for_proc_work = thread->transaction_stack == NULL &&
                list_empty(&thread->todo);  /* wait_for_proc_work == true here */

    if (thread->return_error != BR_OK && ptr < end) { /* return_error starts out
            as BR_OK, so this branch is not taken */

.......

    }

    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++;
    mutex_unlock(&binder_lock);
    if (wait_for_proc_work) { /* true, so this branch is taken */
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                    BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("binder: %d:%d ERROR: Thread waiting "
                "for process work before calling BC_REGISTER_"
                "LOOPER or BC_ENTER_LOOPER (state %x)\n",
                proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait,
                         binder_stop_on_user_error < 2);
        }
        binder_set_nice(proc->default_priority);
        if (non_block) { /* /dev/binder was opened without O_NONBLOCK, so the
                            fd is blocking and non_block is false */
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible_exclusive(proc->wait,
                    binder_has_proc_work(proc, thread)); /* so servicemanager
                    sleeps here, waiting for commands */
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible(thread->wait,
                    binder_has_thread_work(thread));
    }
    mutex_lock(&binder_lock);
    if (wait_for_proc_work)
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    if (ret)
        return ret;

To summarize: servicemanager opens /dev/binder, uses an ioctl to put its thread in the driver into the ENTER_LOOPER state, and then sits in the for (;;) loop, blocked, waiting for commands.
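As a recap, that whole sequence can be condensed into a small standalone sketch. This is an illustration under assumptions, not production code: it presumes the uapi header is available as <linux/android/binder.h> (the name and location vary by kernel version), needs suitable privileges on an Android device, and the final ioctl will simply block until some other process sends this one a transaction:

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android/binder.h>   /* assumed header location */

int main(void)
{
    /* open(): the driver's binder_open() allocates a binder_proc for us */
    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0) { perror("open /dev/binder"); return 1; }

    /* mmap(): sets up the 128KB transaction buffer, as servicemanager does */
    if (mmap(NULL, 128*1024, PROT_READ, MAP_PRIVATE, fd, 0) == MAP_FAILED) {
        perror("mmap"); return 1;
    }

    /* write half: send BC_ENTER_LOOPER; binder_get_thread() creates our
       binder_thread and binder_thread_write() sets LOOPER_STATE_ENTERED */
    uint32_t cmd = BC_ENTER_LOOPER;
    struct binder_write_read bwr;
    memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = (uintptr_t)&cmd;
    bwr.write_size = sizeof(cmd);
    if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0) { perror("ioctl write"); return 1; }

    /* read half: binder_thread_read() blocks on proc->wait until work arrives */
    uint32_t readbuf[32];
    memset(&bwr, 0, sizeof(bwr));
    bwr.read_buffer = (uintptr_t)readbuf;
    bwr.read_size = sizeof(readbuf);
    if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
        fprintf(stderr, "ioctl read: %s\n", strerror(errno));

    close(fd);
    return 0;
}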






 
