ServiceManager, the Guardian of Binder Communication

In the previous chapters we discussed the core implementation of the Binder driver, including its data structures and its cross-process operations. In this chapter we look at how the whole flow plays out inside the Android system, starting from service_manager.c.

You may ask why we start from service_manager.c rather than somewhere else. The reason is that service_manager.c is the code that manages Binder services for the entire system: it runs as a daemon, and it is itself a Binder. That makes it the natural entry point.


frameworks/native/cmds/servicemanager/service_manager.c

int main(int argc, char **argv)
{
    struct binder_state *bs;//[1]
    bs = binder_open(128*1024);//[2]
    if (binder_become_context_manager(bs)) {//[3]
    ...
    }
    ...
    binder_loop(bs, svcmgr_handler);//[4]
    return 0;
}

1 The binder_state struct

struct binder_state
{
    int fd;// file descriptor of the opened /dev/binder device
    void *mapped;// start address where the device file is mapped into this process's address space
    size_t mapsize;// size of the memory-mapped region
};

2 binder_open()

  • Open the device driver
  • Map a range of virtual address space onto physical memory

struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    bs = malloc(sizeof(*bs));
    bs->fd = open("/dev/binder", O_RDWR);//[2.1] open the device driver
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);//[2.2] map virtual address space onto physical memory
    ...
    return bs;
    ...
}

2.1 open()->binder_open()

Opens the device node and creates the binder_proc structure that represents the calling process.

static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;// the binder process structure
    proc = kzalloc(sizeof(*proc), GFP_KERNEL);// allocate the binder_proc
    get_task_struct(current);
    proc->tsk = current;
    INIT_LIST_HEAD(&proc->todo);// initialize the todo list
    init_waitqueue_head(&proc->wait);// initialize the wait queue
    proc->default_priority = task_nice(current);// record the default priority

    binder_lock(__func__);
    binder_stats_created(BINDER_STAT_PROC);
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc; // point the file's private_data at the binder_proc
    binder_unlock(__func__);
    return 0;
}

2.2 mmap()->binder_mmap()

Its job is to allocate the buffer and hand it to the binder_proc, which manages it through
- proc->buffer = area->addr
- proc->user_buffer_offset


static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area;// kernel virtual address area
    struct binder_proc *proc = filp->private_data;// the binder_proc stored here by binder_open()
    const char *failure_string;
    struct binder_buffer *buffer;

    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;// the mapping may not exceed 4 MB
    ...
    mutex_lock(&binder_mmap_lock);
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);// reserve a kernel virtual address range
    proc->buffer = area->addr;// record the kernel-space start address in proc->buffer
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    mutex_unlock(&binder_mmap_lock);

    ...
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
    ...
    return ret;
}

This function reserves address space in the kernel; once the backing pages are allocated, the same physical memory is mapped both into the kernel-space buffer and into the user-space buffer, so data received by the kernel is directly visible to user space.
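
In other words, after binder_mmap() a kernel address inside this buffer and its user-space alias differ by the constant proc->user_buffer_offset. Below is a minimal sketch of that arithmetic; the helper name is invented for illustration, but the same expression appears later when binder_thread_read() fills in tr.data.ptr.buffer.

/* Illustration only: convert the kernel-space address of a binder buffer into
 * the user-space address of the same physical page, using the offset that
 * binder_mmap() computed. */
static uintptr_t binder_kaddr_to_uaddr(struct binder_proc *proc, void *kaddr)
{
    return (uintptr_t)kaddr + proc->user_buffer_offset;
}

This is why Binder needs only one copy: the driver copies the sender's data into the kernel half of the mapping, and the receiver reads it straight through its user-space alias.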


3 binder_become_context_manager()

binder_become_context_manager() sends the BINDER_SET_CONTEXT_MGR command to the kernel, which then creates a binder_node called binder_context_mgr_node; this binder_node is the Binder entity object that represents ServiceManager.

int binder_become_context_manager(struct binder_state *bs){
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);//[3.1]
}

3.1 ioctl()->binder_ioctl()

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;// the binder thread
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);// may sleep here until the condition is signalled
    binder_lock(__func__);// take the global binder lock
    thread = binder_get_thread(proc);//[3.1.1] look up (or create) the binder_thread for the caller
    switch (cmd) {
        case BINDER_SET_CONTEXT_MGR:
        ret = security_binder_set_context_mgr(proc->tsk);
        if (binder_context_mgr_uid != -1) {
        } else
            binder_context_mgr_uid = current->cred->euid;
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL);//[3.1.2] create the ServiceManager entity
        ...
        break;
    }
    ...
    return ret;
}

3.1.1 binder_get_thread()


Searches the process's thread table for the current thread. If it is already there it is returned; otherwise a binder_thread describing the current thread is created, inserted into the process's tree, and returned.

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;// root of the red-black tree that stores the process's threads

    while (*p) {// search the tree for the current thread
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);

        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    if (*p == NULL) {// not found: create a new binder_thread
        thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        binder_stats_created(BINDER_STAT_THREAD);
        thread->proc = proc;
        thread->pid = current->pid;// record the current thread's pid
        init_waitqueue_head(&thread->wait);
        INIT_LIST_HEAD(&thread->todo);
        // insert the newly created thread into the proc's red-black tree
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
        thread->return_error = BR_OK;
        thread->return_error2 = BR_OK;
    }
    return thread;
}

3.1.2 binder_new_node()


static struct binder_node *binder_new_node(struct binder_proc *proc,
                       void __user *ptr,
                       void __user *cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;// root of the red-black tree that stores the process's binder nodes
    struct rb_node *parent = NULL;
    struct binder_node *node;

    while (*p) {// search the tree for a node with this ptr
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);
        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;
    }

    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    binder_stats_created(BINDER_STAT_NODE);
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = ++binder_last_id;
    node->proc = proc;// record the owning process
    node->ptr = ptr;
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;// set the binder_work type
    // initialize the node's work entry and its async_todo list
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    return node;
}

4 binder_loop()


void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;// this command ends up in binder_thread_write(), which sets the looper flag on the current binder_thread
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//[4.2]
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);//[4.3]
        ...
    }
}
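
binder_write() used above is a thin helper from the same binder.c: it issues a BINDER_WRITE_READ ioctl with only the write half of the binder_write_read filled in. The sketch below reflects that helper roughly; details may vary between Android versions.

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;                 // only the write side is used here
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;  // e.g. the BC_ENTER_LOOPER command above
    bwr.read_size = 0;                    // nothing to read on this call
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}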

4.2 ioctl()->binder_thread_read()

Note that reading means checking whether the target's todo list has work on it, and writing means appending work to a todo list. The item on the list is a binder_transaction: once binder_transaction() has queued it on the target thread's or process's todo list, it wakes the target, which then uses binder_thread_read() to copy the data into the user-space bwr.
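
For context, the sender side ends roughly like the fragment below (heavily simplified from binder_transaction(); target_list and target_wait stand for either the target thread's or the target process's todo list and wait queue):

/* Simplified fragment: queue the transaction for the receiver and wake it up. */
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);        /* thread->todo or proc->todo */
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);   /* tells the sender the write was queued */
if (target_wait)
    wake_up_interruptible(target_wait);            /* wakes the reader shown below */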

static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  void  __user *buffer, int size,
                  signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;//bwr.read_buffer
    void __user *end = buffer + size;

    // As soon as a read starts, write a BR_NOOP: every read result begins with BR_NOOP
    // The data written into bwr.read_buffer has the layout BR_NOOP + cmd + data + cmd + data ...
    // If the result buffer contains no data yet, write the BR_NOOP message first
    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

    ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));// if there is no work, sleep here until woken

    while (1) {
        if (!list_empty(&thread->todo))// if the thread's own todo list has work, take it
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            // otherwise take work from the todo list of the process the thread belongs to
            w = list_first_entry(&proc->todo, struct binder_work, entry);

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {// this type was set when binder_thread_write() queued the work
            t = container_of(w, struct binder_transaction, work);// recover the binder_transaction from the binder_work
        } break;
        ... // other work types elided
        }

        // adjust the priority of the thread that will handle the transaction
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;// on the way back from the driver to user space the command becomes BR_TRANSACTION
        }
        // build the binder_transaction_data that is handed back to service_manager.c; after that the ioctl is done
        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (void *)t->buffer->data +
                    proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                        sizeof(void *));
        // copy cmd and tr back to user space
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        break;
    }
    return 0;
}

4.3 binder_parse()

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);//[4.3.1] call the handler that was passed in (svcmgr_handler)
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}
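
The binder_send_reply() call in the BR_TRANSACTION case above does two things in a single write: it frees the kernel buffer of the transaction it just handled (BC_FREE_BUFFER) and sends the reply (BC_REPLY). The sketch below follows the helper in the same binder.c; the exact field layout may differ between versions.

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;                  // BC_FREE_BUFFER
        binder_uintptr_t buffer;            // the transaction buffer we just consumed
        uint32_t cmd_reply;                 // BC_REPLY
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {                           // handler failed: reply with a status code
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {                                // normal case: ship the reply binder_io
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char *) reply->offs) - ((char *) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t) reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t) reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}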

4.3.1 svcmgr_handler()


int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
   ...
    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        //[4.3.1.1] look up the service
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                    txn->sender_euid);
            return -1;
        }
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}

4.3.1.1 do_find_service()

Note that a successful lookup returns a handle value; this handle is the reference to the corresponding Binder object held in the kernel.



uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    struct svcinfo *si = find_svc(s, len);//[4.3.1.1.1]
    if (!si || !si->handle) {
        return 0;
    }
    if (!si->allow_isolated) {
        uid_t appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
            return 0;
        }
    }

    if (!svc_can_find(s, len, spid)) {
        return 0;
    }
    return si->handle;
}

4.3.1.1.1 find_svc()

Walks the svclist by service name to check whether the service has already been registered. If the service is present in svclist, the matching svcinfo entry is returned; otherwise NULL is returned.

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}

We now know there is an svclist, but not yet where its entries come from; that happens in do_add_service(), shown below.

int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;
    if (!handle || (len == 0) || (len > 127))
        return -1;

    if (!svc_can_register(s, len, spid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
             str8(s, len), handle, uid);
        return -1;
    }
    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            return -1;
        }
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;
        svclist = si;// prepend the new entry to svclist
    }

    binder_acquire(bs, handle);
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}
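
The binder_acquire() and binder_link_to_death() calls at the end are thin wrappers around binder_write(): the first sends BC_ACQUIRE for the handle, the second registers a death notification so that the BR_DEAD_BINDER case in binder_parse() can invoke svcinfo_death() when the service's process dies. A sketch of the latter, assuming the layout used by this binder.c:

void binder_link_to_death(struct binder_state *bs, uint32_t target,
                          struct binder_death *death)
{
    struct {
        uint32_t cmd;                        // BC_REQUEST_DEATH_NOTIFICATION
        struct binder_handle_cookie payload;
    } __attribute__((packed)) data;

    data.cmd = BC_REQUEST_DEATH_NOTIFICATION;
    data.payload.handle = target;            // the service's handle
    data.payload.cookie = (uintptr_t) death; // handed back in BR_DEAD_BINDER
    binder_write(bs, &data, sizeof(data));
}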

Summary:

ServiceManager's job, as we have seen, is to manage Binder services. Its startup flow is:

  • 1. Open the driver node /dev/binder with open() and map a buffer with mmap(), so that its virtual address range corresponds to real physical memory.
  • 2. Register itself as the context manager with binder_become_context_manager(), which creates a Binder entity (binder_node) for it in the kernel.
  • 3. Enter the message loop and wait for client requests: binder_loop() (a client-side sketch follows below).
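
To round off step 3, here is roughly how a client asks ServiceManager for a service handle. The sketch follows the helper used by the bctest tool in the same directory; binder_call, the bio_* functions and SVC_MGR_NAME come from that binder.c and may differ across versions.

uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);                 // strict-mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME);  // "android.os.IServiceManager"
    bio_put_string16_x(&msg, name);          // the service being looked up

    // target is ServiceManager's own handle (0); the code SVC_MGR_CHECK_SERVICE
    // ends up in svcmgr_handler() shown above
    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
        return 0;

    handle = bio_get_ref(&reply);            // the handle that bio_put_ref(reply, handle) stored
    if (handle)
        binder_acquire(bs, handle);

    binder_done(bs, &msg, &reply);
    return handle;
}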