Binder driver layer: analyzing how a service is added

Binder driver source files

/drivers/android/binder.c
/drivers/staging/android/binder.c

The driver layer has two core, fairly complex functions: binder_thread_write and binder_thread_read. They are too long to read from start to finish, so in this article we will follow a set of clues instead. Let's pick the clues out first:

// ServiceManager process: get the binder driver version and check that it matches
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) || (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
  goto fail_open;
}

// ServiceManager process: make ServiceManager the context manager
int binder_become_context_manager(struct binder_state *bs)
{
  return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

// ServiceManager process: the binder thread enters its looping wait
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));

// ServiceManager process: loop forever, reading and writing binder data
for (;;) {
  bwr.read_size = sizeof(readbuf);
  bwr.read_consumed = 0;
  bwr.read_buffer = (uintptr_t) readbuf;
  res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
  ...
}

// media process: add the MediaPlayerService service
do {
  // keep reading and writing via ioctl to communicate with the Binder driver, which forwards the request to the ServiceManager process
  if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
    err = NO_ERROR;
    ...
} while (err == -EINTR); // retry when interrupted

1. Getting the Binder driver version

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
    // get the binder_proc from filp
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
    // interruptible wait until binder_stop_on_user_error < 2
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;
    // get the binder thread from the binder_proc
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_VERSION: {
        // ubuf -> args -> vers
		struct binder_version __user *ver = ubuf;
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
        // copy BINDER_CURRENT_PROTOCOL_VERSION into vers->protocol_version
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}

	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
	return ret;
}
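For reference, binder_version is a one-field struct from the binder UAPI header. The sketch below is abridged from the kernel headers; the exact value of the protocol macro depends on the kernel release and on whether the 32-bit protocol is selected:

/* abridged from include/uapi/linux/android/binder.h */
struct binder_version {
	/* driver protocol version -- incremented when the protocol changes */
	__s32 protocol_version;
};

/* the value the driver reports via put_user() above; user space
 * (servicemanager, libbinder) compares it with the macro it was built with */
#define BINDER_CURRENT_PROTOCOL_VERSION 8   /* 7 on BINDER_IPC_32BIT builds */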

2. ServiceManager becomes the Binder driver's context manager

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
    // get the binder_proc from filp
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
    // interruptible wait until binder_stop_on_user_error < 2
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;
    // get the binder thread from the binder_proc
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
	return ret;
}

static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	kuid_t curr_euid = current_euid();
    // there can be only one context manager (it can only be set once)
	if (binder_context_mgr_node != NULL) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	if (uid_valid(binder_context_mgr_uid)) {
		...
	} else {
		binder_context_mgr_uid = curr_euid;
	}
    // the static variable binder_context_mgr_node = binder_new_node(...)
	binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (binder_context_mgr_node == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	binder_context_mgr_node->local_weak_refs++;
	binder_context_mgr_node->local_strong_refs++;
	binder_context_mgr_node->has_strong_ref = 1;
	binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
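For context, the fields touched above live in struct binder_node. The following is an abridged excerpt (field subset only) of that struct as it appears in binder.c of this era:

struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;       /* linked into proc->nodes while alive */
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;             /* owning process */
	struct hlist_head refs;               /* binder_refs held by other processes */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;                 /* user-space object pointer (0 for the ctx mgr) */
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned has_weak_ref:1;
	/* ... remaining bitfields and async_todo elided ... */
};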

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
    // start from the root of the proc->nodes red-black tree
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
        // rb_entry recovers the enclosing node from parent via the member offset
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}
    // nothing found (the tree is empty the first time), so allocate a new binder_node
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
    // link the new node into the binder_proc's nodes tree
	rb_link_node(&node->rb_node, parent, p);
    // rebalance / recolor the red-black tree
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}
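The rb_entry() call above deserves a note: it is just container_of(), which recovers the enclosing struct from a pointer to a member embedded inside it. A simplified sketch of the two macros (the real kernel versions add type checking):

/* simplified sketch of the kernel macros */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define rb_entry(ptr, type, member) container_of(ptr, type, member)

/* so rb_entry(parent, struct binder_node, rb_node) subtracts the offset of the
 * rb_node field from `parent` to get back the struct binder_node pointer */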

3. ServiceManager enters its wait loop

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    // get the binder_proc from filp
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    // interruptible wait until binder_stop_on_user_error < 2
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;
    // get the binder thread from the binder_proc
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
    return ret;
}

static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
    // get the binder_proc from filp
	struct binder_proc *proc = filp->private_data;
    // arg is the address of the binder_write_read struct passed down from user space
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

    // copy the user-space binder_write_read into the kernel-space bwr
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
    // write_size is 0 here, so this branch is skipped
	if (bwr.write_size > 0) {
		...
	}
    // this branch is taken: read data
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	// copy the kernel-space bwr back to the user-space binder_write_read
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
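For reference, binder_write_read is the small descriptor being copied back and forth here; abridged from the binder UAPI header:

struct binder_write_read {
	binder_size_t    write_size;      /* bytes to write */
	binder_size_t    write_consumed;  /* bytes consumed by the driver */
	binder_uintptr_t write_buffer;    /* user-space address of the write data */
	binder_size_t    read_size;       /* bytes available to read into */
	binder_size_t    read_consumed;   /* bytes the driver actually wrote */
	binder_uintptr_t read_buffer;     /* user-space address of the read buffer */
};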

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    // start address of the data
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;
    // consumed == 0
	if (*consumed == 0) {
		ptr += sizeof(uint32_t);
	}

retry:
    // if the thread's transaction stack and todo list are both empty, there is nothing for this thread to handle: bump the idle-thread counter (i.e. set wait_for_proc_work to 1) and have the thread wait on the *process* wait queue
	wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);

	if (wait_for_proc_work)
		proc->ready_threads++;

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) {
            // the thread has not entered the binder loop yet: report an error and block until binder_stop_on_user_error < 2
		    wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
        // non-blocking path
		if (non_block) {
			...
		} else{
            // blocking read: the process sleeps on proc->wait, i.e. servicemanager keeps waiting here until the todo list has work, i.e. until binder_has_proc_work(proc, thread) becomes true
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
        }
	} else {
		...
	}
    // the code below is not reached yet; it needs the thread's todo list to have entries
	return 0;
}
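The wake-up condition used above, binder_has_proc_work(), is roughly the following (abridged, based on the same era of binder.c):

static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	/* wake up when the process todo list has work, or when the thread
	 * has been asked to return to user space */
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}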

4. The Binder driver adds a system service

The method media uses to add its service:

// media process: add the MediaPlayerService service
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = 256;
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
   ...
    do {
        // this calls down into the driver's binder_ioctl
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
            
    } while (err == -EINTR);
    ...
    return err;
}

Notice that media has both data to write and data to read.

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    // get the binder_proc from filp
    struct binder_proc *proc = filp->private_data;
    // arg is the address of the binder_write_read struct passed down from user space
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    // copy the user-space binder_write_read into the kernel-space bwr
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    // write_size > 0, so this branch is taken
    if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
    }
   
    if (bwr.read_size > 0) {
        ...
    }
    // copy the kernel-space bwr back to the user-space binder_write_read
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	// write_buffer holds two things: the cmd and a binder_transaction_data
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
        // read the cmd, which is BC_TRANSACTION here
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		switch (cmd) {
        ...
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;
            // copy the binder_transaction_data from user space into kernel space
			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}
		...
		*consumed = ptr - buffer;
	}
	return 0;
}
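The binder_transaction_data copied in from user space looks roughly like this (abridged from the binder UAPI header):

struct binder_transaction_data {
	union {
		__u32            handle;   /* target handle (0 = servicemanager here) */
		binder_uintptr_t ptr;      /* target binder pointer, used in replies */
	} target;
	binder_uintptr_t cookie;
	__u32            code;             /* e.g. ADD_SERVICE_TRANSACTION */
	__u32            flags;            /* e.g. TF_ONE_WAY */
	pid_t            sender_pid;
	uid_t            sender_euid;
	binder_size_t    data_size;        /* size of the payload */
	binder_size_t    offsets_size;     /* size of the flat_binder_object offsets */
	union {
		struct {
			binder_uintptr_t buffer;   /* payload */
			binder_uintptr_t offsets;  /* offsets of embedded binder objects */
		} ptr;
		__u8 buf[8];
	} data;
};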

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
    // target process
	struct binder_proc *target_proc;
    // target thread
	struct binder_thread *target_thread = NULL;
    // target node
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
    // the handle value, 0 here
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		...
	} else {
		if (tr->target.handle) {
			...
		} else {
            // when adding a service the handle passed in is 0, so target_node is the static context-manager node
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		target_proc = target_node->proc;
	}
	if (target_thread) {
		...
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}

	// binder_transaction *t;
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	// binder_work *tcomplete;
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
    // tr->code = ADD_SERVICE_TRANSACTION
	t->code = tr->code;
	t->flags = tr->flags;
    // allocate the buffer in the target process's mapped memory on demand: mmap only mapped one physical page up front, so the driver now allocates as much as the data being sent needs
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;

	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);
    // copy the data into the target process's space
	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));
	// copy the data to the target process: t->buffer->data is the newly allocated buffer in the target process, tr->data.ptr.buffer is the client's data
	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}

	off_end = (void *)offp + tr->offsets_size;
	 // loop over the flat_binder_objects passed by the client
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
        // get the flat_binder_object from the target process's buffer
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        // check the type; here it is BINDER_TYPE_BINDER
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_ref *ref;
            // fp->binder is the client-side binder reference; look up the binder_node in the sending process, creating one if it does not exist
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
                // create a new binder_node from fp->binder
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
			}
            // get (or create) the binder_ref for this node in the target process; each new ref's desc is one greater than the previous
			ref = binder_get_ref_for_node(target_proc, node);
            // replace fp->type with BINDER_TYPE_HANDLE
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				...
            // assign fp->handle, i.e. the handle value
			fp->handle = ref->desc;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
				       &thread->todo);
			trace_binder_transaction_node_to_ref(t, node, ref);
		} break;
		...
	}
    if (reply) {
		...
	} else if (!(t->flags & TF_ONE_WAY)) {
	    // not one-way
		BUG_ON(t->buffer->async_transaction != 0);
		// a reply is required
		t->need_reply = 1;
		// record the transaction stack
		t->from_parent = thread->transaction_stack;
		...
		}
    // queue a BINDER_WORK_TRANSACTION on the target process: push &t->work.entry onto the target's todo list
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
    // queue a BINDER_WORK_TRANSACTION_COMPLETE on our own thread
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	// wake up target_wait, i.e. wake up servicemanager
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;
}

(Figure: diagram of the binder_transaction flow described above.)
When data is transferred, the driver first checks whether the sending process already has a binder_node for this binder, and creates one if it does not. It then looks up the corresponding binder_ref in the target process: if servicemanager does not have one yet, a binder_ref is created and linked into the refs_by_desc and refs_by_node red-black trees, the type is replaced with BINDER_TYPE_HANDLE, and the handle value is computed.
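How that handle (desc) value is computed can be seen in binder_get_ref_for_node(); an abridged sketch based on the same era of binder.c, with the lookups and tree insertions elided:

static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct binder_ref *ref, *new_ref;

	/* ... search proc->refs_by_node; if a ref for this node already exists, return it ... */

	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	new_ref->proc = proc;
	new_ref->node = node;
	/* desc 0 is reserved for the context manager; everything else starts at 1 */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	/* walk refs_by_desc in ascending order and settle on the next free desc */
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}
	/* ... insert new_ref into both refs_by_node and refs_by_desc ... */
	return new_ref;
}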

(Figure: the data involved in media's add-service request.)
5. The ServiceManager process handles the add request

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
     ....
	// woken up now that the todo list has entries
	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;
    // now loop and read the data
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;
    
		if (!list_empty(&thread->todo)) {
			...
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            // take the first entry off the todo list
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			...
		}

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
            // this is the branch taken
			t = container_of(w, struct binder_transaction, work);
		} break;
		...
		}

		if (!t)
			continue;

		if (t->buffer->target_node) {
            // fill in the transaction parameters
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			cmd = BR_TRANSACTION;
		} else {
			...
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
        // fill in the data fields
		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)(
					(uintptr_t)t->buffer->data +
					proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));
        // write the command: first put BR_TRANSACTION into ptr, i.e. into the readbuffer
		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
        // copy the data to user space: copy it unchanged into the readbuffer
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);
        // remove the entry from the todo list
		list_del(&t->work.entry);
		break;
	}

done:
    ...
	return 0;
}

Once servicemanager's wake-up path has completed and the data has been copied, the ioctl returns to servicemanager's user-space service side, back in binder_loop:

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
    // write BC_ENTER_LOOPER to the driver to announce that this thread has entered the loop
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        // loop, waiting to read data from the binder driver; this call blocks,
        // and when it returns, bwr holds the values the driver wrote
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // blocks here

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        // parse the binder data returned from the remote process
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
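Back in user space, binder_parse() walks the readbuf that the driver filled and dispatches BR_TRANSACTION to svcmgr_handler (the func passed into binder_loop). An abridged sketch of its core loop, based on servicemanager's binder.c, with error handling and the other BR_ codes elided:

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;   /* BR_NOOP, BR_TRANSACTION, ... */
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);       /* wrap the buffer the driver copied in */
                res = func(bs, txn, &msg, &reply);  /* svcmgr_handler: do_add_service, etc. */
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        /* ... BR_NOOP, BR_TRANSACTION_COMPLETE, BR_REPLY, BR_DEAD_BINDER ... */
        }
    }
    return r;
}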

Summary:
servicemanager first asks to become the binder driver's context manager, then goes to sleep in binder_thread_read (reached via binder_ioctl_write_read), waiting for work to appear in its todo list.
The client finds servicemanager via handle = 0 and copies its data into the memory mapped between servicemanager's kernel space and user space. At the same time a reference to the client's service binder is hung on servicemanager's two red-black trees (one keyed by the binder_node, the other by the handle value, which is incremented each time), the type is converted to BINDER_TYPE_HANDLE, and the todo queue is woken up.
After servicemanager is woken up in the driver layer, it packages the data and copies it to servicemanager's user space, i.e. its service side, where addService, findService and other operations are then carried out.

Typical questions and answers:

  1. Recap of the driver-layer one-way communication flow
    1.1. How does the binder driver find the ServiceManager process for us?
    handle = 0: the driver layer keeps a static variable, binder_context_mgr_node, created when ServiceManager became the context manager.

1.2. How does the ServiceManager process go to sleep and how is it woken up?
It waits on the proc wait queue, and the condition is whether the todo list is non-empty. After the client finds the target_proc, it copies the data into the target process, queues a work item for the target process to handle, queues a "transaction complete" item on its own thread (so that it goes to wait for the reply), and finally wakes the server process's wait queue. The server process is then woken up and continues processing the requests on its todo list.

1.3. How is the data passed, and how are handle and type computed and managed?
The binder driver checks what the type is, hangs a binder_node on the sending process if needed, and hangs a binder_ref for that node on the target process, linked into two red-black trees: one keyed by the handle (desc) value and one keyed by the node. The handle value is computed by accumulating over the refs already in the tree.

1.4. How many memory copies are there?
Certainly not just 1. The server side copies its data into the kernel and back out again (2 copies), the client side copies its data into the kernel and back out again (2 copies), plus one copy of the client's data into the memory mapped for the server, and one copy from the server's mapped memory into the memory created in the driver layer: 6 copies in total.
For example, when client process A copies data to the driver side, it is not copied into the region mapped between A's user space and kernel space; it only goes into driver memory, and from there it is copied into the memory mapped between servicemanager's user space and kernel space.
