Android-Framework:Binder(3)-Binder driver
一、Binder驱动概述
Binder机制中主要的参与者有如下四者:client,service,service_manager,binder驱动;其中service和client是binder的调用者,servicemanager是service的管理者,binder驱动是binder的提供者。
其中只有binder驱动是运行在内核空间中,其他3者都是运行在用户空间;
大致如下图:
Binder驱动是跨进程的核心,事实上binder驱动和硬件设备是没关系的,只是实现方式和设备驱动程序是一样的:
1.Binder驱动工作于内核空间,以字符驱动设备中的misc设备注册在设备目录/dev下,用户通过/dev/binder访问它;
2.提供open(),mmap(),poll(),ioctl()等标准文件操作接口;
3.Binder驱动负责进程间的binder通信建立,数据传递交互等底层支持;
二、Binder驱动中重要的结构体和协议
1.Binder驱动中重要的结构体
flat_binder_object //描述在Binder IPC中传递的对象,见下文
binder_write_read //存储一次读写操作的数据
binder_version //存储Binder的版本号
transaction_flags //描述事务的flag,例如是否是异步请求,是否支持fd
binder_transaction_data //存储一次事务的数据
binder_ptr_cookie //包含了一个指针和一个cookie
binder_handle_cookie //包含了一个句柄和一个cookie
binder_pri_desc //暂未用到
binder_pri_ptr_cookie //暂未用到
binder_node //描述Binder实体节点,即:对应了一个Server
binder_ref //描述对于Binder实体的引用
binder_buffer //描述Binder通信过程中存储数据的Buffer
binder_proc //描述使用Binder的进程
binder_thread //描述使用Binder的线程
binder_work //描述通信过程中的一项任务
binder_transaction //描述一次事务的相关信息
binder_deferred_state //描述延迟任务
binder_ref_death //描述Binder实体死亡的信息
binder_transaction_log //debugfs日志
binder_transaction_log_entry //debugfs日志条目
重要的几个结构体的主要关系如下图:
从上图中我们得知Binder驱动的核心是创建及维护了一个binder_proc类型的哈希链表binder_procs,这个链表记录了包括service_manager所在进程在内的所有使用Binder的进程信息;
2. Binder驱动协议
2.1 控制协议
/kernel/msm-3.18/drivers/staging/android/uapi/binder.h
//Read/write operation, the most frequently used command; all IPC data transfer goes through it
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
//Set the maximum number of binder threads this process supports
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
//Register the calling process as the context manager (ServiceManager)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
//Notify the driver that a binder thread is exiting
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
//Query the binder driver's protocol version
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
2.2 驱动协议
驱动协议定义在kernel\msm-3.18\drivers\staging\android\uapi\binder.h中
binder_driver_command_protocol,定义了进程发送给binder驱动的命令:
//BC_* commands: sent FROM a process TO the binder driver
enum binder_driver_command_protocol {
BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),//a binder transaction, i.e. a client's request to a server
BC_REPLY = _IOW('c', 1, struct binder_transaction_data),//reply to a transaction, i.e. the server's answer to the client
BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),//not supported
BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),//tell the driver to release a buffer
BC_INCREFS = _IOW('c', 4, __u32),//weak reference count +1
BC_ACQUIRE = _IOW('c', 5, __u32),//strong reference count +1
BC_RELEASE = _IOW('c', 6, __u32),//strong reference count -1
BC_DECREFS = _IOW('c', 7, __u32),//weak reference count -1
BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),//acknowledgement of BR_INCREFS
BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),//acknowledgement of BR_ACQUIRE
BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),//not supported
BC_REGISTER_LOOPER = _IO('c', 11),//notify the driver that a spawned worker thread is ready
BC_ENTER_LOOPER = _IO('c', 12),//notify the driver that the main thread is ready
BC_EXIT_LOOPER = _IO('c', 13),//notify the driver that a thread has left its loop
BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,struct binder_handle_cookie),//ask to receive a death notification
BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,struct binder_handle_cookie),//cancel a death-notification request
BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),//the death notification has been handled
};
//BR_* commands: returned FROM the binder driver TO a process
enum binder_driver_return_protocol {
BR_ERROR = _IOR('r', 0, __s32),//an error occurred
BR_OK = _IO('r', 1),//operation completed
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),//deliver a binder request to the receiving (server) process
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),//the driver forwards the server's reply back to the client
BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),//not implemented
BR_DEAD_REPLY = _IO('r', 5),//tell the sender that the target is dead
BR_TRANSACTION_COMPLETE = _IO('r', 6),//driver's acknowledgement that a request was accepted
BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),//weak reference count +1 request
BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),//strong reference count +1 request
BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),//strong reference count -1 request
BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),//weak reference count -1 request
BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),//not implemented
BR_NOOP = _IO('r', 12),//no-op, nothing to do
BR_SPAWN_LOOPER = _IO('r', 13),//ask the process to spawn a new binder thread
BR_FINISHED = _IO('r', 14),//not implemented
BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),//deliver a death notification
BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),//clearing of a death notification is done
};
我们知道binder机制事实上是一种进程通信机制,binder是C/S架构,Client和Server端的数据传递与交互都需要通过binder驱动进行中转传递;
三、Binder驱动源码浅析
Binder作为一个特殊的字符设备,其设备节点/dev/binder。主要代码实现在:
kernel/drivers/staging/android/binder.h
kernel/drivers/staging/android/binder.c
1. Binder驱动初始化
首先我们看binder驱动的初始化,binder_init():
//kernel/msm-3.18/drivers/staging/android/binder.c
//Module init: create the binder workqueue and debugfs directory, then
//register /dev/binder as a misc character device.
static int __init binder_init(void)
{
int ret;
binder_deferred_workqueue = create_singlethread_workqueue("binder");
if (!binder_deferred_workqueue)
return -ENOMEM;
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); //create the binder debugfs directory
...
ret = misc_register(&binder_miscdev);//register as a misc device
...
return ret;
}
misc device本质上是字符设备,调用misc_register()的时候自动完成了为相应类添加设备、动态获取次设备号;Binder是一个miscellaneous类型的驱动,本身不对应任何硬件,所有的操作都在软件层。
这里指定了Binder设备的名称是“binder”。这样,在用户空间便可以通过对/dev/binder进行文件操作来使用Binder。
//kernel/msm-3.18/drivers/staging/android/binder.c
//Misc device descriptor: minor number assigned dynamically; the device
//node appears as /dev/binder.
static struct miscdevice binder_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "binder",
.fops = &binder_fops //file-operation callbacks of this device
};
我们可以看到该设备的接口函数指针如下,这里除了owner之外,每一个字段都是一个函数指针,这些函数指针对应了用户空间在使用Binder设备时的操作。例如:binder_poll对应了poll系统调用的处理,binder_mmap对应了mmap系统调用的处理,其他类同.其中重要方法有binder_ioctl,binder_mmap,binder_open :
//kernel/msm-3.18/drivers/staging/android/binder.c
//File operations for /dev/binder: each field maps a syscall on the device
//node (poll, ioctl, mmap, open, ...) to its binder handler.
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
Binder驱动初始化总结:
需要使用Binder的进程,几乎总是先通过binder_open打开Binder设备,然后通过binder_mmap进行内存映射,之后,通过binder_ioctl来进行实际的操作;
可以看到,binder驱动的初始化主要工作就是创建binder设备节点,注册misc设备,完成必要的设备接口函数定义映射,以供软件对驱动设备的操作。
2. Binder驱动中的重要操作
在上篇文章Android Framework:Binder(2)-Service Manager 中讲述ServiceManager的初始化及处理消息时中遇到的重要的binder驱动操作,代码片段如下:
//打开binder驱动
bs = binder_open(128*1024)
bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);//bs->fd会得到binder驱动的设备号
//内存映射
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
//将ServiceManager对应的binder句柄handle设置为0
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
// 向binder驱动去查询是否有向自己发的请求
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
下面我们来详细的看下他们都具体做了些什么:
2.1 打开binder驱动 binder_open()
这里的open函数,将会调用到当前设备的open接口,对于Binder设备来说,就是前面看到的binder_fops成员变量中的binder_open()函数:
//kernel\msm-3.18\drivers\staging\android\binder.c
//*nodp points to the inode of the binder device node
static int binder_open(struct inode *nodp, struct file *filp)
{
//first allocate a binder_proc structure for the calling process
struct binder_proc *proc;
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
//take a reference on the current task
get_task_struct(current);
proc->tsk = current;
INIT_LIST_HEAD(&proc->todo);//initialize the binder_proc todo list
init_waitqueue_head(&proc->wait);
//record the process's default priority (nice value)
proc->default_priority = task_nice(current);
//save the binder_proc describing this process in the file's private_data so later calls can find it
binder_lock(__func__);
binder_stats_created(BINDER_STAT_PROC);
hlist_add_head(&proc->proc_node, &binder_procs);//link this process's proc_node into the global binder_procs list
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
binder_unlock(__func__);
...
return 0;
}
进程初次打开设备文件/dev/binder时,binder驱动为该进程创建一个binder_proc结构体,并将该结构体保存在全局hash列表binder_procs中,proc_node是该hash表的节点;也就是说在Binder驱动中,通过binder_procs记录了所有使用Binder的进程。每个初次打开Binder设备的进程都会被添加到这个列表中的。
这里可以看下binder_proc的数据结构:
//Per-process binder bookkeeping, created in binder_open().
struct binder_proc {
struct hlist_node proc_node; //node linking this binder_proc into the global binder_procs list
// Every process using binder has a thread pool for handling IPC requests.
// 'threads' organizes that pool keyed by thread ID. A process registers
// threads via ioctl; when there are not enough idle threads the driver
// can ask the process to register more.
struct rb_root threads; //rb-tree root of all binder threads of this process
struct rb_root nodes; //rb-tree root of all binder entities (nodes) hosted by this process
struct rb_root refs_by_desc; //rb-tree of binder references, indexed by desc (handle)
struct rb_root refs_by_node; //rb-tree of binder references, indexed by node
//the four rb-trees of binder_proc; "rb" means red-black
int pid; //pid of this process's group leader
struct vm_area_struct *vma; //user-space memory mapping
struct mm_struct *vma_vm_mm; //memory descriptor (mm_struct) associated with the mapping
struct task_struct *tsk; //task_struct of the owning process
struct files_struct *files; //open files of the process
struct hlist_node deferred_work_node;
int deferred_work;
void *buffer; //start address of the kernel virtual mapping
ptrdiff_t user_buffer_offset; //offset between the user mapping and the kernel virtual address
struct list_head buffers;
struct rb_root free_buffers; //rb-tree root of free buffers
struct rb_root allocated_buffers;
size_t free_async_space;
struct page **pages; //actual physical memory pages
size_t buffer_size; //size of the allocated mapping
uint32_t buffer_free; //remaining free buffer space
struct list_head todo; //pending work items
wait_queue_head_t wait; //wait queue
struct binder_stats stats; //binder usage statistics for this process
struct list_head delivered_death;
int max_threads; //max number of threads the driver may ask the process to start
int requested_threads;
int requested_threads_started;
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
};
在binder_proc的结构体定义中,我们看到了4个rb_root域,“rb”的意思是“red black”,即binder_proc的4棵红黑树;其中,nodes树用于记录binder实体,refs_by_desc树和refs_by_node树则用于记录binder代理。之所以会有两个代理树,是为了便于快速查找。
binder_open()小结:
可以看到binder_open()主要做的创建了binder_proc结构体对象,并将当前进程的信息保存在这个binder_proc类型的对象中,然后将这个binder_proc类型的对象插入到binder_proc类型的全局hash表binder_procs中,以便后面的查询处理等操作;
2.2 binder驱动内存映射 binder_mmap()
binder_mmap()主要完成binder驱动将同块物理内存映射至内核空间及用户空间,从而在binder传递数据过程中只需一次数据拷贝,具体代码实现如下:
//kernel\msm-3.18\drivers\staging\android\binder.c
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
struct vm_struct *area;需要映射的内核空间地址信息
struct binder_proc *proc = filp->private_data;//打开文件private_data取出当前proc的binder_proc结构体
const char *failure_string;
struct binder_buffer *buffer;
if (proc->tsk != current)
return -EINVAL;
//然后检查内存映射区域是否大于4M,由前面的在service_manager中binder_open(128*1024),我们知道,这里的内存映射大小是128K。
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M; //保证这块内存最多只有4M
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
mutex_lock(&binder_mmap_lock);
if (proc->buffer) {//判断是否已经映射过
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
//申请一段内存空间给内核进程
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
//得到映射的内核空间虚拟地址首地址
proc->buffer = area->addr;
//计算用户空间与映射的内核空间的地址偏移量
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
mutex_unlock(&binder_mmap_lock);
//得到映射地址的页数,映射分配内存时是按页去分配
proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
if (proc->pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
//需要映射的空间大小
proc->buffer_size = vma->vm_end - vma->vm_start;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
//准备工作做好,接下来就调用binder_update_page_range分配实际的物理内存页面了并映射到用户和内核空间:
if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
//申请的物理内存以binder_buffer的方式放到proc->buffers链表里。
buffer = proc->buffer;
INIT_LIST_HEAD(&proc->buffers);
list_add(&buffer->entry, &proc->buffers);
buffer->free = 1;
binder_insert_free_buffer(proc, buffer);
return 0;
}
来看下binder_update_page_range具体实现:
//kernel\msm-3.18\drivers\staging\android\binder.c
//Allocate physical pages for [start, end) and map each page into both the
//kernel address space and the user-space vma.
static int binder_update_page_range(struct binder_proc *proc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
{
void *page_addr;
unsigned long user_page_addr;
struct page **page;
struct mm_struct *mm;
...
//allocate a physical page for every page in the range
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
BUG_ON(*page);
//allocate the physical page (alloc_page is a core kernel API)
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
//map the page into the kernel address space
ret = map_kernel_range_noflush((unsigned long)page_addr,
PAGE_SIZE, PAGE_KERNEL, page);
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
...
user_page_addr =
(uintptr_t)page_addr + proc->user_buffer_offset;
//map the same physical page into the user process's address space
ret = vm_insert_page(vma, user_page_addr, page[0]);
...
return 0;
...
}
binder_mmap中会把同一块物理页面同时映射到进程空间和内核空间,当需要在两者之间传递数据时,只需要其中任意一方把数据拷贝到物理页面,另一方直接读取即可,也就是说,数据的跨进程传递,只需要一次拷贝就可以完成。这里也是binder驱动设计巧妙之处。
binder_mmap()小结:
2.3 Binder驱动数据读写交互 binder_ioctl()
上面有提到过,当service_manager在调用ioctl()向binder驱动读写数据时即调用了binder驱动中的binder_ioctl()函数:
//kernel\msm-3.18\drivers\staging\android\binder.c
//Dispatch a user-space ioctl on /dev/binder to the matching handler.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
...
binder_lock(__func__);
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ://the core read/write operation of the binder driver
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
case BINDER_SET_MAX_THREADS://set the max binder thread count (e.g. 32 for system_server, typically 15 for other processes)
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
case BINDER_SET_CONTEXT_MGR://service_manager registers itself as the service manager
ret = binder_ioctl_set_ctx_mgr(filp);//creates binder_context_mgr_node and records service_manager's uid in binder_context_mgr_uid
if (ret)
goto err;
break;
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
struct binder_version __user *ver = ubuf;
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
}
default:
ret = -EINVAL;
goto err;
}
ret = 0;
return ret;
}
我们可以看到binder_ioctl()中会根据传入的不同命令进行不同分支的操作,下面我们重点BINDER_WRITE_READ分支,该分支调用了binder_ioctl_write_read()函数:
//kernel\msm-3.18\drivers\staging\android\binder.c
//Handle BINDER_WRITE_READ: perform the write half first, then the read half.
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto out;
}
//copy the caller's binder_write_read descriptor into bwr
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
if (bwr.write_size > 0) {//write path
ret = binder_thread_write(proc, thread,bwr.write_buffer,bwr.write_size,&bwr.write_consumed);
...
}
if (bwr.read_size > 0) {//read path
ret = binder_thread_read(proc, thread, bwr.read_buffer,bwr.read_size,&bwr.read_consumed,filp->f_flags & O_NONBLOCK);
...
}
//on success, copy the updated descriptor (consumed counts) back to the caller
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
...
}
从binder_ioctl_write_read()函数中看出binder_ioctl()实际上在内部实现时区分了读和写驱动,写操作之后接着进行读操作。
2.3.1 binder_ioctl写操作binder_thread_write:
kernel\msm-3.18\drivers\staging\android\binder.c
//Consume BC_* commands from the user-space write buffer one by one.
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
//keep fetching cmd values from the address binder_buffer points to
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))//copy the cmd from user space
return -EFAULT;
ptr += sizeof(uint32_t);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
case BC_INCREFS://obtain a weak reference to a service
case BC_ACQUIRE:// like BC_INCREFS but obtains a strong reference
case BC_RELEASE:
case BC_DECREFS: {}
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {}
case BC_ATTEMPT_ACQUIRE:{}
case BC_ACQUIRE_RESULT:{}
case BC_FREE_BUFFER: {}
case BC_TRANSACTION://copies the user-space binder_transaction_data
case BC_REPLY: {}
case BC_REGISTER_LOOPER:{}
case BC_ENTER_LOOPER:{}//sent when a service is about to enter its loop
case BC_EXIT_LOOPER:{}//a service leaves its loop
case BC_REQUEST_DEATH_NOTIFICATION:
case BC_CLEAR_DEATH_NOTIFICATION: {}
case BC_DEAD_BINDER_DONE: {}
default:
pr_err("%d:%d unknown command %d\n",
proc->pid, thread->pid, cmd);
return -EINVAL;
}
*consumed = ptr - buffer;
}
return 0;
}
1 当一个客户端对ServiceManager请求getService时,ServiceManager将会对Binder驱动发送这两种命令,目的就是把目标Service发送给客户端,同时增加目标Service的引用个数。我们先看BC_ACQUIRE:
kernel\msm-3.18\drivers\staging\android\binder.c
case BC_INCREFS:
case BC_ACQUIRE: {
uint32_t target;
struct binder_ref *ref;
const char *debug_string;
if (get_user(target, (uint32_t __user *)ptr))//1. read the target handle from user space
return -EFAULT;
ptr += sizeof(uint32_t);
if (target == 0 && binder_context_mgr_node &&
(cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
//handle 0: the target service is service_manager
ref = binder_get_ref_for_node(proc,
binder_context_mgr_node);
if (ref->desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
proc->pid, thread->pid,
ref->desc);
}
} else
//any other service: 2. look up the service's reference by the target handle
ref = binder_get_ref(proc, target,
cmd == BC_ACQUIRE ||
cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
break;
}
...//update the reference counts
}
a. 如果是要获取service_manager的binder实体引用则使用binder_get_ref_for_node(proc,binder_context_mgr_node)直接从当前进程的ref红黑树中取得binder_node是binder_context_mgr_node的实体;
//kernel\msm-3.18\drivers\staging\android\binder.c
//Find (or create) this process's binder_ref for the given binder_node.
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
struct binder_node *node)
{
struct rb_node *n;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
struct binder_ref *ref, *new_ref;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
if (node < ref->node)
p = &(*p)->rb_left;
else if (node > ref->node)
p = &(*p)->rb_right;
else
return ref;
}
//not in proc->refs_by_node: create a new ref and insert it into both the refs_by_desc and refs_by_node trees
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
...
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
...
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
if (node) {
hlist_add_head(&new_ref->node_entry, &node->refs);
}
return new_ref;
}
b. 如果是其他service,需要根据传入的进程binder_proc对象和desc的service描述,在binder_proc结构体中的refs_by_desc红黑树中中查找相应的节点,将取到的节点传递给client:
//kernel\msm-3.18\drivers\staging\android\binder.c
//Look up a binder_ref by its descriptor (handle) in proc->refs_by_desc.
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
uint32_t desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (desc < ref->desc) {
n = n->rb_left;
} else if (desc > ref->desc) {
n = n->rb_right;
} else if (need_strong_ref && !ref->strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
return ref;
}
}
return NULL;
}
c. 完成查找任务后需要根据是强引用还是弱引用更新目标service的引用基数:
//kernel\msm-3.18\drivers\staging\android\binder.c
//Increment a reference's strong or weak count, bumping the underlying
//node's count on the 0 -> 1 transition.
static int binder_inc_ref(struct binder_ref *ref, int strong,
struct list_head *target_list)
{
int ret;
if (strong) {
if (ref->strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
ref->strong++;
} else {
if (ref->weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
ref->weak++;
}
return 0;
}
2. 当一个客户端需要传递数据给服务端时就会走BC_TRANSACTION分支:
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))//copy the binder_transaction_data from user space into tr
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);//carry out the transaction
break;
}
我们看下binder_transaction()函数的实现:
//kernel\msm-3.18\drivers\staging\android\binder.c
//Core transaction routine: locate the target process/thread, copy the
//payload into a buffer allocated from the target's mapped area, queue the
//work on the target, and wake it up.
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
if (reply) {
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
binder_user_error();
}
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error();
}
target_proc = target_thread->proc;
} else {
if (tr->target.handle) {//nonzero handle: target is a service other than service_manager
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle, true);// find the binder_ref by handle,
target_node = ref->node;//then the binder_node via the binder_ref
} else {//service_manager's handle is 0
target_node = binder_context_mgr_node;//use service_manager's binder_node
}
e->to_node = target_node->debug_id;
target_proc = target_node->proc;//the binder_node leads to the target binder_proc
...
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack; //walk the transaction stack,
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)//looking for a target-process thread already in a call with us
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
if (target_thread) {//a target thread was found: queue on that thread
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {//first transaction: no target thread yet, queue on the process
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread; //synchronous call: record the sending thread in the transaction's from field
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;//target process of this communication
t->to_thread = target_thread;
t->code = tr->code;//command code of this communication
t->flags = tr->flags;
t->priority = task_nice(current);
//allocate the data buffer from the target process's mapped area
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
...
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
...
//copy ptr.buffer and ptr.offsets of the user-space binder_transaction_data into the kernel
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
}
...
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {//not a reply and not oneway: push onto the transaction stack
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else { //not a reply but oneway: use the async todo queue
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
//add the new transaction to the target queue
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
//add BINDER_WORK_TRANSACTION_COMPLETE to the current thread's todo queue
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait) {
if (reply || !(t->flags & TF_ONE_WAY)) {
preempt_disable();
wake_up_interruptible_sync(target_wait);//wake up the target's wait queue
preempt_enable_no_resched();
}
else {
wake_up_interruptible(target_wait);
}
}
return;
}
代码看下来,binder_trasaction主要做了以下几个操作:a.查询目标进程得到目标进程的binder_proc结构体信息;
b.将BINDER_WORK_TRANSACTION添加到目标队列target_list, 首次发起事务则目标队列为target_proc->todo, reply事务时则为target_thread->todo; oneway的非reply事务,则为target_node->async_todo;
c.将BINDER_WORK_TRANSACTION_COMPLETE添加到当前线程的todo队列。
2.3.2 binder_thread_read()
当前线程的todo队列里有数据, 接下来便会进入binder_thread_read()来处理相关的事务.
//Fetch pending work for this thread/process and copy the corresponding
//BR_* commands plus payload back to user space.
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
...
retry:
//does the thread itself have work to read? false when its todo list has entries
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
...
if (wait_for_proc_work) {
...
if (non_block) {
...
} else
//no thread-local work: block on the process wait queue until work arrives
//(fixed: this note was bare prose with no comment delimiter)
ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else {
if (non_block) {
...
} else
//wait on the thread's own wait queue
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
//enter read mode
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
return ret;//non-blocking call: return immediately
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo)) {
//take the first node of the thread's todo list
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
// take work from the process's todo list
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
//no data: go back to retry and wait again
if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
//copy the command into user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
...
} break;
case BINDER_WORK_NODE: {
break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
break;
}
if (!t)//only BINDER_WORK_TRANSACTION set t; otherwise fetch the next work item
continue;
if (t->buffer->target_node) {
...
cmd = BR_TRANSACTION;//deliver the request to the server side
} else {
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;//deliver the reply back to the client side
}
...
//write the cmd and transaction data back to user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
...
break;
}
done:
*consumed = ptr - buffer;
//Spawn a new binder thread when requested+ready threads are 0, started
//threads are below max_threads, and this thread's looper state is
//registered or entered.
if (proc->requested_threads + proc->ready_threads == 0 &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
// emit BR_SPAWN_LOOPER telling user space to create a new thread
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
}
return 0;
}
binder_ioctl小结:
四、Binder驱动学习总结
参考博客:
Android开发之Binder机制详解: http://www.android100.org/html/201402/23/5709.html
binder驱动-------之数据结构篇1 : http://blog.csdn.net/xiaojsj111/article/details/27361651
Service与Android系统设计(7)--- Binder驱动 : http://blog.csdn.net/21cnbao/article/details/8087354
理解Android Binder机制(1/3):驱动篇 : http://qiangbo.space/2017-01-15/AndroidAnatomy_Binder_Driver/
彻底理解Android Binder通信架构 : http://gityuan.com/2016/09/04/binder-start-service/
Binder机制2---Binder的数据结构以及Binder驱动:http://www.jcodecraeer.com/a/anzhuokaifa/androidkaifa/2015/0319/2620.html