Service Manager: the manager role in the Binder mechanism
The Service Manager process is the context manager (context_manager): it brokers between the client and server roles.
- Service Manager talks to the Binder driver directly through binder.c.
- Every other service talks to the Binder driver through IPCThreadState (libbinder.so).
frameworks/native/cmds/servicemanager
- binder.c
- binder.h
- service_manager.c
1. Becoming the manager: BINDER_SET_CONTEXT_MGR
Service Manager
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
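For context, this is roughly how service_manager.c drives it at startup: open the driver, claim the manager role, then enter the dispatch loop. A condensed sketch (the exact code varies across AOSP versions):

int main(int argc, char **argv)
{
    struct binder_state *bs;

    bs = binder_open(128 * 1024);            /* open /dev/binder and mmap 128 KB */
    if (!bs)
        return -1;

    if (binder_become_context_manager(bs))   /* issues BINDER_SET_CONTEXT_MGR */
        return -1;

    binder_loop(bs, svcmgr_handler);         /* serve lookups/registrations forever */
    return 0;
}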
Binder Driver
ioctl handling
//BINDER_WRITE_READ
//BINDER_SET_MAX_THREADS
//BINDER_SET_CONTEXT_MGR
//kernel
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        return ret;

    mutex_lock(&binder_lock);
    thread = binder_get_thread(proc); /* look up the calling thread; create a binder_thread if none exists */
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    ......
    case BINDER_SET_CONTEXT_MGR:
        if (binder_context_mgr_node != NULL) {
            printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
            ret = -EBUSY;
            goto err;
        }
        /* Two globals record the manager's binder node and uid:
         *   static struct binder_node *binder_context_mgr_node;  // the context_mgr binder entity
         *   static uid_t binder_context_mgr_uid = -1;            // euid of the context_mgr daemon
         */
        if (binder_context_mgr_uid != -1) {
            if (binder_context_mgr_uid != current->cred->euid) {
                printk(KERN_ERR "binder: BINDER_SET_"
                       "CONTEXT_MGR bad uid %d != %d\n",
                       current->cred->euid,
                       binder_context_mgr_uid);
                ret = -EPERM;
                goto err;
            }
        } else
            binder_context_mgr_uid = current->cred->euid;
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto err;
        }
        /* initialize the reference counts */
        binder_context_mgr_node->local_weak_refs++;
        binder_context_mgr_node->local_strong_refs++;
        binder_context_mgr_node->has_strong_ref = 1;
        binder_context_mgr_node->has_weak_ref = 1;
        break;
    ......
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    mutex_unlock(&binder_lock);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
        printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
    return ret;
}
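Both failure paths above are observable from user space. A hypothetical probe (not part of servicemanager; the header providing BINDER_SET_CONTEXT_MGR varies by tree) would get -EBUSY once a manager exists, or -EPERM if the caller's euid does not match the recorded one:

/* Hypothetical probe: try to claim the context-manager role ourselves. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "binder.h"   /* assumed local header defining BINDER_SET_CONTEXT_MGR */

int try_become_manager(void)
{
    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0)
        return -1;
    if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
        /* EBUSY: binder_context_mgr_node is already set (servicemanager won the race)
         * EPERM: our euid differs from the recorded binder_context_mgr_uid */
        perror("BINDER_SET_CONTEXT_MGR");
        return -1;
    }
    return fd;
}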
Key structures in the binder driver
binder_thread
binder_thread represents the thread that issues the ioctl (here, the thread running binder_become_context_manager).
Each binder_thread hangs off its binder_proc: it is an rb_node in the proc's threads red-black tree (rb_root).
Binder Driver
struct binder_thread {
    /* the binder_proc this thread belongs to */
    struct binder_proc *proc;
    struct rb_node rb_node;
    int pid;
    /* looper holds the thread's state flags (see the enum below) */
    int looper;
    /* the transaction this thread is currently working on */
    struct binder_transaction *transaction_stack;
    struct list_head todo;
    /* error codes reported back when a write fails */
    uint32_t return_error; /* Write failed, return error code in read buf */
    uint32_t return_error2; /* Write failed, return error code in read */
        /* buffer. Used when sending a reply to a dead process that */
        /* we are also waiting on */
    wait_queue_head_t wait;
    struct binder_stats stats;
};
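transaction_stack and todo together decide where a thread looks for work. The driver's read path contains essentially this test, shown here in isolation as a sketch (wait_for_proc_work is a hypothetical helper wrapping the real inline check):

static int wait_for_proc_work(struct binder_thread *thread)
{
    /* a thread with no in-flight transaction and an empty private todo
     * queue pulls work from the process-wide proc->todo instead */
    return thread->transaction_stack == NULL && list_empty(&thread->todo);
}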
binder_proc
/* global hash lists; binder_procs links every binder_proc in the system */
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);
struct binder_proc {
    /* links this proc into the global binder_procs hash list */
    struct hlist_node proc_node;
    /* red-black tree of the binder_threads that service requests for this process */
    struct rb_root threads;
    /* red-black tree of all binder entities (nodes) owned by this process */
    struct rb_root nodes;
    /* red-black tree of binder references, sorted by handle (descriptor) */
    struct rb_root refs_by_desc;
    /* red-black tree of binder references, sorted by the address of the referenced binder node */
    struct rb_root refs_by_node;
    /* process id */
    int pid;
    /* the user-space virtual memory area (from the process's mmap of /dev/binder) */
    struct vm_area_struct *vma;
    struct mm_struct *vma_vm_mm;
    struct task_struct *tsk;
    struct files_struct *files;
    struct hlist_node deferred_work_node;
    int deferred_work;
    /* kernel-space start address of the memory mapped for this process */
    void *buffer;
    /* offset between the kernel virtual address and the process (user) virtual address */
    ptrdiff_t user_buffer_offset;
    /* list linking all buffer blocks end to end, in address order */
    struct list_head buffers;
    /* red-black tree of free buffer blocks (virtual memory already backed by physical pages) */
    struct rb_root free_buffers;
    /* red-black tree of buffer blocks that have been handed out (allocated) */
    struct rb_root allocated_buffers;
    size_t free_async_space;
    /* array of physical pages backing the buffer */
    struct page **pages;
    /* total size of the mapped memory */
    size_t buffer_size;
    uint32_t buffer_free;
    /* the process's pending work queue */
    struct list_head todo;
    wait_queue_head_t wait;
    struct binder_stats stats;
    struct list_head delivered_death;
    int max_threads;
    int requested_threads;
    int requested_threads_started;
    int ready_threads;
    long default_priority;
    struct dentry *debugfs_entry;
};
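user_buffer_offset is what makes Binder's single-copy design work: the driver copies transaction data into the kernel-mapped buffer once, then hands the receiver a user-space pointer computed by adding this offset (binder_transaction does essentially this). A minimal sketch, where to_user_ptr is a hypothetical helper and kbuf a kernel pointer inside proc->buffer:

/* Sketch: convert a kernel address inside proc->buffer into the address
 * the target process sees through its mmap of /dev/binder. */
static void __user *to_user_ptr(struct binder_proc *proc, void *kbuf)
{
    return (void __user *)((char *)kbuf + proc->user_buffer_offset);
}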
/* looper: thread state flags */
enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01, /* a non-main thread of the process has entered the binder loop */
    BINDER_LOOPER_STATE_ENTERED     = 0x02, /* the main thread of the process has entered the binder loop */
    BINDER_LOOPER_STATE_EXITED      = 0x04, /* the thread has left the binder loop */
    BINDER_LOOPER_STATE_INVALID     = 0x08, /* the thread is in an invalid state (an error occurred) */
    BINDER_LOOPER_STATE_WAITING     = 0x10, /* the thread's todo queue is empty; it is waiting for work */
    BINDER_LOOPER_STATE_NEED_RETURN = 0x20  /* the thread must return to the process's user space with data */
};
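These values are bit flags, so one thread can carry several states at once and the driver tests them with masks. An illustrative helper (hypothetical; the driver does this check inline):

static int thread_in_loop(struct binder_thread *thread)
{
    /* has this thread entered the binder loop, in either role? */
    return (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                              BINDER_LOOPER_STATE_ENTERED)) != 0;
}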
binder_node: the Binder entity
struct binder_node {
    int debug_id;
    struct binder_work work;
    /* While alive: rb_node links this node into proc->nodes, the red-black tree
     * of all binder entities owned by the process.
     * After destruction: dead_node links it into a hash list of dead nodes. */
    union {
        struct rb_node rb_node;
        struct hlist_node dead_node;
    };
    /* the process this binder entity belongs to */
    struct binder_proc *proc;
    /* hash list chaining together all binder references that point at this node */
    struct hlist_head refs;
    /* reference counts */
    int internal_strong_refs;
    int local_weak_refs;
    int local_strong_refs;
    void __user *ptr;
    void __user *cookie;
    unsigned has_strong_ref : 1;
    unsigned pending_strong_ref : 1;
    unsigned has_weak_ref : 1;
    unsigned pending_weak_ref : 1;
    unsigned has_async_transaction : 1;
    unsigned accept_fds : 1;
    int min_priority : 8;
    struct list_head async_todo;
};
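The refs list only makes sense next to its counterpart: every process that uses this node holds a binder_ref of its own, and that one structure is what threads proc->refs_by_desc, proc->refs_by_node, and node->refs together. For reference, the companion structure from the same generation of the driver (fields may differ slightly between kernel versions):

struct binder_ref {
    int debug_id;
    struct rb_node rb_node_desc;   /* links into proc->refs_by_desc, sorted by handle */
    struct rb_node rb_node_node;   /* links into proc->refs_by_node, sorted by node address */
    struct hlist_node node_entry;  /* links into node->refs, the list shown above */
    struct binder_proc *proc;      /* the process that owns this reference */
    struct binder_node *node;      /* the binder entity it refers to */
    uint32_t desc;                 /* the handle handed to user space */
    int strong;
    int weak;
    struct binder_ref_death *death;
};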
binder_get_thread searches the proc's threads tree for the binder_thread node matching the current thread:
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;