Binder Analysis

I'll split the Binder analysis into three parts: the first covers the binder driver, the second covers the ServiceManager startup flow, and the third covers how a service (AMS) registers itself with ServiceManager and how a client obtains AMS through ServiceManager.

(Only the key code is quoted here; to read the complete source, download it yourself from the paths given.)

 

I. Binder Driver Analysis


mmap --- lets a range of virtual memory be backed directly by a chosen piece of physical memory. The binder driver sets up such a mapping with server processes like ServiceManager, so the data only has to be copied once; whenever a server talks to binder, it goes through this memory mapping.
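To make the one-copy idea concrete, here is a minimal user-space sketch (my own illustration, not code quoted from the sources analyzed below) of the open + mmap pair that both servicemanager and ProcessState perform; the driver-side handlers it triggers, binder_open and binder_mmap, are covered in the sections that follow.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

int main() {
    int fd = open("/dev/binder", O_RDWR); // creates a binder_proc in the driver
    if (fd < 0) { perror("open"); return 1; }

    size_t mapsize = 128 * 1024; // the size servicemanager uses; must be page-aligned
    void* mapped = mmap(nullptr, mapsize, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mapped == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

    // From now on, data another process sends us is written by the kernel
    // directly into the physical pages behind `mapped`: one copy in total.
    printf("binder buffer mapped at %p\n", mapped);

    munmap(mapped, mapsize);
    close(fd);
    return 0;
}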

1.binder_init

kernel/drivers/staging/android/binder.c

// 4290 Device-driver entry point
device_initcall(binder_init);

Step into the binder_init function:

// 4213
static int __init binder_init(void) 

// 4220 Create a single-threaded workqueue named "binder"
binder_deferred_workqueue = create_singlethread_workqueue("binder");

// 4269
ret = init_binder_device(device_name)

Now step into init_binder_device:

kernel/drivers/staging/android/binder.c

// 4186
static int __init init_binder_device(const char *name)
{
int ret;
struct binder_device *binder_device;
// 4191 Allocate memory for the binder device
binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
// 4195 Initialize the device
binder_device->miscdev.fops = &binder_fops; // the device's file_operations table
binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; // minor number, dynamically allocated
binder_device->miscdev.name = name; // device name, "binder"
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
// 4202 Register the misc driver
ret = misc_register(&binder_device->miscdev);
// 4208 Add the hlist node to the device list headed by binder_devices
hlist_add_head(&binder_device->hlist, &binder_devices);
return ret;

}

The binder_fops structure maps the file operations issued from the native layer (open, mmap, ioctl, ...) onto the driver's handler functions:

static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};

binder_init mainly does the following:

1. Allocates memory for the device
2. Initializes the device
3. Adds it to the binder_devices list

2.binder_open

static int binder_open(struct inode *nodp, struct file *filp)


// 3462 Allocate kernel memory for the binder_proc structure
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
// 3465 Save the current thread's task into the binder proc's tsk
get_task_struct(current);
proc->tsk = current;
INIT_LIST_HEAD(&proc->todo); // initialize the todo list
init_waitqueue_head(&proc->wait); // initialize the wait queue
proc->default_priority = task_nice(current); // record the current task's nice value as the default priority
// 3474 Take the lock: binder supports concurrent access from multiple threads
binder_lock(__func__);
binder_stats_created(BINDER_STAT_PROC); // bump the count of created binder_proc objects
hlist_add_head(&proc->proc_node, &binder_procs); // add proc_node to the head of the binder_procs list
proc->pid = current->group_leader->pid; // process pid
INIT_LIST_HEAD(&proc->delivered_death); // initialize the list of delivered death notifications
filp->private_data = proc; // attach this binder_proc to filp, so the proc can be found again through filp

binder_unlock(__func__); // release the lock

binder_open mainly does the following:

1. Creates a binder_proc object
2. Saves the current process's information into proc
3. filp->private_data = proc;
4. Adds proc to the binder_procs list

3.binder_mmap

kernel/drivers/staging/android/binder.c
// 3355
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
// 3366 Cap the mapped size at 4 MB
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M;


// 3382 Take the lock so only one mapping is set up at a time, serializing concurrent callers
mutex_lock(&binder_mmap_lock);


// Has a mapping already been made? If so, enter the if, goto out, release the lock, and leave binder_mmap
if (proc->buffer) {
goto err_already_mapped;
}


// Allocate a contiguous range of kernel virtual memory (VM_IOREMAP), the same size as the process's virtual area
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);


// Bail out if the allocation failed
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
goto err_get_vm_area_failed;
}


// Point proc->buffer at this kernel virtual memory
proc->buffer = area->addr;


// Compute the offset between user space and kernel space: offset = user virtual address - kernel virtual address
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;


mutex_unlock(&binder_mmap_lock); // release the lock


// 3407 Allocate the array of physical-page pointers, one entry per page of the vma
proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);


// 3418 Allocate physical pages and map them into both kernel and process address space; start with a single page
if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {

//3425

list_add(&buffer->entry, &proc->buffers); // link the buffer into the buffers list
buffer->free = 1; // this memory is available
binder_insert_free_buffer(proc, buffer); // insert the buffer into proc->free_buffers
proc->free_async_space = proc->buffer_size / 2; // free space available for async transactions
barrier();
proc->files = get_files_struct(current);
proc->vma = vma;
proc->vma_vm_mm = vma->vm_mm;

 

Step into binder_update_page_range:
kernel/drivers/staging/android/binder.c


// 576
static int binder_update_page_range(struct binder_proc *proc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)


// 609 allocate == 1 means we are allocating memory; 0 means we are freeing it
if (allocate == 0)
goto free_range;


// 624 Allocate one physical page (4 KB)
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);


// 630 Map the physical page into kernel virtual address space
ret = map_kernel_range_noflush((unsigned long)page_addr,
PAGE_SIZE, PAGE_KERNEL, page);


// 641 Map the same physical page into the process's virtual address space
ret = vm_insert_page(vma, user_page_addr, page[0]);


Back in binder_mmap (// 3425), the newly allocated buffer has been linked into proc->buffers and marked free (the lines quoted above); the next call of interest is binder_insert_free_buffer.

 

3-1.binder_insert_free_buffer

kernel/drivers/staging/android/binder.c
// 494
static void binder_insert_free_buffer(struct binder_proc *proc,
struct binder_buffer *new_buffer)
// 511
while (*p) {
parent = *p;
buffer = rb_entry(parent, struct binder_buffer, rb_node);
// compute the size of this free buffer
buffer_size = binder_buffer_size(proc, buffer);
if (new_buffer_size < buffer_size)
p = &parent->rb_left;
else
p = &parent->rb_right;
}
rb_link_node(&new_buffer->rb_node, parent, p);
// insert the buffer into the proc->free_buffers red-black tree
rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);

 

struct vm_struct *area; --- the kernel's virtual memory area

vma --- the process's virtual memory area --- capped at 4 MB by the driver

binder_mmap mainly does the following:

1. Allocates a block of kernel virtual memory the same size as the user-space virtual memory
2. Allocates one page (4 KB) of physical memory
3. Maps that physical page into both the user-space virtual memory and the kernel virtual memory
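A tiny sketch (my own illustration in user-space C++; the variable names mirror the driver fields above, and the two addresses are made up) of the arithmetic that user_buffer_offset enables. Once the offset is recorded, any address inside the kernel buffer converts to the matching user-space address with a single addition, because both ranges are backed by the same physical pages.

#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical values standing in for proc->buffer (from get_vm_area)
    // and vma->vm_start (from the process's mmap call).
    uintptr_t kernel_buffer = 0xffffff8000400000u;
    uintptr_t user_start    = 0x0000007f9c200000u;

    // binder_mmap: proc->user_buffer_offset = vma->vm_start - proc->buffer
    uintptr_t user_buffer_offset = user_start - kernel_buffer;

    // For any kernel address inside the buffer, the user-space view is
    // kernel address + offset; no data is copied to cross the boundary.
    uintptr_t kernel_addr = kernel_buffer + 0x128;
    uintptr_t user_addr   = kernel_addr + user_buffer_offset;

    printf("kernel 0x%zx <-> user 0x%zx\n", (size_t)kernel_addr, (size_t)user_addr);
    return 0;
}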

 

 

 

4.binder_ioctl

 

kernel/drivers/staging/android/binder.c
// 3241
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
// 3254 sleep here until woken
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
// 3259 look up the binder_thread in binder_proc by the current pid:
// if the current thread is already in the proc's thread tree it is returned directly,
// otherwise a binder_thread is created and the current thread is added to the proc
thread = binder_get_thread(proc);
// 3265 perform the binder read/write
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)

4-1. Step into binder_ioctl_write_read

kernel/drivers/staging/android/binder.c

//3136

static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)


// 3150 Copy the user-space data ubuf into bwr
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
// 3160
if (bwr.write_size > 0) { // the write buffer has data: perform a binder write
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
}
if (bwr.read_size > 0) { // the read buffer has room: perform a binder read
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
// if the proc's todo queue is not empty, wake the threads waiting on it
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
}
// 3192 Copy the kernel-space bwr back out to ubuf
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))

 
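Every user-space interaction with binder funnels through this one BINDER_WRITE_READ ioctl. As a hedged sketch using only the kernel UAPI structures (BC_ENTER_LOOPER is the same command servicemanager sends in Part II; the header path linux/android/binder.h is an assumption about your kernel headers):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder.h> // binder_write_read, BC_*, BINDER_WRITE_READ
#include <cstdint>
#include <cstdio>

int main() {
    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }

    uint32_t cmd = BC_ENTER_LOOPER; // a single command in the write buffer

    struct binder_write_read bwr = {};
    bwr.write_buffer   = (uintptr_t)&cmd; // consumed in binder_thread_write
    bwr.write_size     = sizeof(cmd);
    bwr.write_consumed = 0;
    bwr.read_size      = 0;               // nothing to read: binder_thread_read is skipped

    if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
        perror("ioctl(BINDER_WRITE_READ)");

    close(fd);
    return 0;
}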

The binder_proc structure

One of these is created every time a process open()s the binder driver; it manages all the state that process needs for IPC.

struct binder_proc {
struct hlist_node proc_node; // node in the global process list
struct rb_root threads; // root of the binder_thread red-black tree
struct rb_root nodes; // root of the binder_node red-black tree
struct rb_root refs_by_desc; // root of the binder_ref tree keyed by handle
struct rb_root refs_by_node; // root of the binder_ref tree keyed by node ptr
int pid; // pid of the owning process
struct vm_area_struct *vma; // pointer to the process's virtual address area
struct mm_struct *vma_vm_mm; // the process's memory descriptor
struct task_struct *tsk; // the process's task structure
struct files_struct *files; // the process's file table
struct hlist_node deferred_work_node;
int deferred_work;
void *buffer; // start address of the kernel-space buffer
ptrdiff_t user_buffer_offset; // offset between kernel space and user space
struct list_head buffers; // all buffers
struct rb_root free_buffers; // free buffers
struct rb_root allocated_buffers; // allocated buffers
size_t free_async_space; // free space available for async transactions
struct page **pages; // pointer to the array of physical-page pointers
size_t buffer_size; // size of the mapped kernel space
uint32_t buffer_free; // total free memory
struct list_head todo; // work queued for this process
wait_queue_head_t wait; // wait queue
struct binder_stats stats; // binder statistics
struct list_head delivered_death; // delivered death notifications
int max_threads; // maximum number of threads
int requested_threads; // number of requested threads
int requested_threads_started; // number of requested threads already started
int ready_threads; // number of threads ready for work
long default_priority; // default priority
struct dentry *debugfs_entry;
struct binder_context *context;

};

 

 

 

 

 

That wraps up the driver itself.

Next we look at the native layer and how it connects to the binder driver, since the Java layer can only reach the driver through the native layer (via JNI).

At boot, the system starts the zygote process.

zygote is created by the init process when it parses the init.zygote.rc file. The executable behind zygote is app_process, its source file is app_main.cpp, and its process name is zygote.

// system/core/rootdir/init.zygote32.rc

service zygote /system/bin/app_process -Xzygote /system/bin --zygote --start-system-server
class main
socket zygote stream 660 root system
onrestart write /sys/android_power/request_state wake
onrestart write /sys/power/state on
onrestart restart media
onrestart restart netd
writepid /dev/cpuset/foreground/tasks

The entry point that starts zygote is the main() function in app_main.cpp.

frameworks/base/cmds/app_process/app_main.cpp
// 186
int main(int argc, char* const argv[])
// 248 Set the zygote flag to true
if (strcmp(arg, "--zygote") == 0) {
zygote = true;
}
// 306 Run AndroidRuntime.cpp's start method
if (zygote) {
runtime.start("com.android.internal.os.ZygoteInit", args, zygote);
}

Step into AndroidRuntime::start:

frameworks/base/core/jni/AndroidRuntime.cpp
// 1007
void AndroidRuntime::start(const char* className, const Vector<String8>&
options, bool zygote)
// 1051
if (startReg(env) < 0){}

Step into startReg:

frameworks/base/core/jni/AndroidRuntime.cpp

// 1440
int AndroidRuntime::startReg(JNIEnv* env)

// 1459 Register the JNI methods
if (register_jni_procs(gRegJNI, NELEM(gRegJNI), env) < 0) {

Step into register_jni_procs:

frameworks/base/core/jni/AndroidRuntime.cpp

// 1283
static int register_jni_procs(const RegJNIRec array[], size_t count, JNIEnv* env)
{
// register each JNI table in a loop
for (size_t i = 0; i < count; i++) {
if (array[i].mProc(env) < 0) {
return -1;
}
}
return 0;
}
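Every entry's mProc ultimately boils down to a RegisterNatives call. A hedged sketch of that pattern (the class com/example/Foo and its add method are hypothetical, invented to mirror the shape of tables like gBinderMethods shown below):

#include <jni.h>

// Hypothetical native implementation for a Java method
//     class com.example.Foo { static native int add(int a, int b); }
static jint Foo_add(JNIEnv*, jclass, jint a, jint b) {
    return a + b;
}

// The { name, signature, function } triples, same shape as gBinderMethods
static const JNINativeMethod gFooMethods[] = {
    { "add", "(II)I", (void*)Foo_add },
};

// What a RegJNIRec::mProc-style function does: find the class, then bind
// the table of native methods to it.
static int register_com_example_Foo(JNIEnv* env) {
    jclass clazz = env->FindClass("com/example/Foo");
    if (clazz == nullptr) return -1;
    return env->RegisterNatives(clazz, gFooMethods,
                                sizeof(gFooMethods) / sizeof(gFooMethods[0]));
}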

Step into gRegJNI:

frameworks/base/core/jni/AndroidRuntime.cpp

static const RegJNIRec gRegJNI[] = {
// 1312
REG_JNI(register_android_os_Binder),
}

Step into register_android_os_Binder:

frameworks/base/core/jni/android_util_Binder.cpp
// 1282
int register_android_os_Binder(JNIEnv* env)
{
if (int_register_android_os_Binder(env) < 0)
return -1;
if (int_register_android_os_BinderInternal(env) < 0)
return -1;
if (int_register_android_os_BinderProxy(env) < 0)
return -1;
}

Step into int_register_android_os_Binder:

frameworks/base/core/jni/android_util_Binder.cpp
// (excerpt of the gBinderMethods table, which maps Java methods on android.os.Binder to native functions)
static const JNINativeMethod gBinderMethods[] = {
/* name, signature, funcPtr */
{ "setThreadStrictModePolicy", "(I)V",
(void*)android_os_Binder_setThreadStrictModePolicy },
{ "getThreadStrictModePolicy", "()I",
(void*)android_os_Binder_getThreadStrictModePolicy },
{ "flushPendingCommands", "()V",
(void*)android_os_Binder_flushPendingCommands },
{ "init", "()V", (void*)android_os_Binder_init },
{ "destroy", "()V", (void*)android_os_Binder_destroy },
{ "blockUntilThreadAvailable", "()V",
(void*)android_os_Binder_blockUntilThreadAvailable }
};
// 857
const char* const kBinderPathName = "android/os/Binder";
// 859
static int int_register_android_os_Binder(JNIEnv* env)
{
// look up kBinderPathName = "android/os/Binder" and return its Class object
jclass clazz = FindClassOrDie(env, kBinderPathName);
// the gBinderOffsets structure caches the Java-level Binder class info, giving the JNI layer a channel into the Java layer
gBinderOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
gBinderOffsets.mExecTransact = GetMethodIDOrDie(env, clazz, "execTransact",
"(IJJI)Z");
gBinderOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
// RegisterMethodsOrDie binds the gBinderMethods array, giving the Java layer a channel into the JNI layer
return RegisterMethodsOrDie(
env, kBinderPathName,
gBinderMethods, NELEM(gBinderMethods));

 

int_register_android_os_BinderInternal

frameworks/base/core/jni/android_util_Binder.cpp
// 925
static const JNINativeMethod gBinderInternalMethods[] = {
/* name, signature, funcPtr */
{ "getContextObject", "()Landroid/os/IBinder;",
(void*)android_os_BinderInternal_getContextObject },
{ "joinThreadPool", "()V", (void*)android_os_BinderInternal_joinThreadPool
},
{ "disableBackgroundScheduling", "(Z)V",
(void*)android_os_BinderInternal_disableBackgroundScheduling },
{ "handleGc", "()V", (void*)android_os_BinderInternal_handleGc }
};
// 933
const char* const kBinderInternalPathName =
"com/android/internal/os/BinderInternal";
// 935
static int int_register_android_os_BinderInternal(JNIEnv* env)

{
// look up kBinderInternalPathName = "com/android/internal/os/BinderInternal" and return its Class object
jclass clazz = FindClassOrDie(env, kBinderInternalPathName);
// gBinderInternalOffsets caches the Java-level BinderInternal class info, giving the JNI layer a channel into the Java layer
gBinderInternalOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
gBinderInternalOffsets.mForceGc = GetStaticMethodIDOrDie(env, clazz,
"forceBinderGc", "()V");
// RegisterMethodsOrDie() binds the gBinderInternalMethods array, giving the Java layer a channel into the JNI layer
return RegisterMethodsOrDie(
env, kBinderInternalPathName,
gBinderInternalMethods, NELEM(gBinderInternalMethods));
}

int_register_android_os_BinderProxy

frameworks/base/core/jni/android_util_Binder.cpp
// 1241
static const JNINativeMethod gBinderProxyMethods[] = {
/* name, signature, funcPtr */
{"pingBinder", "()Z", (void*)android_os_BinderProxy_pingBinder},
{"isBinderAlive", "()Z", (void*)android_os_BinderProxy_isBinderAlive},
{"getInterfaceDescriptor", "()Ljava/lang/String;",
(void*)android_os_BinderProxy_getInterfaceDescriptor},
{"transactNative", "(ILandroid/os/Parcel;Landroid/os/Parcel;I)Z",
(void*)android_os_BinderProxy_transact},
{"linkToDeath", "(Landroid/os/IBinder$DeathRecipient;I)V",
(void*)android_os_BinderProxy_linkToDeath},
{"unlinkToDeath", "(Landroid/os/IBinder$DeathRecipient;I)Z",
(void*)android_os_BinderProxy_unlinkToDeath},
{"destroy", "()V", (void*)android_os_BinderProxy_destroy},
};

// 1252
const char* const kBinderProxyPathName = "android/os/BinderProxy";
// 1254
static int int_register_android_os_BinderProxy(JNIEnv* env)
{
// first cache java/lang/Error, used when transactions fail
jclass clazz = FindClassOrDie(env, "java/lang/Error");
gErrorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
// look up kBinderProxyPathName = "android/os/BinderProxy" and cache the Java-level BinderProxy class info in gBinderProxyOffsets, giving the JNI layer a channel into the Java layer
clazz = FindClassOrDie(env, kBinderProxyPathName);
gBinderProxyOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
gBinderProxyOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>", "()V");
gBinderProxyOffsets.mSendDeathNotice = GetStaticMethodIDOrDie(env, clazz,
"sendDeathNotice",
"(Landroid/os/IBinder$DeathRecipient;)V");
gBinderProxyOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
gBinderProxyOffsets.mSelf = GetFieldIDOrDie(env, clazz, "mSelf",
"Ljava/lang/ref/WeakReference;");
gBinderProxyOffsets.mOrgue = GetFieldIDOrDie(env, clazz, "mOrgue", "J");
clazz = FindClassOrDie(env, "java/lang/Class");
gClassOffsets.mGetName = GetMethodIDOrDie(env, clazz, "getName", "()Ljava/lang/String;");
// RegisterMethodsOrDie() binds the gBinderProxyMethods array, giving the Java layer a channel into the JNI layer
return RegisterMethodsOrDie(
env, kBinderProxyPathName,
gBinderProxyMethods, NELEM(gBinderProxyMethods));

}

That concludes the native layer.

 

II. The ServiceManager Startup Flow

For IPC, a process first goes through binder to reach ServiceManager, and then uses it to obtain the IBinder objects of services such as AMS and PMS.

1.Starting the servicemanager process

ServiceManager is created by the init process when it parses init.rc. Its executable is servicemanager, its source file is service_manager.c, and its process name is servicemanager.

system/core/rootdir/init.rc
// 602
service servicemanager /system/bin/servicemanager
class core
user system
group system
critical
onrestart restart healthd
onrestart restart zygote
onrestart restart media
onrestart restart surfaceflinger
onrestart restart drm

2.main

The entry point that starts ServiceManager is the main() function in service_manager.c.

frameworks/native/cmds/servicemanager/service_manager.c
// 354
int main(int argc, char **argv)
// 358 open the binder driver and request a 128 KB mapping --- see the sections below
bs = binder_open(128*1024);
// 364 become the daemon that manages binder, i.e. the context manager --- see the sections below
if (binder_become_context_manager(bs)) {
// 391 enter an endless loop handling requests from clients --- see the sections below
binder_loop(bs, svcmgr_handler);

2-1.binder_open

frameworks/native/cmds/servicemanager/binder.c
// 96
struct binder_state *binder_open(size_t mapsize)
// 98 this structure records everything service_manager keeps about binder
struct binder_state *bs;
// 107 open the binder driver and get a file descriptor
bs->fd = open("/dev/binder", O_RDWR);
// 123
bs->mapsize = mapsize; // chosen by service_manager itself: 128 KB
/* memory-map via the mmap system call; the size must be a whole number of pages (multiples of 4 KB) */
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);

 

2-2.binder_become_context_manager

frameworks/native/cmds/servicemanager/binder.c
// 146
int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);

}

2-2-1.binder_ioctl

kernel/drivers/staging/android/binder.c
// 3241
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
// 3277
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);

2-2-2.binder_ioctl_set_ctx_mgr

kernel/drivers/staging/android/binder.c
// 3200
static int binder_ioctl_set_ctx_mgr(struct file *filp)
// 3208 make sure the mgr_node is only created once; if it is non-null, return immediately
if (context->binder_context_mgr_node) {
// 3216
/* is the uid valid? at this point it is not */
if (uid_valid(context->binder_context_mgr_uid)) {
} else {
/* record the current thread's euid as service_manager's uid */
context->binder_context_mgr_uid = curr_euid;
}
// create the service_manager binder entity
context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
// 3233 bump the strong and weak references of binder_context_mgr_node by 1 each
context->binder_context_mgr_node->local_weak_refs++;
context->binder_context_mgr_node->local_strong_refs++;
context->binder_context_mgr_node->has_strong_ref = 1;
context->binder_context_mgr_node->has_weak_ref = 1;

2-2-2-1.binder_new_node

kernel/drivers/staging/android/binder.c

// 923
static struct binder_node *binder_new_node(struct binder_proc *proc,
binder_uintptr_t ptr,
binder_uintptr_t cookie)
// 931 empty on first entry
while (*p) {
// 943 allocate kernel memory for the new binder_node
node = kzalloc(sizeof(*node), GFP_KERNEL);
// 947 add the new node to the proc's red-black tree
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
// 950 initialize the binder_node
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE; // set the binder_work type
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);

2-3.binder_loop

frameworks/native/cmds/servicemanager/binder.c

void binder_loop(struct binder_state *bs, binder_handler func)
// 378
bwr.write_size = 0; // initialized to 0
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER; // the command to process
binder_write(bs, readbuf, sizeof(uint32_t)); // mark this thread's looper state as entered
for (;;) {
bwr.read_size = sizeof(readbuf); // non-zero, so binder_thread_read will run
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
/* keep reading from binder; with no data, the thread goes to sleep */
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

2-3-1.binder_write

frameworks/native/cmds/servicemanager/binder.c
// 151
int binder_write(struct binder_state *bs, void *data, size_t len)
// 156
bwr.write_size = len; // greater than 0, so binder_thread_write runs
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data; // here data is BC_ENTER_LOOPER
bwr.read_size = 0; // the read path is not taken
bwr.read_consumed = 0;
bwr.read_buffer = 0;
/* mark the thread's looper state as entered */
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

2-3-2.binder_thread_write

kernel/drivers/staging/android/binder.c
// 2250
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
// 2262 fetch the command, here BC_ENTER_LOOPER
if (get_user(cmd, (uint32_t __user *)ptr))
// 2472
case BC_ENTER_LOOPER:
// 2481 set this thread's looper state
thread->looper |= BINDER_LOOPER_STATE_ENTERED;

2-3-3.binder_thread_read

kernel/drivers/staging/android/binder.c
// 2652
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
// 2664 write a BR_NOOP command
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
// 2671 wait_for_proc_work is true
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
// 2694 the number of ready threads goes up by 1
if (wait_for_proc_work)
proc->ready_threads++;
// 2702
if (wait_for_proc_work) {
if (non_block) { // the non-blocking path; service_manager is blocking, so the if is not taken
} else // take the else branch and start waiting
ret = wait_event_freezable_exclusive(proc->wait,
binder_has_proc_work(proc, thread));
}

That completes servicemanager's startup; in short, three steps:

1. Open the driver and set up the memory mapping (sized at 128 KB)
2. Make servicemanager the context manager --- its job is to manage the system services
   1. Creates the binder_node structure
   2. proc --> binder_node
   3. Sets up work and todo --> similar to a MessageQueue (holds pending data)
3. The BC_ENTER_LOOPER command
   1. Writes the looper state
   2. Reads from binder: binder_thread_read calls ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); with no data, it waits
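Condensed into code, the whole startup is only a few calls. A hedged recap sketch, simplified from the servicemanager sources quoted above (error handling and BR_* parsing omitted):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android/binder.h>
#include <cstdint>

int main() {
    // 1. open the driver and map a 128 KB receive buffer (binder_open)
    int fd = open("/dev/binder", O_RDWR);
    void* mapped = mmap(nullptr, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
    if (fd < 0 || mapped == MAP_FAILED) return 1;

    // 2. become the context manager, i.e. handle 0 (binder_become_context_manager)
    ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);

    // 3. tell the driver this thread is a looper, then block reading work (binder_loop)
    uint32_t enter = BC_ENTER_LOOPER;
    binder_write_read bwr = {};
    bwr.write_buffer = (uintptr_t)&enter;
    bwr.write_size = sizeof(enter);
    ioctl(fd, BINDER_WRITE_READ, &bwr);

    uint32_t readbuf[32];
    for (;;) {
        bwr = {};
        bwr.read_buffer = (uintptr_t)readbuf; // non-zero read_size: binder_thread_read
        bwr.read_size = sizeof(readbuf);      // sleeps here until a client sends work
        ioctl(fd, BINDER_WRITE_READ, &bwr);
        // ... parse BR_* commands, dispatch to a handler like svcmgr_handler ...
    }
}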

Getting hold of service_manager

Next we cover getting ServiceManager at the native layer; the Java layer is analyzed afterwards. When do we actually need to get hold of ServiceManager? In two cases:

1. Registering a service with ServiceManager
2. Looking up a service through ServiceManager

Getting the ServiceManager is done through the defaultServiceManager() method.

 

1.defaultServiceManager

frameworks/native/libs/binder/IServiceManager.cpp
// 33
sp<IServiceManager> defaultServiceManager()
{
/* singleton: return immediately if already created */
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
AutoMutex _l(gDefaultServiceManagerLock);
/* when first fetched, ServiceManager may not be ready yet; if so, sleep 1 second and retry in a loop until it succeeds */
while (gDefaultServiceManager == NULL) {
/* analyzed in three pieces below */
gDefaultServiceManager = interface_cast<IServiceManager>(
ProcessState::self()->getContextObject(NULL));

if (gDefaultServiceManager == NULL)
sleep(1);
}

}
return gDefaultServiceManager;

}

1-1.ProcessState::self

frameworks/native/libs/binder/ProcessState.cpp
// 70
sp<ProcessState> ProcessState::self()
{
/* singleton */
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState; // instantiate ProcessState
return gProcess;
}

1-1-1.ProcessState::ProcessState

frameworks/native/libs/binder/ProcessState.cpp
// 339
ProcessState::ProcessState()
: mDriverFD(open_driver())
// 358 mmap a (1 MB - 8 KB) region of virtual address space for binder, used to receive transactions
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
mDriverFD, 0);

 

1-1-1-1.open_driver

frameworks/native/libs/binder/ProcessState.cpp
// 311
static int open_driver()
// 313 open the /dev/binder device, establishing the channel to the kernel Binder driver
int fd = open("/dev/binder", O_RDWR);
// 328 tell the binder driver via ioctl the maximum number of threads supported
size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);

 
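The thread limit is just one more ioctl on the already-open descriptor. A hedged sketch of what open_driver does after open (15 mirrors DEFAULT_MAX_BINDER_THREADS in this source tree; treat the constant as an assumption):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder.h>
#include <cstdio>

int main() {
    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }

    // Tell the driver how many extra looper threads it may ask this
    // process to spawn (BR_SPAWN_LOOPER requests are capped by this).
    size_t maxThreads = 15; // DEFAULT_MAX_BINDER_THREADS in this tree
    if (ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads) < 0)
        perror("ioctl(BINDER_SET_MAX_THREADS)");

    close(fd);
    return 0;
}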

1-2.ProcessState::getContextObject

 

frameworks/native/libs/binder/ProcessState.cpp
// 85
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
// handle 0 refers to the service_manager service
return getStrongProxyForHandle(0);
}

1-2-1.ProcessState::getStrongProxyForHandle

 

frameworks/native/libs/binder/ProcessState.cpp
// 179
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
// look up the resource entry for this handle
handle_entry* e = lookupHandleLocked(handle);
// 192 when there is no IBinder for this handle yet, or its weak reference is no longer valid
if (b == NULL || !e->refs->attemptIncWeak(this)) {
// 214 test via a ping transaction whether binder is ready
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
// 220 create the BpBinder object
b = new BpBinder(handle);

1-2-2.BpBinder::BpBinder

frameworks/native/libs/binder/BpBinder.cpp
// 89
BpBinder::BpBinder(int32_t handle)
: mHandle(handle)
{
/* enable strong/weak reference counting; OBJECT_LIFETIME_WEAK means the object's lifetime is controlled by weak pointers */
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
/* bump the weak reference count on the binder behind this handle */
IPCThreadState::self()->incWeakHandle(handle);
}

1-3.interface_cast

frameworks/native/include/binder/IInterface.h
// 41
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
// equivalent to: IServiceManager::asInterface
return INTERFACE::asInterface(obj);
}

1-3-1.IServiceManager::asInterface

As for the asInterface() function, searching the code you will not find an explicit definition of it anywhere; it is in fact generated by macro templates.

 

frameworks/native/include/binder/IInterface.h
// 74
#define DECLARE_META_INTERFACE(INTERFACE) \
static const android::String16 descriptor; \
static android::sp<I##INTERFACE> asInterface( \
const android::sp<android::IBinder>& obj); \
virtual const android::String16& getInterfaceDescriptor() const; \
I##INTERFACE(); \
virtual ~I##INTERFACE(); \
// 83
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME) \
const android::String16 I##INTERFACE::descriptor(NAME); \
const android::String16& \
I##INTERFACE::getInterfaceDescriptor() const { \
return I##INTERFACE::descriptor; \
} \
android::sp<I##INTERFACE> I##INTERFACE::asInterface( \
const android::sp<android::IBinder>& obj) \
{ \
android::sp<I##INTERFACE> intr; \
if (obj != NULL) { \
intr = static_cast<I##INTERFACE*>( \
obj->queryLocalInterface( \
I##INTERFACE::descriptor).get()); \
if (intr == NULL) { \
intr = new Bp##INTERFACE(obj); \
} \
} \
return intr; \
} \
I##INTERFACE::I##INTERFACE() { } \
I##INTERFACE::~I##INTERFACE() { }

 

1-3-2.DECLARE_META_INTERFACE

 

frameworks/native/include/binder/IServiceManager.h
// 33
DECLARE_META_INTERFACE(ServiceManager)

Expanding it gives:

static const android::String16 descriptor;
static android::sp<IServiceManager> asInterface(const android::sp<android::IBinder>& obj);
virtual const android::String16& getInterfaceDescriptor() const;
IServiceManager();
virtual ~IServiceManager();

This step merely declares the asInterface() and getInterfaceDescriptor() methods.

 

1-3-3.IMPLEMENT_META_INTERFACE

frameworks/native/libs/binder/IServiceManager.cpp
// 185
IMPLEMENT_META_INTERFACE(ServiceManager,"android.os.IServiceManager")

Expanding it gives:

const android::String16 IServiceManager::descriptor("android.os.IServiceManager");
const android::String16& IServiceManager::getInterfaceDescriptor() const
{
return IServiceManager::descriptor;
}
android::sp<IServiceManager> IServiceManager::asInterface(const
android::sp<android::IBinder>& obj)
{
android::sp<IServiceManager> intr;
if(obj != NULL) {
intr = static_cast<IServiceManager *>(
obj->queryLocalInterface(IServiceManager::descriptor).get());
if (intr == NULL) {
// equivalent to new BpServiceManager(BpBinder)
intr = new BpServiceManager(obj);
}
}
return intr;
}
IServiceManager::IServiceManager() { }
IServiceManager::~IServiceManager() { }
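To see what this macro pair buys you, here is a hedged sketch of a hypothetical interface IHello defined the same way (the interface, its descriptor string "com.example.IHello", and BpHello are invented for illustration; the pattern is exactly the one IServiceManager follows):

// IHello.h -- a hypothetical binder interface, for illustration only
#include <binder/IInterface.h>
#include <binder/Parcel.h>

using namespace android;

class IHello : public IInterface {
public:
    DECLARE_META_INTERFACE(Hello)   // declares descriptor, asInterface(), ...
    virtual void sayHello() = 0;
    enum { SAY_HELLO = IBinder::FIRST_CALL_TRANSACTION };
};

// Client-side proxy: wraps a BpBinder and turns method calls into transact()
class BpHello : public BpInterface<IHello> {
public:
    explicit BpHello(const sp<IBinder>& impl) : BpInterface<IHello>(impl) {}
    void sayHello() override {
        Parcel data, reply;
        data.writeInterfaceToken(IHello::getInterfaceDescriptor());
        remote()->transact(SAY_HELLO, data, &reply); // remote() is the BpBinder
    }
};

// Generates descriptor and asInterface(); asInterface() returns the local
// object if one lives in this process, otherwise wraps obj in new BpHello(obj)
IMPLEMENT_META_INTERFACE(Hello, "com.example.IHello")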

 

1-4.BpServiceManager

 

frameworks/native/libs/binder/IServiceManager.cpp
// 126
class BpServiceManager : public BpInterface<IServiceManager>
// 129
BpServiceManager(const sp<IBinder>& impl)
: BpInterface<IServiceManager>(impl)

 

1-4-1.BpInterface::BpInterface

 

frameworks/native/include/binder/IInterface.h
// 134
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
: BpRefBase(remote)

 

1-4-2.BpRefBase

frameworks/native/libs/binder/Binder.cpp
// 241 mRemote points to new BpBinder(0), which is what lets BpServiceManager communicate over Binder
BpRefBase::BpRefBase(const sp<IBinder>& o)
: mRemote(o.get()), mRefs(NULL), mState(0)

 

 

gDefaultServiceManager = interface_cast<IServiceManager>(
ProcessState::self()->getContextObject(NULL));

The native flow for getting ServiceManager can be summarized as:

1. ProcessState::self()->getContextObject(NULL)            (driver setup)
   1. ProcessState::self()
      1. Opens the binder driver
      2. Sets the maximum thread count: 15
      3. mmap -- sets up the mapped memory, (1 MB - 8 KB), the size used by ordinary services
   2. getContextObject
      1. Creates a BpBinder --- the client-side object

2. interface_cast                                          (proxy creation)
   1. new BpServiceManager(new BpBinder) ==> new Proxy(binder == BinderProxy)
   2. remote.transact --> the remote call
   3. remote == BpBinder
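Put together, this is what any native client does. A hedged usage sketch (assumes linking against libbinder and libutils; "activity" is the name AMS registers under later in this article):

#include <binder/IServiceManager.h>
#include <binder/IBinder.h>
#include <utils/String16.h>

using namespace android;

int main() {
    // Opens /dev/binder (once per process), creates BpBinder(0), and wraps it
    // in a BpServiceManager -- everything traced in sections 1-1 through 1-4.
    sp<IServiceManager> sm = defaultServiceManager();

    // A remote call through handle 0: ask service_manager for AMS's IBinder.
    sp<IBinder> ams = sm->getService(String16("activity"));

    return ams != nullptr ? 0 : 1;
}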

 

Next we analyze the addService flow.

We use AMS as the example of a service being added to ServiceManager.

First, how AMS gets started:

1.SystemServer
1-1.run

frameworks/base/services/java/com/android/server/SystemServer.java
// 167
public static void main(String[] args) {
new SystemServer().run();
}
frameworks/base/services/java/com/android/server/SystemServer.java
// 176
private void run() {
// 263 create the SystemServiceManager
mSystemServiceManager = new SystemServiceManager(mSystemContext);
// 268
startBootstrapServices();

1-2.startBootstrapServices

frameworks/base/services/java/com/android/server/SystemServer.java
// 322
private void startBootstrapServices() {
// 329 obtain the AMS object
mActivityManagerService = mSystemServiceManager.startService(
ActivityManagerService.Lifecycle.class).getService();
// 378
mActivityManagerService.setSystemProcess();

This is where AMS gets created. Now let's see how setSystemProcess adds it to service_manager.

1-3.setSystemProcess

frameworks/base/services/core/java/com/android/server/am/ActivityManagerService.java
// 2172
public void setSystemProcess() {
// 2174 add AMS ("activity") to service_manager
ServiceManager.addService(Context.ACTIVITY_SERVICE, this, true);

 

1-4.ServiceManager.addService

frameworks/base/core/java/android/os/ServiceManager.java
// 87
public static void addService(String name, IBinder service, boolean
allowIsolated) {
// 89 --- see the sections below (getIServiceManager and addService are analyzed separately)
getIServiceManager().addService(name, service, allowIsolated);

This is analyzed in two steps:

getIServiceManager().addService(name, service, allowIsolated);

2.getIServiceManager

frameworks/base/core/java/android/os/ServiceManager.java
// 33
private static IServiceManager getIServiceManager() {
/* returns the ServiceManagerProxy object as a singleton */
if (sServiceManager != null) {
return sServiceManager;
}
// equivalent to new ServiceManagerProxy(new BinderProxy()); --- see the sections below (asInterface and getContextObject are analyzed separately)
sServiceManager =
ServiceManagerNative.asInterface(BinderInternal.getContextObject());
return sServiceManager;
return sServiceManager;
}

2-1.BinderInternal.getContextObject

frameworks/base/core/java/com/android/internal/os/BinderInternal.java
// 88
public static final native IBinder getContextObject();

 

2-1-1.android_os_BinderInternal_getContextObject

frameworks/base/core/jni/android_util_Binder.cpp
// 899
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject
clazz)
{
/* open the binder driver (ProcessState is a singleton), create the BpBinder(handle) object, and return it */
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}

2-1-2.javaObjectForIBinder

frameworks/base/core/jni/android_util_Binder.cpp
// 547
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
// 563 look for a BinderProxy already attached to this BpBinder; null on first call
jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
// 576 create the BinderProxy object
object = env->NewObject(gBinderProxyOffsets.mClass,
gBinderProxyOffsets.mConstructor);
// 580 BinderProxy.mObject records the BpBinder object
env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
// 587 attach the BinderProxy's info to the BpBinder's mObjects member
val->attachObject(&gBinderProxyOffsets, refObject,
jnienv_to_javavm(env), proxy_cleanup);
// 593 BinderProxy.mOrgue records the death-notification object
env->SetLongField(object, gBinderProxyOffsets.mOrgue, reinterpret_cast<jlong>(drl.get()));

2-2.ServiceManagerNative.asInterface

frameworks/base/core/java/android/os/ServiceManagerNative.java
// 33
static public IServiceManager asInterface(IBinder obj)
// 38 obj is a BinderProxy, so queryLocalInterface returns null by default
IServiceManager in =
(IServiceManager)obj.queryLocalInterface(descriptor);
// 44
return new ServiceManagerProxy(obj);

 

 

getIServiceManager().addService(name, service, false);

To summarize:

- getIServiceManager --- new ServiceManagerProxy(new BinderProxy())
  - ServiceManagerNative.asInterface(BinderInternal.getContextObject())
    - BinderInternal.getContextObject --- returns the BinderProxy object
      - ProcessState::self()->getContextObject: creates a BpBinder
      - javaObjectForIBinder -- binds the BinderProxy and the BpBinder to each other
    - ServiceManagerNative.asInterface
      - returns a ServiceManagerProxy

So the call is really ServiceManagerProxy(new BinderProxy()).addService; let's look at the addService method in ServiceManagerProxy.

2-3.ServiceManagerProxy

frameworks/base/core/java/android/os/ServiceManagerNative.java$ServiceManagerProxy.java
// 109
class ServiceManagerProxy implements IServiceManager {
// mRemote is the BinderProxy object
public ServiceManagerProxy(IBinder remote) {
mRemote = remote;
}

3.SMP.addService--1

frameworks/base/core/java/android/os/ServiceManagerNative.java$ServiceManagerProxy.java
// 142
public void addService(String name, IBinder service, boolean allowIsolated)
throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
// 148 here service == AMS
data.writeStrongBinder(service);

 

3-1.Parcel.writeStrongBinder

frameworks/base/core/java/android/os/Parcel.java
// 583
public final void writeStrongBinder(IBinder val) {
nativeWriteStrongBinder(mNativePtr, val);
}

3-2.android_os_Parcel_writeStrongBinder

frameworks/base/core/jni/android_os_Parcel.cpp
// 298
static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr, jobject object)
// 300 convert the Java-level Parcel into the native Parcel
Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
// 302 --- see the sections below (ibinderForJavaObject and writeStrongBinder are analyzed separately)
const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));

 

3-2-1.ibinderForJavaObject

frameworks/base/core/jni/android_util_Binder.cpp
// 603
sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj)
// 607
if (env->IsInstanceOf(obj, gBinderOffsets.mClass)) { // is this a Java-level Binder object? here it is AMS, so the if is taken
JavaBBinderHolder* jbh = (JavaBBinderHolder*)
env->GetLongField(obj, gBinderOffsets.mObject);
return jbh != NULL ? jbh->get(env, obj) : NULL; // returns a JavaBBinder object
}

 

3-2-1-1.JavaBBinderHolder.get

frameworks/base/core/jni/android_util_Binder.cpp$JavaBBinderHolder.cpp
// 317
sp<JavaBBinder> get(JNIEnv* env, jobject obj)
// 320
sp<JavaBBinder> b = mBinder.promote(); // promote the weak pointer to a strong one; on first entry this returns null
if (b == NULL) {
b = new JavaBBinder(env, obj); // create and return a JavaBBinder object

Remember: the argument that reaches writeStrongBinder is a JavaBBinder object.

3-2-2.(Parcel.cpp)parcel->writeStrongBinder

frameworks/native/libs/binder/Parcel.cpp
// 872
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
return flatten_binder(ProcessState::self(), val, this);
}

3-2-2-1.flatten_binder

frameworks/native/libs/binder/Parcel.cpp
// 205
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
const sp<IBinder>& binder, Parcel* out)
// 208
flat_binder_object obj;
// 212 the binder lives in the current process, so the local binder is non-null
IBinder *local = binder->localBinder();
// 224 flatten the Binder object into a flat_binder_object
obj.type = BINDER_TYPE_BINDER;
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
obj.cookie = reinterpret_cast<uintptr_t>(local);
// 234
return finish_flatten_binder(binder, obj, out);

3-2-2-2.finish_flatten_binder

frameworks/native/libs/binder/Parcel.cpp
// 199
inline static status_t finish_flatten_binder(
const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
// write the flat_binder_object into out
return out->writeObject(flat, false);
}
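Seen from the caller, the whole flattening step hides behind a single Parcel call. A hedged native sketch (MyService is a made-up BBinder subclass; in this article's scenario the object being written is AMS's JavaBBinder):

#include <binder/Binder.h>
#include <binder/Parcel.h>

using namespace android;

// A trivial local service object; its address ends up in flat_binder_object.cookie
class MyService : public BBinder {};

int main() {
    sp<IBinder> service = new MyService();

    Parcel data;
    // Internally: flatten_binder() builds a flat_binder_object with
    // type == BINDER_TYPE_BINDER (it is local), then writeObject() stores it.
    data.writeStrongBinder(service);

    return 0;
}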

 

3.SMP.addService--2

frameworks/base/core/java/android/os/ServiceManagerNative.java$ServiceManagerProxy.java
// 142
public void addService(String name, IBinder service, boolean allowIsolated)
throws RemoteException {
// 150 mRemote is the BinderProxy object
mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);

3-1.BinderProxy.transact

frameworks/base/core/java/android/os/Binder.java$BinderProxy.java
// 501
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws
RemoteException {
// 503
return transactNative(code, data, reply, flags);

3-2.android_os_BinderProxy_transact

frameworks/base/core/jni/android_util_Binder.cpp
// 1083
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
// 1091 get the data Parcel
Parcel* data = parcelForJavaObject(env, dataObj);
// 1095 get the reply Parcel
Parcel* reply = parcelForJavaObject(env, replyObj);
// 1100 get the BpBinder object
IBinder* target = (IBinder*)
env->GetLongField(obj, gBinderProxyOffsets.mObject);
// 1124
status_t err = target->transact(code, *data, reply, flags);

3-3.BpBinder::transact

frameworks/native/libs/binder/BpBinder.cpp
// 159
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
// 164
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);

3-4.IPCThreadState::transact

frameworks/native/libs/binder/IPCThreadState.cpp
// 548
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
// 552 sanity-check the data
status_t err = data.errorCheck();
// 554
flags |= TF_ACCEPT_FDS;
// TF_ACCEPT_FDS = 0x10: the reply may carry file descriptors
// TF_ONE_WAY: the call is asynchronous, no need to wait
// TF_ROOT_OBJECT: the payload is the root object
// TF_STATUS_CODE: the payload is a 32-bit status value
// 566 package the data and store the result in mOut (the command is only really sent to the binder driver inside talkWithDriver)

err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
// 574
if ((flags & TF_ONE_WAY) == 0) { // not async, so the if is taken
if (reply) { // non-null
err = waitForResponse(reply); // wait for the response
}
}
3-4-1.writeTransactionData

frameworks/native/libs/binder/IPCThreadState.cpp
// 904
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
// 934
mOut.writeInt32(cmd); // write the command, BC_TRANSACTION, into mOut
mOut.write(&tr, sizeof(tr)); // write the binder_transaction_data payload

 

3-5.IPCThreadState::waitForResponse--1

frameworks/native/libs/binder/IPCThreadState.cpp
// 712
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
// 717 loop, waiting for the result
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;

 

3-5-1.talkWithDriver

frameworks/native/libs/binder/IPCThreadState.cpp
// 803
status_t IPCThreadState::talkWithDriver(bool doReceive)
// 812 is the read buffer empty? right now there is nothing left to read
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// 817
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0; // mOut must not be written while a read is pending
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data(); // fill in the size and contents to write in bwr
if (doReceive && needRead) { // taken here: doReceive defaults to true and needRead is true, so record where the reply should be read into
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// 851
do { // the while condition will not hold, so this runs exactly once
/* 856 send the BC_TRANSACTION command down */
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
} while (err == -EINTR);

3-5-2.binder_ioctl_write_read--1

kernel/drivers/staging/android/binder.c
// 3136
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
// 3161 the user's data is written through this function
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);

3-5-2-1.binder_thread_write

kernel/drivers/staging/android/binder.c
// 2250
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
// 2442
case BC_TRANSACTION:
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0); // here cmd == BC_TRANSACTION, so the fourth argument is false

3-5-2-2.binder_transaction

kernel/drivers/staging/android/binder.c
// 1829
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
// 1861 here reply is false (cmd == BC_TRANSACTION)
if (reply) {
} else {
if (tr->target.handle) { // not taken: the if is only entered when handle != 0
} else { // this branch is taken
/* the target is service_manager, so its target_node can be taken straight from the global binder_context_mgr_node */
target_node = context->binder_context_mgr_node;
}
/* 1919 target_proc is the service_manager process */
target_proc = target_node->proc;
}
// 1954 find the service_manager process's todo queue
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
// 1960 allocate a binder_transaction (variable t) describing the transaction to perform; it is eventually appended to the target's todo list,
// so that when the target wakes up it can take the pending work off this queue.
t = kzalloc(sizeof(*t), GFP_KERNEL);
// 1967 allocate a binder_work (variable tcomplete) marking that the calling thread has an unfinished transaction; it is eventually appended to the calling thread's own todo list
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
// 1996 fill in the transaction structure, i.e. variable t
if (!reply && !(tr->flags & TF_ONE_WAY)) // for a non-oneway call, record the current thread in the transaction's from field
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc; // the target of this call is the service_manager process
t->to_thread = target_thread;
t->code = tr->code; // here code = ADD_SERVICE_TRANSACTION
t->flags = tr->flags; // here flags = 0
t->priority = task_nice(current);
// 2009 allocate a buffer from the service_manager process (memory for this transaction comes out of the area opened up by binder_mmap)
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
// 2028 copy ptr.buffer and ptr.offsets of the user-space binder_transaction_data into the kernel
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
}
// 2059
for (; offp < off_end; offp++) {
// 2075
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
/* create a binder_ref, service_manager's reference object for this binder --- see the section below */
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
goto err_translate_failed;
}
} break;
}
// 2187
} else if (!(t->flags & TF_ONE_WAY)) {
// 2191
thread->transaction_stack = t; // record this transaction for later lookup (service_manager uses it to know who called, so the reply can be routed back)
}
// 2201
t->work.type = BINDER_WORK_TRANSACTION; // mark t as BINDER_WORK_TRANSACTION
list_add_tail(&t->work.entry, target_list); // append t to the target's work queue
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; // mark the binder_work as BINDER_WORK_TRANSACTION_COMPLETE
list_add_tail(&tcomplete->entry, &thread->todo); // the current thread has one unfinished piece of work
if (target_wait)
wake_up_interruptible(target_wait); // wake the target, i.e. service_manager

3-5-2-2-1.binder_translate_binder

kernel/drivers/staging/android/binder.c
// 1564
static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
// 1592 create a binder_ref
ref = binder_get_ref_for_node(target_proc, node);
// 1596 change the type to BINDER_TYPE_HANDLE
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE;

 

binder_loop has been waiting for data all along; now that data has arrived, it enters the read path.

t->work.type = BINDER_WORK_TRANSACTION; -- for service_manager -- do the work
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; -- for the client -- suspend

3-5-2.binder_ioctl_write_read--2--service_manager has now been woken by wake_up_interruptible

     First, the flow that suspends the client.

frameworks/native/cmds/servicemanager/binder.c:

res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

Step into the ioctl:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

// 3136
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
// 3174
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);

3-5-2-3.binder_thread_read

kernel/drivers/staging/android/binder.c
// 2652
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
// 2664 if consumed == 0, write a BR_NOOP
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
// 2739 a binder_work was queued on thread->todo earlier, so w is non-null, with type BINDER_WORK_TRANSACTION_COMPLETE
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
// 2760 write the BR_TRANSACTION_COMPLETE command
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))

So after this read, two commands have been written back:

- BR_NOOP, BR_TRANSACTION_COMPLETE

With that, talkWithDriver has finished one pass, and control returns to the loop.

 

frameworks/native/libs/binder/IPCThreadState.cpp
// 712
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)

 

// 718 while loop: run talkWithDriver again

while(1){
if ((err=talkWithDriver()) < NO_ERROR) break;

// 786 handle the BR_NOOP command: does nothing

// 731 handle the BR_TRANSACTION_COMPLETE command
case BR_TRANSACTION_COMPLETE:
if (!reply && !acquireResult) goto finish; // this call is synchronous, so the if is not taken and the while loop continues

break;
// 718 talkWithDriver runs again; this time bwr.write_size == 0 while bwr.read_size is still greater than 0, so the driver goes straight into binder_thread_read
if ((err=talkWithDriver()) < NO_ERROR) break;

3-6.binder_thread_read--the client thread starts waiting

kernel/drivers/staging/android/binder.c
// 2652
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
// 2664 write the BR_NOOP command
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
// 2671 this time wait_for_proc_work is false
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
// 2717
if (non_block) { // we are in blocking mode, so the if is not taken
} else // start waiting, until service_manager wakes us up
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));

 

That completes the client's suspension. Next, what does service_manager do once it is woken? From the earlier sections we know it reads from its todo queue and waits when there is nothing there; now that it has been woken, it has a BINDER_WORK_TRANSACTION to handle.

It had been parked in the wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)) call described earlier.

3-7.binder_thread_read--service_manager starts processing the message

kernel/drivers/staging/android/binder.c
// 2652
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
// 2757 mainly copies the user's request over into service_manager and adjusts the various queues
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
// 2898 set the command to BR_TRANSACTION
cmd = BR_TRANSACTION;

ServiceManager now has to handle this command; back in its binder_loop:

3-8.binder_loop

frameworks/native/cmds/servicemanager/binder.c
// 372
void binder_loop(struct binder_state *bs, binder_handler func)
// 397 parse the incoming request
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);

3-9.binder_parse

frameworks/native/cmds/servicemanager/binder.c
// 204
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
// 230
case BR_TRANSACTION: {
// 237
if (func) {
// 243 initialize the reply
bio_init(&reply, rdata, sizeof(rdata), 4);
// 245
res = func(bs, txn, &msg, &reply); // the request is handled by svcmgr_handler
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res); // send the reply to the binder driver
}

3-9-1.service_manager

frameworks/native/cmds/servicemanager/service_manager.c
// 251
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
// 309
case SVC_MGR_ADD_SERVICE:
// 316 register the requested service
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))

3-9-1-1.do_add_service

frameworks/native/cmds/servicemanager/service_manager.c
// 201
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,

uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
// 214
if (!svc_can_register(s, len, spid, uid)) {
// 220
si = find_svc(s, len);
if (si) {
if (si->handle) {
svcinfo_death(bs, si); // the service is already registered: release the old entry
}
si->handle = handle; // store the new handle
} else {
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) { // out of memory: allocation failed
return -1;
}
si->handle = handle;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t)); // copy in the service's info
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
si->next = svclist; // svclist holds every registered service
svclist = si;
}
/* send a BC_ACQUIRE command targeting this handle to the binder driver via ioctl, bumping the binder_ref strong count by 1 */
binder_acquire(bs, handle);
/* send a BC_REQUEST_DEATH_NOTIFICATION command to the binder driver via ioctl, mainly for cleanup work such as freeing memory when the service dies */
binder_link_to_death(bs, handle, &si->death);

 

3-9-2.binder_send_reply

frameworks/native/cmds/servicemanager/binder.c
// 170
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
// 182
data.cmd_free = BC_FREE_BUFFER; // the free-buffer command
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY; // the reply command
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) { // status == 0
} else {
data.txn.flags = 0;

data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data)); // talk to the binder driver

3-9-2-1. binder_thread_write

kernel/drivers/staging/android/binder.c
// 2250
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0);

 

To summarize:

getIServiceManager().addService(name, service, false);

- getIServiceManager --- new ServiceManagerProxy(new BinderProxy())
  - ServiceManagerNative.asInterface(BinderInternal.getContextObject())
    - BinderInternal.getContextObject --- returns the BinderProxy object
      - ProcessState::self()->getContextObject: creates a BpBinder
      - javaObjectForIBinder -- binds the BinderProxy and the BpBinder to each other
    - ServiceManagerNative.asInterface
      - returns a ServiceManagerProxy
- addService
  - data.writeStrongBinder(service); -- service == AMS --- puts AMS into data
  - mRemote.transact --- mRemote == BinderProxy
    - gets the BpBinder --- IPCThreadState::transact
      - 1.writeTransactionData --- writes the command into mOut --- cmd == BC_TRANSACTION
      - 2.waitForResponse
        - talkWithDriver -- very important --- and very long
          - binder_transaction
            - handle == 0 --> service_manager
            - 1. target_node
              2. proc
              3. todo, wait
              4. create t and tcomplete
              5. copy the data
              6. binder_translate_binder --> handle
              7. thread->transaction_stack = t; ---> lets service_manager find the client
              8. t->work.type = BINDER_WORK_TRANSACTION; -- for service_manager -- do the work
              9. tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; -- for the client -- suspend
              10. wake_up_interruptible wakes service_manager
      - the client suspends
        - BR_NOOP, BR_TRANSACTION_COMPLETE
        - wait_event_freezable --- suspend
      - service_manager handles the add
        - BINDER_WORK_TRANSACTION --- the command to handle is BR_TRANSACTION
        - 1. initialize the reply
          2. res = func(bs, txn, &msg, &reply); --- a function pointer --- svcmgr_handler, which adds or looks up a service
             1. servicemanager keeps all services in svclist
          3. binder_send_reply --- BC_REPLY
          4. t->work.type = BINDER_WORK_TRANSACTION; --- for the client
                 list_add_tail(&t->work.entry, target_list);
                 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; -- for service_manager --- it suspends again
                 list_add_tail(&tcomplete->entry, &thread->todo);
          5. wake_up_interruptible(target_wait); -- wakes the client
      - the client is woken
        - BINDER_WORK_TRANSACTION --- cmd = BR_REPLY;

How the server side handles onTransact:

- IPCThreadState::executeCommand

  -  error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                                &reply, tr.flags);

  - JavaBBinder.onTransact --- C++

  - jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
                code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags); -- which calls the execTransact method in Binder.java
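For a purely native service, the same dispatch ends in an onTransact override instead of the JNI hop into execTransact. A hedged sketch (BnHello is hypothetical, continuing the IHello illustration from earlier):

#include <binder/Binder.h>
#include <binder/Parcel.h>

using namespace android;

// Server-side object: the driver hands transactions to BBinder::transact,
// which calls this onTransact with the code and flattened Parcels.
class BnHello : public BBinder {
protected:
    status_t onTransact(uint32_t code, const Parcel& data,
                        Parcel* reply, uint32_t flags) override {
        switch (code) {
        case IBinder::FIRST_CALL_TRANSACTION:   // our hypothetical SAY_HELLO
            // unmarshal arguments from data, do the work, marshal into reply
            if (reply) reply->writeInt32(0);
            return NO_ERROR;
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};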

 

Looking up a service follows much the same flow as adding one, so it is not analyzed further here.
 

 

 
