分析Android P的servicemanager的IBinder上下文管理(一)

frameworks/native/cmds/servicemanager
├── Android.bp
├── bctest.c
├── binder.c
├── binder.h
├── service_manager.c
├── servicemanager.rc
└── vndservicemanager.rc

通过init进程解析servicemanager.rc启动servicemanager进程,

service servicemanager /system/bin/servicemanager
    class core animation
    user system
    group system readproc
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart audioserver
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart inputflinger
    onrestart restart drm
    onrestart restart cameraserver
    onrestart restart keystore
    onrestart restart gatekeeperd
    writepid /dev/cpuset/system-background/tasks
    shutdown critical

 

再来看service_manager.c文件中入口函数:main

int main(int argc, char** argv)
{
    struct binder_state *bs;
    union selinux_callback cb;
    char *driver;

    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder";//default binder character-device node path
    }

    bs = binder_open(driver, 128*1024);//open the node and mmap 128KB of it into this process
    if (!bs) {
#ifdef VENDORSERVICEMANAGER
        ALOGW("failed to open binder driver %s\n", driver);
        while (true) {
            sleep(UINT_MAX);//vendor variant: open failed, sleep forever instead of exiting
        }
#else
        ALOGE("failed to open binder driver %s\n", driver);
#endif
        return -1;
    }

    if (binder_become_context_manager(bs)) {//register this process as the binder context manager
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    cb.func_audit = audit_callback;//install the SELinux audit and log callbacks
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

#ifdef VENDORSERVICEMANAGER
    sehandle = selinux_android_vendor_service_context_handle();
#else
    sehandle = selinux_android_service_context_handle();
#endif
    selinux_status_open(true);

    if (sehandle == NULL) {
        ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
        abort();
    }

    if (getcon(&service_manager_context) != 0) {//abort if the service_manager SELinux context cannot be obtained
        ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
        abort();
    }


    binder_loop(bs, svcmgr_handler);//enter the binder loop with svcmgr_handler as the request callback

    return 0;
}

系统启动,没有传入参数,所以binder的驱动节点为:"/dev/binder";
首先定义一个指针:bs 和 一个selinux的回调接口:cb;
struct binder_state *bs; 表示binder_state的状态;

在frameworks/native/cmds/servicemanager/binder.c文件中定义:
fd:是文件描述符,即表示打开的/dev/binder设备文件描述符;
mapped:是把设备文件/dev/binder映射到进程空间的起始地址;
mapsize:是上述映射空间的大小。
binder_state 结构体的作用:
保存"/dev/binder"节点的文件描述符(句柄);
保存映射空间内存的起始地址;
保存映射空间内存的大小;

struct binder_state
{
    int fd;//file descriptor of the opened binder driver node
    void *mapped;//start address of the mmap'ed region
    size_t mapsize;//size in bytes of the mapped region
};

通过函数 binder_open 打开字符设备文件节点"/dev/binder",并且将该文件通过mmap映射到本进程的地址空间。地址空间为128KB:

bs = binder_open(driver, 128*1024);

通过函数 binder_become_context_manager 告诉Binder驱动程序,我要成为Binder的上下文管理者

if (binder_become_context_manager(bs)) {
    ALOGE("cannot become context manager (%s)\n", strerror(errno));
    return -1;
}

通过函数 selinux_set_callback 设置selinux的回调接口:

cb.func_audit = audit_callback;
selinux_set_callback(SELINUX_CB_AUDIT, cb);
cb.func_log = selinux_log_callback;
selinux_set_callback(SELINUX_CB_LOG, cb);

通过函数 selinux_android_service_context_handle 获取sehandler指针,也就是得到selinux的操作对象

#ifdef VENDORSERVICEMANAGER
    sehandle = selinux_android_vendor_service_context_handle();
#else
    sehandle = selinux_android_service_context_handle();
#endif
    selinux_status_open(true);

    if (sehandle == NULL) {
        ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
        abort();
    }
    if (getcon(&service_manager_context) != 0) {
        ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
        abort();
    }

通过函数 binder_loop 进入binder的等待流程,以上就是service_manager.c启动的基本流程;
主要做了五件事情:
1、获取"/dev/binder"字符设备节点;
2、打开"/dev/binder"字符设备节点并为其映射内存空间;
3、成为binder上下文的管理者;
4、设置selinux的回调接口;
5、进入binder等待流程,用于接收其他binder服务传递过来的数据

 

下面讲细节----------------------------:

查看 "/dev/binder" 的文件权限:
crw-rw-rw- 1 root root u:object_r:binder_device:s0 10,  36 2019-08-12 02:55 /dev/binder
linux中c表示字符设备文件,b表示块设备文件,l表示符号链接文件,r表示可读权限,w表示可写权限。
linux文件属性解读:
文件类型:
-:普通文件 (f)
d:目录文件
b:块设备文件 (block)
c:字符设备文件 (character)
l:符号链接文件(symbolic link file)
p:命令管道文件(pipe)
s:套接字文件(socket)
文件权限: 9位,每3位一组,每一组:rwx(读,写,执行),当改组不具有某一权限用-代替。
第一组为: 文件拥有者的权限, 该文件的拥有者可以读写,但不可执行;
第二组为: 同群组的权限
第三组为: 其他非本群组的权限

第一、打开 "/dev/binder" 节点,并返回一个结构体 binder_state 的指针,操作 /dev/binder 字符设备文件节点的句柄:

bs = binder_open(driver, 128*1024);
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;//state object returned to the caller
    //binder version structure, declared in kernel4.4/include/uapi/linux/android/binder.h;
    //used to verify that the user-space and kernel binder protocol versions
    //match — they must be identical for binder IPC to proceed
    struct binder_version vers;

    bs = malloc(sizeof(*bs));//allocate the state structure
    if (!bs) {//allocation failed: report ENOMEM and bail out
        errno = ENOMEM;
        return NULL;
    }

    bs->fd = open(driver, O_RDWR | O_CLOEXEC);//open the driver node (e.g. /dev/binder)
    if (bs->fd < 0) {//open() returns >=0 on success; on failure jump to fail_open
        fprintf(stderr,"binder: cannot open %s (%s)\n",
                driver, strerror(errno));
        goto fail_open;
    }
    //compare the user-space binder protocol version with the kernel driver's;
    //#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
    //(an ioctl command carrying both read and write parameters)
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    bs->mapsize = mapsize;//remember the requested mapping size
    //map mapsize bytes of the driver into this process; arguments:
    //arg1 NULL: let the kernel pick the mapping address (returned on success)
    //arg2 mapsize: number of bytes of the file to map, starting at the offset
    //arg3 PROT_READ: the mapping is read-only from user space
    //arg4 MAP_PRIVATE: private mapping; one of MAP_SHARED/MAP_PRIVATE is mandatory
    //arg5 bs->fd: descriptor of the file being mapped, as returned by open()
    //arg6 0: map starting at the beginning of the file
    //returns the virtual address of the mapping, or MAP_FAILED — see mmap(2)
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {//mapping failed: cannot map the device
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }
    //success: hand the state object back to the caller
    return bs;

fail_map://mmap failed: close the driver fd before freeing
    close(bs->fd);
fail_open://open (or version check) failed: release the state structure
    free(bs);
    return NULL;
}

通过 binder_open 的调用,参数mapsize的大小为128*1024,即128KB。在使用函数open打开设备文件/dev/binder的时候,
Binder驱动程序中的函数binder_open就会被调用,它会为当前进程创建一个 binder_proc 结构体,描述当前进程的Binder进程间通信状态。
调用函数mmap将设备文件/dev/binder映射到进程的地址空间,它请求映射的地址空间大小为mapsize,
即请求Binder驱动程序为进程分配128K大小的内核缓冲区。映射后得到的地址空间的起始地址和大小分别保存在一个binder_state结构体bs的成员变量mapped和mapsize中。
最后,将binder_state结构体bs返回给调用者,即函数main。

ioctl命令说明(下面会用到):
_IO    定义不带参数的ioctl命令
_IOW    定义带写参数的ioctl命令(copy_from_user)
_IOR    定义带读参数的ioctl命令(copy_to_user)
_IOWR    定义带读写参数的ioctl命令


第二、调用 binder_become_context_manager 成为binder驱动的上下文管理者,代码很简单,但是在内核中的代码就比较复杂;

// Ask the driver to make this process the binder context manager.
// The argument 0 is the address value of service manager's (virtual)
// local binder object.
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

通过ioctrl命令函数设置binder上下文的管理者;

IO控制命令 BINDER_SET_CONTEXT_MGR 的定义:
在kernel4.4/include/uapi/linux/android/binder.h中定义:

#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)

它只有一个整型参数,用来描述一个与ServiceManager对应的Binder本地对象的地址值。
由于与ServiceManager对应的Binder本地对象是一个虚拟的对象,并且它的地址值等于0,
因此,函数 binder_become_context_manager 就将IO控制命令BINDER_SET_CONTEXT_MGR的参数设置为0


而接收 BINDER_SET_CONTEXT_MGR 命令的处理函数在如下文件中定义:
kernel4.4/drivers/android/binder.c

钩子函数:ioctl是Linux中常见的系统调用,它用于对底层设备的一些特性进行控制的用户态接口,
应用程序在调用ioctl进行设备控制时,最后会调用到设备注册 struct file_operations 结构体对象时的
unlocked_ioctl或者compat_ioctl两个钩子上,具体是调用哪个钩子判断标准如下:
compat_ioctl : 32位的应用运行在64位的内核上,这个钩子被调用。

unlocked_ioctl: 64位的应用运行在64位的内核或者32位的应用运行在32位的内核上,则调用这个钩子。
Binder做为Android中进程间高效通信的核心组件,其底层是以misc设备驱动的形式实现的,但它本身并没有实现read,write操作,所有的控制都是通过ioctl操作来实现。
在Binder驱动的struct file_operations定义中可见,它的compat_ioctl和unlocked_ioctl两个钩子的的实现都是对应到binder_ioctl上的。

// Binder driver file operations. All control traffic goes through ioctl:
// both the native (unlocked_ioctl) and 32-bit-compat (compat_ioctl) hooks
// map to the same binder_ioctl handler.
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

//binder_context: per-context state used by the registration code below
struct binder_context {
	struct binder_node *binder_context_mgr_node;//node of the registered context manager
	struct mutex context_mgr_node_lock;//protects the manager node/uid fields

	kuid_t binder_context_mgr_uid;//euid of the process registered as manager
	const char *name;//name of this binder context
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
/* See the kerneldoc comment above for per-field semantics and locking rules. */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

// Excerpt of the driver's ioctl dispatcher (unrelated cases elided by the
// author): BINDER_SET_CONTEXT_MGR is delegated to binder_ioctl_set_ctx_mgr().
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	......
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	......
}

static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	//binder_proc that the driver created for the calling (service_manager)
	//process when it opened /dev/binder
	struct binder_proc *proc = filp->private_data;
	//the binder context this process belongs to
	struct binder_context *context = proc->context;
	struct binder_node *new_node;//node created below to represent the manager
	kuid_t curr_euid = current_euid();//effective uid of the calling process

	mutex_lock(&context->context_mgr_node_lock);//serialize manager registration
	//binder_context_mgr_node describes the binder node of the context manager.
	//If it is already non-NULL, some component has previously registered as the
	//context manager; the driver does not allow re-registration, so fail
	//with -EBUSY.
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	//SEAndroid check (kernel4.4/security/security.c): is the current process
	//allowed to register as the context manager?
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	//binder_context_mgr_uid records the effective uid of the process that
	//registered the context manager. If it is already valid, the caller's
	//euid must match it, otherwise fail with -EPERM. The same process IS
	//allowed to issue BINDER_SET_CONTEXT_MGR again, because an earlier
	//attempt may have recorded the uid and then failed before the manager
	//node was actually created.
	if (uid_valid(context->binder_context_mgr_uid)) {//was a manager uid recorded?
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {//it must match the caller's euid
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;//record the caller's euid
	}
	//create the binder node that will represent the context manager
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);//lock the node while taking the initial references
	new_node->local_weak_refs++;//take the initial weak and strong references
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;//publish the node as the context manager
	binder_node_unlock(new_node);//done initializing the node
	binder_put_node(new_node);//drop the temporary reference taken by binder_new_node
out:
	mutex_unlock(&context->context_mgr_node_lock);//release the registration lock
	return ret;
}

第三、调用binder_loop(bs, svcmgr_handler);进入循环等待Client进程请求的流程:
由于ServiceManager需要在系统运行期间为Service组件和Client组建提供服务,因此,它就需要通过一个无限循环来等待和处理Service组件和Client组件的进程间通信请求,
这是通过binder_loop函数来实现的。binder_loop函数定义在"frameworks/native/cmds/servicemanager/binder.c"文件中:
 

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;//read/write descriptor passed to the driver (uapi binder.h)
    uint32_t readbuf[32];//read buffer: 32 x 32-bit words (128 bytes)

    bwr.write_size = 0;//nothing to write on the read passes
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;//no write buffer (this is a pointer value, not a size)

    //enter looper state; protocol codes (uapi binder.h):
    //BC_ENTER_LOOPER = _IO('c', 12),
    //BC_EXIT_LOOPER = _IO('c', 13),
    readbuf[0] = BC_ENTER_LOOPER;
    //send BC_ENTER_LOOPER to the driver via the BINDER_WRITE_READ ioctl so
    //the driver marks this thread as a looper thread
    binder_write(bs, readbuf, sizeof(uint32_t));

    //loop forever, blocking in the driver until work arrives
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        //handled in the kernel by binder_ioctl -> binder_ioctl_write_read
        //(kernel4.4/drivers/android/binder.c)
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//block until the driver returns work

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        //decode the returned commands; BR_TRANSACTION ends up in func (svcmgr_handler)
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

kernel4.4/include/uapi/linux/android/binder.h

/*
 * On 64-bit platforms where user code may run in 32-bits the driver must
 * translate the buffer (and local binder) addresses appropriately.
 */

/* Argument of the BINDER_WRITE_READ ioctl: describes the user-space
 * buffers the driver should consume (write_*) and fill (read_*). */
struct binder_write_read {
	binder_size_t		write_size;	/* bytes to write */
	binder_size_t		write_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	write_buffer;
	binder_size_t		read_size;	/* bytes to read */
	binder_size_t		read_consumed;	/* bytes consumed by driver */
	binder_uintptr_t	read_buffer;
};

由上函数可知,
第一个参数:bs指向前面在函数binder_open中创建的一个binder_state结构体; 
第二个参数:func就是下面说的svcmgr_handler函数,它是用来处理Service组件和Client组件的进程间通信请求的。
由于ServiceManager进程的主线程是主动成为一个Binder线程的,因此,它就使用BC_ENTER_LOOPER协议将自己注册到Binder驱动程序中。
    代码中,首先将BC_ENTER_LOOPER协议代码写入到缓冲区readbuf中,接着调用函数binder_write将它发送到Binder驱动程序中。函数binder_write的实现如下所示:

// Send len bytes of binder protocol commands (data) to the driver.
// read_size is left at 0, so the ioctl returns as soon as the write
// buffer is consumed instead of blocking for incoming work.
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;//empty read buffer: do not wait for replies
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

由于BC_ENTER_LOOPER协议是通过IO控制命令BINDER_WRITE_READ发送到Binder驱动程序中的,而IO控制命令BINDER_WRITE_READ后面跟的参数是一个binder_write_read结构体。
因此,binder_write函数首先定义了一个binder_write_read结构体bwr,接着将data所指向的一块缓冲区作为它的输入缓冲区。
接下来,将binder_write_read结构体bwr的输出缓冲区设置为空,这样,当前线程将自己注册到Binder驱动程序中之后,就会马上返回到用户空间,
而不会在Binder驱动程序中等待Client进程的通信请求。
由于参数data所指向的一块缓冲区的内容已经被设置为BC_ENTER_LOOPER协议代码,因此,接下来就可以调用函数ioctl将当前线程注册到Binder驱动程序中了。
IO控制命令BINDER_WRITE_READ是由Binder驱动程序中的函数binder_ioctl负责处理的,代码如下所示:
    

 代码首先还是用函数binder_get_thread来获取与当前线程对应的一个binder_thread结构体,并且保存在变量thread中。
 当前线程即为ServiceManager进程的主线程,前面它将ServiceManager注册为Binder进程间通信机制的上下文管理者时,
 Binder驱动程序已经为它创建过一个binder_thread结构体了,因此,调用函数binder_get_thread时,就可以直接获得该binder_thread结构体。
 copy_from_user是从用户空间传过来的一个binder_write_read结构体拷贝出来,并且保存在变量bwr中。


主要的操作就只有上面几行代码,其余的都已省略。
binder_write函数会调用 ioctl 发送 BC_ENTER_LOOPER 命令告诉 binder 驱动当前线程已进入消息循环状态。
接下来的死循环,从 binder 驱动读取消息到 &bwr,如果没有消息就会阻塞直到被唤醒,读取到消息后再调用binder_parse解析 &bwr 中的消息内容。
binder_parse会调用 func 函数处理请求。func 参数传入的值是一个指向svcmgr_handler函数的函数指针,所以具体的如添加查找函数的请求处理主要都在 svcmgr_handler函数中了。
处理完请求回到binder_parse 后调用binder_send_reply向驱动返回处理的结果(根据请求类型如果不需要回复就没有这一步)

// Decode the command stream the driver placed in [ptr, ptr+size) and
// dispatch each BR_* command; func handles incoming transactions.
// Returns 1 to keep looping, 0 when a reply was consumed, -1 on error.
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;//each entry starts with a 32-bit BR_* code
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            //reference-count commands carry a binder_ptr_cookie payload; skip it
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            //incoming request: payload is a binder_transaction_data
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];//reply data area (256 bytes)
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);//wrap the transaction buffer for reading
                res = func(bs, txn, &msg, &reply);//invoke the handler (svcmgr_handler)
                if (txn->flags & TF_ONE_WAY) {
                    //one-way call: no reply is sent, just release the buffer
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            //reply to a request this process sent earlier
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;//only the first reply is captured
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            //death notification: payload is the cookie registered with the driver
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);//invoke the registered death callback
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}

往binder_loop函数中传递的那个函数指针是svcmgr_handler,它的代码如下所示:

// Handle one transaction addressed to the service manager.
// Returns 0 on success, -1 on failure; reply data is written into *reply.
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;
    uint32_t dumpsys_priority;

    //ALOGI("target=%p code=%d pid=%d uid=%d\n",
    //      (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target.ptr != BINDER_SERVICE_MANAGER)//only accept requests addressed to us
        return -1;

    if (txn->code == PING_TRANSACTION)//ping carries no payload and needs no reply data
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    //the interface token must be "android.os.IServiceManager" (svcmgr_id)
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    //refresh the SELinux label handle if the policy has been updated
    if (sehandle && selinux_status_updated() > 0) {
#ifdef VENDORSERVICEMANAGER
        struct selabel_handle *tmp_sehandle = selinux_android_vendor_service_context_handle();
#else
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
#endif
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);//name of the service to look up
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);//reply with a reference to the found service
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);//name of the service being registered
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);//binder handle of the service object
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        dumpsys_priority = bio_get_uint32(msg);
        if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,
                           txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);//index of the service entry to report
        uint32_t req_dumpsys_priority = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid, txn->sender_euid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                    txn->sender_euid);
            return -1;
        }
        si = svclist;
        // walk through the list of services n times skipping services that
        // do not support the requested priority
        while (si) {
            if (si->dumpsys_priority & req_dumpsys_priority) {
                if (n == 0) break;
                n--;
            }
            si = si->next;
        }
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);//generic success status
    return 0;
}

查找binder的服务

struct svcinfo
{
    struct svcinfo *next; //next entry in the singly linked service list
    uint32_t handle; //binder handle of the service
    struct binder_death death; //death-notification record for the service
    int allow_isolated;//whether isolated processes may access the service
    uint32_t dumpsys_priority;//dumpsys priority flags for the service
    size_t len;//length of name, in uint16_t units
    uint16_t name[0];//service name as UTF-16 code units (trailing variable-length array)
};

struct svcinfo *svclist = NULL;

//look up a service named s16 (length len, in uint16_t units) in svclist
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;//list cursor

    for (si = svclist; si; si = si->next) {//walk the registered-service list
        if ((len == si->len) &&//name lengths must match
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {//and the UTF-16 names must be equal
            return si;
        }
    }
    return NULL;
}


//resolve a service name to its binder handle; returns 0 if the service
//is unknown or the caller is not allowed to access it
uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    struct svcinfo *si = find_svc(s, len);//look the name up in svclist

    if (!si || !si->handle) {
        return 0;
    }

    if (!si->allow_isolated) {
        // If this service doesn't allow access from isolated processes,
        // then check the uid to see if it is isolated.
        uid_t appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
            return 0;
        }
    }
    //SELinux check: may the caller find this service?
    if (!svc_can_find(s, len, spid, uid)) {
        return 0;
    }

    return si->handle;
}


//register a binder service: validate the request, then update an existing
//svcinfo entry or allocate and link a new one into svclist
int do_add_service(struct binder_state *bs, const uint16_t *s, size_t len, uint32_t handle,
                   uid_t uid, int allow_isolated, uint32_t dumpsys_priority, pid_t spid) {
    struct svcinfo *si;

    //ALOGI("add_service('%s',%x,%s) uid=%d\n", str8(s, len), handle,
    //        allow_isolated ? "allow_isolated" : "!allow_isolated", uid);

    if (!handle || (len == 0) || (len > 127))//reject null handles and bad name lengths
        return -1;

    if (!svc_can_register(s, len, spid, uid)) {//SELinux check: may the caller register this name?
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
             str8(s, len), handle, uid);
        return -1;
    }

    si = find_svc(s, len);
    if (si) {
        //name already registered: report the old handle dead and overwrite it
        if (si->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s, len), handle, uid);
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else {
        //new name: allocate svcinfo plus space for the name and its terminator
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                 str8(s, len), handle, uid);
            return -1;
        }
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;//death callback for this service
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->dumpsys_priority = dumpsys_priority;
        si->next = svclist;//push the new entry onto the head of the list
        svclist = si;
    }

    binder_acquire(bs, handle);//take a strong reference on the service's handle
    binder_link_to_death(bs, handle, &si->death);//request a death notification
    return 0;
}

//interface token of the service manager: "android.os.IServiceManager" as UTF-16
uint16_t svcmgr_id[] = {
    'a','n','d','r','o','i','d','.','o','s','.',
    'I','S','e','r','v','i','c','e','M','a','n','a','g','e','r'
};

这个svclist指针链表是保存服务的链表,是链表指针

struct svcinfo *svclist = NULL;

至此,service_manager.c的启动流程基本上就分析到此为止;

参考链接:http://ju.outofmemory.cn/entry/111989
ioctl说明参考链接:https://blog.csdn.net/zifehng/article/details/59576539

  • 2
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值