上一章节我们看到了Binder原理;大家了解到,Binder仅仅是进程间通信的中介:它提供内存申请,并根据指定内存地址读写数据;而Binder机制要运转起来,还需要一个进程专门负责根据名字查询对应进程的内存地址;在Android中,这个进程就是Service_manager;今天我们分析它的源码:
首先从main入口开始:
/*
 * service_manager entry point: open the binder driver, claim the
 * context-manager role (handle 0), initialize SELinux checking, then
 * loop forever dispatching incoming requests to svcmgr_handler.
 */
int main(int argc, char **argv)
{
    struct binder_state *state;

    // Open /dev/binder and map 128 KiB of buffer space
    // (see the binder source-analysis chapter for binder_open).
    state = binder_open(128*1024);
    if (state == NULL) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    // Tell the driver that this process is the one and only
    // context manager for the whole system.
    if (binder_become_context_manager(state) != 0) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }
        if (getcon(&service_manager_context) != 0) {
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    }

    // Route SELinux audit and log output through our own callbacks.
    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    svcmgr_handle = BINDER_SERVICE_MANAGER;

    // binder_loop never returns in practice: it blocks in ioctl()
    // and hands every transaction to svcmgr_handler
    // (see the binder source-analysis chapter).
    binder_loop(state, svcmgr_handler);

    return 0;
}
首先将binder_loop的大概代码贴出来:
/*
 * Message pump for a binder thread: announce entry into the loop to the
 * driver, then repeatedly block in ioctl(BINDER_WRITE_READ) and hand every
 * buffer read back to binder_parse() for dispatch via `func`.
 * NOTE: "……" and "。。。。。。" below mark code elided by the article's author.
 */
void binder_loop(struct binder_state *bs, binder_handler func)
{
……
binder_write(bs, readbuf, sizeof(uint32_t));
// enter the endless dispatch loop
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
// blocking read/write against the binder driver
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
。。。。。。
// parse what the driver handed us and dispatch each command to func
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
。。。。。。
}
}
接着
/*
 * Walk the buffer returned by BINDER_WRITE_READ and dispatch each driver
 * command found in it.  For BR_TRANSACTION the registered handler `func`
 * is invoked and its result is sent back with binder_send_reply().
 * NOTE: "。。。。。。" below marks code elided by the article's author.
 */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
// result returned to the caller — worth tracking
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
// loop until we reach the end of the message buffer
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
#if TRACE
fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
// dispatch on the driver command
switch(cmd) {
。。。。。。
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
// reply buffer is backed by rdata; msg wraps the incoming transaction
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
// hand the transaction to the handler
// (for Service_manager this is svcmgr_handler)
res = func(bs, txn, &msg, &reply);
// send the handler's result back to the requesting process
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
// advance past this transaction record
ptr += sizeof(*txn);
break;
}
。。。。。。
}
return r;
}
那么我们首先分析func方法;在Service_manager里对应的是
svcmgr_handler;我们看看svcmgr_handler的源码:
//首先看入参的意义:
第一个代表调用者进程的信息
第二个代表服务进程的一些相关信息,例如名字等
第三个应该是所需要处理的信息所在的内存地址
第四个应该是处理完成后将结果写回的内存区域
因此这个方法的大概作用是:根据txn查找服务进程—》调用服务进程处理掉msg区域的数据—》然后将处理结果写回到reply区域;
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
//这个数据结构定义如下
struct svcinfo
{
struct svcinfo *next;
uint32_t handle;
struct binder_death death;
int allow_isolated;
size_t len;
uint16_t name[0];
};
struct svcinfo *svclist = NULL;
//根据上面的定义可以看出,struct svcinfo是一个链表结构的数据结构
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
//ALOGI("target=%x code=%d pid=%d uid=%d\n",
// txn->target.handle, txn->code, txn->sender_pid, txn->sender_euid);
//以前说过,Service_managerr是一个查找别的进程地址的进程,他自己的地址为0;因此别的进程想要获取Service_manager服务必须要将地址传递为0才可以
if (txn->target.handle != svcmgr_handle)
return -1;
//测试命令,测试service_manager是否在运行
if (txn->code == PING_TRANSACTION)
return 0;
// Equivalent to Parcel::enforceInterface(), reading the RPC
// header with the strict mode policy mask and the interface name.
// Note that we ignore the strict_policy and don't propagate it
// further (since we do no outbound RPCs anyway).
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s, len));
return -1;
}
if (sehandle && selinux_status_updated() > 0) {
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}
//处理业务
switch(txn->code) {
//业务类型:获取服务,检查服务
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
//注册服务
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
//参考binder源码
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
//
case SVC_MGR_LIST_SERVICES: {
uint32_t n = bio_get_uint32(msg);
if (!svc_can_list(txn->sender_pid)) {
ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
txn->sender_euid);
return -1;
}
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
// //应该是将数据按照一定的格式复制到reply所在的内存区域中,详情可以参考Binder源码章节
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
现在我们应该大概知道Service_manager是怎么和Binder配合的了;
Service_manager有一个svcinfo结构的svclist链表;Service_manager提供进程注册功能:所有的服务进程都必须将自己的资源地址和注册的名字存入svclist中;此后每当客户端要访问服务端时,就根据注册的名字通过Service_manager的svclist查询服务端所在的内存资源地址;然后先将请求的指令数据通过Binder复制到指定的服务端内存区域,由服务端进行处理;最后将处理结果写回客户端指定的内存位置;这就是大概的流程;
详细的流程还需要大家个人仔细的研究;本节需要Binder源码分析那一章的基础;希望大家能将两章结合起来看。
剩余主要代码:
/*
 * Resolve a service name to its binder handle.
 *
 * Returns the handle on success, or 0 when the caller lacks permission,
 * the name is unknown, the service has no live handle, or an isolated
 * process queries a service that disallows isolated access.
 */
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    // SELinux check: may this caller look the service up at all?
    if (!svc_can_find(s, len, spid)) {
        ALOGE("find_service('%s') uid=%d - PERMISSION DENIED\n",
              str8(s, len), uid);
        return 0;
    }

    struct svcinfo *svc = find_svc(s, len);
    //ALOGI("check_service('%s') handle = %x\n", str8(s, len), svc ? svc->handle : 0);
    if (!svc || !svc->handle)
        return 0;  // unknown name, or registered but no live handle

    if (!svc->allow_isolated) {
        // Service opted out of isolated access: reject isolated-app UIDs.
        uid_t appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END)
            return 0;
    }

    return svc->handle;
}
/*
 * Register (or re-register) a service under the name s[0..len).
 *
 * Returns 0 on success, -1 on invalid arguments, permission denial,
 * or out-of-memory.  On re-registration the old handle's death handler
 * is invoked before the new handle is stored.
 */
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
struct svcinfo *si;
//ALOGI("add_service('%s',%x,%s) uid=%d\n", str8(s, len), handle,
//  allow_isolated ? "allow_isolated" : "!allow_isolated", uid);
// reject a null handle and unreasonable name lengths
if (!handle || (len == 0) || (len > 127))
return -1;
// SELinux: is this caller allowed to register under this name?
if (!svc_can_register(s, len, spid)) {
ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
str8(s, len), handle, uid);
return -1;
}
si = find_svc(s, len);
if (si) {
// name already known: tear down the old binding, reuse the node
if (si->handle) {
ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
str8(s, len), handle, uid);
svcinfo_death(bs, si);
}
si->handle = handle;
} else {
// new name: allocate node and name storage in one block
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) {
ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
str8(s, len), handle, uid);
return -1;
}
si->handle = handle;
si->len = len;
// NOTE(review): copies len+1 code units from s — assumes the caller's
// string (from bio_get_string16) is NUL-terminated; confirm against caller.
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
// push onto the front of the global service list
si->next = svclist;
svclist = si;
}
// take a strong reference on the handle and ask the driver to notify
// us (via si->death) when the service process dies
binder_acquire(bs, handle);
binder_link_to_death(bs, handle, &si->death);
return 0;
}