Service Manager Analysis

1. What is Service Manager

Service Manager is the daemon process of Binder. On Android, if servicemanager dies, every service that communicates over Binder stops working.
Service Manager is itself a Binder service, with its handle fixed at 0, and it provides two functions: registering services and looking them up. Before an application can send data to a service over Binder, it must first obtain that service's handle from Service Manager; only then can it communicate with the service through the binder driver.
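To make that flow concrete, here is a minimal client-side sketch, modeled on svcmgr_lookup() in bctest.c (a test client that lives in the same servicemanager directory and reuses the binder.c helpers analyzed below). The wrapper name lookup_service is ours; SVC_MGR_NAME, binder_call and the bio_* helpers come from that code:

/* Sketch of a client asking Service Manager (handle 0) for a service's
 * handle, modeled on svcmgr_lookup() in bctest.c. */
uint32_t lookup_service(struct binder_state *bs, const char *name)
{
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);                  /* strict-mode policy header */
    bio_put_string16_x(&msg, SVC_MGR_NAME);   /* "android.os.IServiceManager" */
    bio_put_string16_x(&msg, name);           /* the service we are looking for */

    /* target handle 0 == Service Manager, code == SVC_MGR_CHECK_SERVICE */
    if (binder_call(bs, &msg, &reply, 0, SVC_MGR_CHECK_SERVICE))
        return 0;

    handle = bio_get_ref(&reply);             /* handle, already translated by the driver */
    if (handle)
        binder_acquire(bs, handle);           /* take a strong reference on it */

    binder_done(bs, &msg, &reply);            /* release the reply buffer */
    return handle;
}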

2. Source Code Location

The Service Manager code lives in
frameworks/native/cmds/servicemanager
Looking at its Android.mk shows that the servicemanager executable is built from service_manager.c and binder.c:

include $(CLEAR_VARS)
LOCAL_SHARED_LIBRARIES := liblog libcutils libselinux
LOCAL_SRC_FILES := service_manager.c binder.c
LOCAL_CFLAGS += $(svc_c_flags)
LOCAL_MODULE := servicemanager
LOCAL_INIT_RC := servicemanager.rc
include $(BUILD_EXECUTABLE)

3. Flow Analysis

Since Service Manager is itself a service, its logic should follow the standard service flow:
1. Open the binder driver.
2. Register a service with the binder driver; that service is servicemanager itself.
3. Enter a loop that polls the binder driver, checking whether other processes want to talk to Service Manager (to register a service, look one up, and so on).

Tracing through the source: between service_manager.c and binder.c there is only one main() function, so execution must start from main() in service_manager.c.

int main()
{
    struct binder_state *bs;			// per-process binder state: the driver fd, the mmap address, and its size

    bs = binder_open(128*1024);	// binder_open() opens the binder driver and maps it with mmap()
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    if (binder_become_context_manager(bs)) {	// tell the binder driver that this process is servicemanager; effectively registers a service with handle 0
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);

    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }

        if (getcon(&service_manager_context) != 0) {
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    }

    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    binder_loop(bs, svcmgr_handler);	// enter the loop and keep polling the binder driver; svcmgr_handler handles incoming data

    return 0;
}

binder_open() opens the binder driver and maps it with mmap():

struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);	// open the "/dev/binder" device
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);	// map the driver's buffer into this process (read-only)
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

binder_become_context_manager() tells the driver that the current process is Service Manager; to see what it really does, it has to be read together with the driver (a sketch of the driver side follows the function below).

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
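On the driver side, this ioctl lands in the BINDER_SET_CONTEXT_MGR case of binder_ioctl(). The fragment below is a simplified paraphrase of that case from drivers/staging/android/binder.c in kernels of this era (security checks and error handling trimmed), so treat it as a sketch rather than the exact source:

case BINDER_SET_CONTEXT_MGR:
    if (binder_context_mgr_node != NULL) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto err;
    }
    binder_context_mgr_uid = current->cred->euid;
    /* A binder_node with ptr == 0 and cookie == 0 is created for this
     * process; this is why handle 0 always refers to Service Manager. */
    binder_context_mgr_node = binder_new_node(proc, 0, 0);
    break;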

With the initialization above done, Service Manager enters the binder_loop() function and starts polling the binder driver, checking for data addressed to it:

void binder_loop(struct binder_state *bs, binder_handler func)				//binder_loop(bs, svcmgr_handler);
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));				// tell the binder driver that service manager is starting to listen

    for (;;) {																		// loop forever
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);                   // read data into a struct binder_write_read

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);		// parse and handle the data just read; func is svcmgr_handler
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

Note that before entering the loop, binder_loop() first tells the driver that Service Manager is starting to listen; only after receiving this message can the binder driver begin dispatching work normally.
The write here goes through binder_write(), a small wrapper around ioctl() defined in binder.c, whereas the read in the loop calls ioctl() directly, with no wrapper:

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

From the code above you can see that data exchanged with the binder driver travels in a struct binder_write_read.
This structure is declared in *external/kernel-headers/original/uapi/linux/android/binder.h*:

struct binder_write_read {
        binder_size_t           write_size;     /* bytes to write */
        binder_size_t           write_consumed; /* bytes consumed by driver */
        binder_uintptr_t        write_buffer;
        binder_size_t           read_size;      /* bytes to read */
        binder_size_t           read_consumed;  /* bytes consumed by driver */
        binder_uintptr_t        read_buffer;
};

If Service Manager's polling finds a communication from another process, it enters binder_parse() to handle it:

int binder_parse(struct binder_state *bs, struct binder_io *bio,    //binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;     // ptr = binder_write_read.read_buffer

    while (ptr < end) {                         		// walk the commands in the read buffer
        uint32_t cmd = *(uint32_t *) ptr;		// read the command word out of read_buffer
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {			// a data-carrying transaction
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;		// the payload is a binder_transaction_data
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);                                       // debug helper that can dump the contents of txn
            if (func) {                                                 // here func is the svcmgr_handler passed in
                unsigned rdata[256/4];
                struct binder_io msg;                                   // a struct binder_io used to read the data carried by txn
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);                           // initialize the binder_io from txn
                res = func(bs, txn, &msg, &reply);                      // let the supplied handler process the message
                if (txn->flags & TF_ONE_WAY) {
                    binder_free_buffer(bs, txn->data.ptr.buffer);       // one-way call: no reply needed, just free the buffer
                } else {
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);   // send the reply back
                }
                }
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}
The binder_transaction_data carried by the BR_TRANSACTION and BR_REPLY commands is defined as follows:

struct binder_transaction_data {
        /* The first two are only used for bcTRANSACTION and brTRANSACTION,
         * identifying the target and contents of the transaction.
         */
        union {
                /* target descriptor of command transaction */
                __u32   handle;
                /* target descriptor of return transaction */
                binder_uintptr_t ptr;
        } target;
        binder_uintptr_t        cookie; /* target object cookie */
        __u32           code;           /* transaction command */

        /* General information about the transaction. */
        __u32           flags;
        pid_t           sender_pid;
        uid_t           sender_euid;
        binder_size_t   data_size;      /* number of bytes of data */
        binder_size_t   offsets_size;   /* number of bytes of offsets */

        /* If this transaction is inline, the data immediately
         * follows here; otherwise, it ends with a pointer to
         * the data buffer.
         */
        union {
                struct {
                        /* transaction data */
                        binder_uintptr_t        buffer;
                        /* offsets from buffer to flat_binder_object structs */
                        binder_uintptr_t        offsets;
                } ptr;
                __u8    buf[8];
        } data;
};

Next, svcmgr_handler() processes the data:

int svcmgr_handler(struct binder_state *bs,                 //func(bs, txn, &msg, &reply);
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%p code=%d pid=%d uid=%d\n",
    //      (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);						// read the string16 svcmgr_id; len receives the length read
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||                 
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }

    switch(txn->code) {										// txn->code selects which Service Manager function the caller wants
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid, txn->sender_euid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                    txn->sender_euid);
            return -1;
        }
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}

From the code above, Service Manager involves three kinds of structures:

  • struct binder_write_read: the structure used when exchanging data with the binder driver
  • struct binder_io: holds the data of a binder IPC call; the values the client wrote in can be read back from it directly (see the sketch after this list)
  • struct binder_transaction_data: carried inside binder_write_read's read_buffer or write_buffer; it wraps the data destined for a service
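To make the binder_io point concrete: bio_init_from_txn() in binder.c simply points the binder_io cursors at the buffers the driver handed over inside the transaction (reproduced from binder.c for reference; the field names match that file):

void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;             /* payload bytes */
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;   /* object offsets */
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;          /* buffer is owned by the driver, not malloc'd */
}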

The service registration process

From the code analysis above, we know the registration function is called inside svcmgr_handler:
when txn->code is SVC_MGR_ADD_SERVICE, the request is a service registration.

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);		// s is the service's name
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);					// fetch the service's handle (its descriptor within this process)
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

It in turn calls bio_get_ref() to obtain this service's descriptor (reference):

uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *obj;		

    obj = _bio_get_obj(bio);
    if (!obj)
        return 0;

    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->handle;

    return 0;
}

bio_get_ref() fetches the binder object's handle, which behaves much like a file descriptor.
Every service has a corresponding binder object inside the binder driver, but those objects are not exposed to every process.
Instead, each process has a descriptor table, analogous to its file-descriptor table, listing the handle of every service that process has opened.
The handle here is this service's descriptor within the servicemanager process. It is read out of the binder_io structure, which means it was passed over by the driver: the descriptor tables actually live inside the driver, one per binder process, and when talking to the driver it is this descriptor that identifies the target service.
Adding a service ends with the new service being recorded in a global linked list named svclist, as the sketch below shows.
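For reference, here is do_add_service() from service_manager.c, abridged (the svc_can_register() permission check and error logging are left out), showing both the re-registration path and the insertion into svclist:

int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid,
                   int allow_isolated, pid_t spid)
{
    struct svcinfo *si;

    if (!handle || (len == 0) || (len > 127))
        return -1;

    si = find_svc(s, len);
    if (si) {
        if (si->handle)
            svcinfo_death(bs, si);      /* service re-registered: drop the stale handle */
        si->handle = handle;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si)
            return -1;
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void *)svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;             /* prepend to the global svclist */
        svclist = si;
    }

    binder_acquire(bs, handle);                    /* keep a strong reference */
    binder_link_to_death(bs, handle, &si->death);  /* be notified if the service dies */
    return 0;
}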

/*
 * This is the flattened representation of a Binder object for transfer
 * between processes.  The 'offsets' supplied as part of a binder transaction
 * contains offsets into the data where these structures occur.  The Binder
 * driver takes care of re-writing the structure type and data as it moves
 * between processes.
 */
struct flat_binder_object {
        /* 8 bytes for large_flat_header. */
        __u32           type;
        __u32           flags;

        /* 8 bytes of data. */
        union {
                binder_uintptr_t        binder; /* local object */
                __u32                   handle; /* remote object */
        };

        /* extra data associated with local object */
        binder_uintptr_t        cookie;
};

The data transferred between processes is called a Binder object and is represented as a flat_binder_object: its type field describes the kind of Binder object, and flags describes how it is transferred (asynchronous, no reply, and so on).
The union inside flat_binder_object is the payload itself: when type is BINDER_TYPE_BINDER the payload is the local object binder, and when type is BINDER_TYPE_HANDLE it is the remote object handle.
As a flat_binder_object travels between processes, the binder driver rewrites its type and payload (see the sketch below).
The local object and the remote object ultimately both refer to the same service.
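That rewriting happens in binder_transaction() inside the kernel driver. The fragment below is a simplified paraphrase (older staging driver, error handling omitted) of the case where the sender passes a local object and the receiver ends up with a handle:

case BINDER_TYPE_BINDER: {
    /* The sender passed a local object: find (or create) the binder_node
     * that represents it, then give the receiving process a reference. */
    struct binder_node *node = binder_get_node(proc, fp->binder);
    if (node == NULL)
        node = binder_new_node(proc, fp->binder, fp->cookie);
    ref = binder_get_ref_for_node(target_proc, node);

    fp->type = BINDER_TYPE_HANDLE;   /* the local object becomes a remote handle */
    fp->handle = ref->desc;          /* desc is per-process, like a file descriptor */
    break;
}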

The service lookup process

    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    struct svcinfo *si = find_svc(s, len);
...
    return si->handle;
}

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}

Looking up a service simply means searching the svclist by name. If the service is found, its handle is put into the reply; as the reply passes through the binder driver, the driver checks whether the client process has already opened this service. If not, it adds an entry for the service to the client process's descriptor table, and then swaps the handle for the value in the client's own table.
Once the client application receives the service's handle, it can use that handle to initiate communication with the service.
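For completeness, putting that handle into the reply is done by bio_put_ref() in binder.c, which appends a flat_binder_object of type BINDER_TYPE_HANDLE (reproduced from binder.c):

void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;

    if (handle)
        obj = bio_alloc_obj(bio);           /* also records an entry in the offsets array */
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE;         /* a reference to a remote object */
    obj->handle = handle;
    obj->cookie = 0;
}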
