Binder: A Complete Summary

Communication from the framework layer to native

We analyze Binder's communication flow by following a startService() call. A separate article will cover startService() itself in detail.

Here we only outline the flow:

(diagram of the startService() Binder call flow omitted)

When AMP.startService() is called:

1 startService()

public ComponentName startService(...) throws RemoteException
{
    ...
    mRemote.transact(START_SERVICE_TRANSACTION, data, reply, 0);
    ComponentName res = ComponentName.readFromParcel(reply);
    ...
    return res;
}

We know that mRemote is a BinderProxy object, so let's step into transact():

2 BinderProxy.transact()

final class BinderProxy implements IBinder {
    public boolean transact(...) throws RemoteException {
        ...
        return transactNative(code, data, reply, flags);
    }
}

This then calls into native code:

3 android_os_BinderProxy_transact()

static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
    jint code, jobject dataObj, jobject replyObj, jint flags)
{
    Parcel* data = parcelForJavaObject(env, dataObj);
    Parcel* reply = parcelForJavaObject(env, replyObj);
    //initialized in register_android_os_Binder()
    //gBinderProxyOffsets.mObject holds the native BpBinder(handle) object
    IBinder* target = (IBinder*) env->GetLongField(obj, gBinderProxyOffsets.mObject);
    ...
    status_t err = target->transact(code, *data, reply, flags);
    ...
    if (err == NO_ERROR) {
        return JNI_TRUE;
    }
    signalExceptionForError(env, obj, err, true /*canThrowRemoteException*/, data->dataSize());
    return JNI_FALSE;
}

4 BpBinder::transact()

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);//[5]
        ...
        return status;
    }
    return DEAD_OBJECT;
}

5 IPCThreadState::transact()

The call continues downward:

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    ....
    if (err == NO_ERROR) {
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    // a non-oneway call has to wait for the server's reply
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            err = waitForResponse(reply);//[6]
        }else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }
    return err;
}

6 IPCThreadState::waitForResponse()

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;//[7]
        err = mIn.errorCheck();
        cmd = mIn.readInt32();
        switch (cmd) {
        case BR_DEAD_REPLY: 
            err = DEAD_OBJECT;         goto finish;
        case BR_FAILED_REPLY: 
            err = FAILED_TRANSACTION;  goto finish;
        case BR_REPLY: ...             goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
}

7 IPCThreadState::talkWithDriver()

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;
    ...
    do {
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
        ...
    } while (err == -EINTR);
    ...
    if (err >= NO_ERROR) {
        ...
        return NO_ERROR;
    }
    return err;
}

Communication from native to the driver

  • 1. At system startup, binder_init() calls misc_register() to create the device node and register the binder device.
  • 2. After that, the device is used through three functions:

open()  //open the device node
mmap()  //map a block of memory for transactions
ioctl() //talk to the driver

These three calls map onto the main operations of the binder driver. The correspondence with the driver layer is:

User-space to driver mapping:
User space        Driver
open()    ->    binder_open()
mmap()    ->    binder_mmap()
ioctl()   ->    binder_ioctl()
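To make the correspondence concrete, here is a minimal user-space sketch modeled on servicemanager's binder.c; the struct layout and the function name binder_open_sketch() are illustrative, and error handling is trimmed:

#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <stddef.h>

struct binder_state {
    int fd;
    void *mapped;
    size_t mapsize;
};

/* minimal sketch of the user-space open sequence; names are illustrative */
int binder_open_sketch(struct binder_state *bs, size_t mapsize)
{
    bs->fd = open("/dev/binder", O_RDWR);            /* -> binder_open() in the driver */
    if (bs->fd < 0)
        return -1;

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ,      /* -> binder_mmap() in the driver */
                      MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        close(bs->fd);
        return -1;
    }

    /* from here on, all traffic goes through:
     * ioctl(bs->fd, BINDER_WRITE_READ, &bwr);        -> binder_ioctl() in the driver */
    return 0;
}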

Next, let's look at what each of these three driver functions actually does.

binder_open()
1. Allocate a binder_proc (the per-process binder bookkeeping structure)
2. Initialize the binder_proc's todo list
3. Initialize the binder_proc's wait queue
4. Initialize the binder_proc's default priority
5. Point the file's private_data at the binder_proc
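Condensed into code, these steps correspond roughly to the following sketch of the driver's binder_open() (bookkeeping and error paths omitted):

/* condensed sketch of the driver's binder_open() */
static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);       /* 1. allocate a binder_proc        */
    if (proc == NULL)
        return -ENOMEM;

    INIT_LIST_HEAD(&proc->todo);                     /* 2. initialize the todo list      */
    init_waitqueue_head(&proc->wait);                /* 3. initialize the wait queue     */
    proc->default_priority = task_nice(current);     /* 4. record the default priority   */
    filp->private_data = proc;                       /* 5. stash proc in the file struct */

    return 0;
}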

binder_mmap()
1. Fetch the binder_proc created in binder_open()
2. Cap the size of the virtual memory mapping at 4 MB
3. Assign the start of the kernel area to proc->buffer
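A similarly condensed sketch of binder_mmap() (page allocation and error paths omitted; field names follow the older driver):

/* condensed sketch of the driver's binder_mmap() */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;        /* 1. proc saved by binder_open()     */

    if ((vma->vm_end - vma->vm_start) > SZ_4M)             /* 2. cap the mapping at 4 MB         */
        vma->vm_end = vma->vm_start + SZ_4M;

    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    proc->buffer = area->addr;                             /* 3. kernel-side start of the buffer */
    proc->buffer_size = vma->vm_end - vma->vm_start;
    /* offset between the user-space mapping and the kernel buffer,
     * used later when handing transaction buffers back to user space */
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

    return 0;
}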

binder_ioctl()
First: what are this function's parameters?
Second: what does the function do with them?
static long binder_ioctl(
        //user space: bs = binder_open(128*1024); bs->fd corresponds to filp
        struct file *filp, 
        //the command to execute
        unsigned int cmd,
        //address of the user data: void __user *ubuf = (void __user *)arg;
        //fetched with copy_from_user(&bwr, ubuf, sizeof(bwr))
        unsigned long arg
    )

So what exactly does binder_ioctl() do?

    wait_event_interruptible(...);            //wait to be woken up
    binder_lock(__func__);                    //take the global binder lock
    thread = binder_get_thread(proc);         //get the calling thread's binder_thread
    switch (cmd) {                            //dispatch on the command
    case BINDER_WRITE_READ: {                 //the read/write command
        copy_from_user(&bwr, ubuf, sizeof(bwr));   //fetch the bwr struct from user space
        if (bwr.write_size > 0) {             //there is data to write
            binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
        }
        if (bwr.read_size > 0) {              //there is data to read
            ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
        }

The core of it is simply to check whether there is data to write and data to read: if there is data to write, binder_thread_write() handles it; if there is data to read, binder_thread_read() handles it.
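Once both calls have run, binder_ioctl() copies bwr back to user space, so the caller sees the updated write_consumed/read_consumed counters (condensed from the end of the BINDER_WRITE_READ case):

/* still inside case BINDER_WRITE_READ of binder_ioctl() */
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {   /* return the updated write_consumed /   */
    ret = -EFAULT;                             /* read_consumed counters to user space  */
    goto err;
}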

Before going further, let's see what the bwr structure looks like:

struct binder_write_read {
    signed long write_size;
    signed long write_consumed;
    unsigned long   write_buffer;
    signed long read_size;
    signed long read_consumed;
    unsigned long   read_buffer;
};

Part 1: a user-space client (as in servicemanager's test code) registers a service by calling svcmgr_publish():

svcmgr_publish(
        bs,       //bs = binder_open(128*1024);
        svcmgr,   //which Binder to target; 0 means service_manager
        argv[1],  //the name of the service being published
        &token    //pointer used as this service's binder object
    );
int svcmgr_publish(
    struct binder_state *bs,
    uint32_t target,
    const char *name, 
    void *ptr
    )

Part 2: svcmgr_publish() packs its arguments into a binder_io and then calls binder_call():

int status;
unsigned iodata[512/4];
struct binder_io msg, reply;

bio_init(&msg, iodata, sizeof(iodata), 4);//the resulting layout is sketched below
bio_put_uint32(&msg, 0);
bio_put_string16_x(&msg, SVC_MGR_NAME);
bio_put_string16_x(&msg, name);
bio_put_obj(&msg, ptr);
//then call:
binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE);

(figure omitted: the binder_io layout after bio_init())
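Since the figure is unavailable: what it showed is the layout that bio_init() sets up. For reference, bio_init() in servicemanager's binder.c looks roughly like this — the first maxoffs offset slots sit at the front of the buffer (offs0/offs) and the payload area (data0/data) starts right after them:

/* roughly as in servicemanager's binder.c */
void bio_init(struct binder_io *bio, void *data, size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t);

    if (n > maxdata) {
        bio->flags = BIO_F_OVERFLOW;
        bio->data_avail = 0;
        bio->offs_avail = 0;
        return;
    }

    bio->data = bio->data0 = (char *) data + n;   /* payload area, after the offsets */
    bio->offs = bio->offs0 = data;                /* offsets array at the very front */
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}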

Part 3: binder_call() wraps the binder_io payload in a binder_transaction_data plus a binder_write_read and hands it to the driver:

int binder_call(
        struct binder_state *bs,
        struct binder_io *msg, 
        struct binder_io *reply,
        uint32_t target, //handle of the target in user space
        uint32_t code    //SVC_MGR_ADD_SERVICE
    )
struct binder_write_read bwr;
struct {
    uint32_t cmd;
    struct binder_transaction_data txn;
} __attribute__((packed)) writebuf;
//pack the message into writebuf
writebuf.cmd = BC_TRANSACTION;
writebuf.txn.target.handle = target;
writebuf.txn.code = code;
writebuf.txn.flags = 0;
writebuf.txn.data_size = msg->data - msg->data0;
writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);
writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;
//pack writebuf into bwr
bwr.write_size = sizeof(writebuf);
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) &writebuf;
Finally it is sent down:
ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

It then arrives at:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)

From the parameter analysis above, what is handed down to the driver is a binder_write_read structure.

One point worth noting in the user-space binder.c:

//read the data passed up from the driver, in a loop
bwr.write_size = sizeof(writebuf);
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) &writebuf;
for (;;) {
    bwr.read_size = sizeof(readbuf);
    bwr.read_consumed = 0;
    bwr.read_buffer = (uintptr_t) readbuf;

    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
...
}

Now let's look at the actual reading and writing of the data.

In the driver's binder.c, binder_ioctl():

if (bwr.write_size > 0) {//there is data to write
    ret = binder_thread_write(
        proc,
        thread,
        (void __user *)bwr.write_buffer, //the write_buffer packed above
        bwr.write_size,
        &bwr.write_consumed
    );
}
if (bwr.read_size > 0) {//there is data to read
    ret = binder_thread_read(
        proc, thread,
        (void __user *)bwr.read_buffer,
        bwr.read_size,
        &bwr.read_consumed,
        filp->f_flags & O_NONBLOCK
    );
}
int binder_thread_write(
    struct binder_proc *proc, 
    struct binder_thread *thread,
    void __user *buffer, 
    int size, 
    signed long *consumed
){
    void __user *ptr = buffer + *consumed;//start: bwr.write_buffer + write_consumed
    void __user *end = buffer + size;//end of bwr.write_buffer
    get_user(cmd, (uint32_t __user *)ptr);//here: BC_TRANSACTION
    switch (cmd) {
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;
            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
    }
}

This brings us to the core function, where the cross-process work actually happens:

//binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
static void binder_transaction(
        struct binder_proc *proc,
        struct binder_thread *thread,
        struct binder_transaction_data *tr, 
        int reply
    ){
        if (tr->target.handle) {//the incoming handle is not 0: look up the binder_ref for this handle in the current process
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle);//find the binder_ref
            target_node = ref->node;//get the corresponding node from the ref
        } else {
        //handle 0 means service_manager
            target_node = binder_context_mgr_node;//the special context-manager node, created through binder_ioctl() (BINDER_SET_CONTEXT_MGR)
        }
        target_proc = target_node->proc;//the structure describing the target process
        //allocate t->buffer; the first argument is the target process, i.e. the buffer is carved out of the target process's mapped memory
        t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
        //copy in both data and offsets; at this point the data has been copied into the target process
        copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size);
        //tr->data.ptr.offsets is the array of offsets to the flat_binder_object entries (the data at the front of the binder_io)
        copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size);
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        switch (fp->type) {
            //a binder entity (flat_binder_object's type field says whether it carries a binder or a handle)
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER: {
                struct binder_ref *ref;
                struct binder_node *node = binder_get_node(proc, fp->binder);
                if (node == NULL) {
                    //create a binder_node for the current (source) process
                    node = binder_new_node(proc, fp->binder, fp->cookie);
                    //from that node, create a ref for the target process
                    ref = binder_get_ref_for_node(target_proc, node);
                    //change the type from entity to reference: BINDER_TYPE_BINDER is the entity type, BINDER_TYPE_HANDLE the reference type
                    if (fp->type == BINDER_TYPE_BINDER)
                        fp->type = BINDER_TYPE_HANDLE;
                    else
                        fp->type = BINDER_TYPE_WEAK_HANDLE;
                }
                ...
        }
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);//queue the binder_transaction's work item on the target's todo list
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
}

In short: the target process is located, the binder_transaction data is copied from user space into kernel space (into a buffer belonging to the target process), and the work is queued on a todo list. On the queuing side, a specific target thread's todo list is preferred when one is known, otherwise the target process's list is used; on the reading side, a thread first drains its own todo list and only then falls back to its process's todo list.
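The queuing-side choice of todo list and wait queue is made earlier in binder_transaction(), before the list_add_tail() shown above; condensed from the driver:

/* condensed from binder_transaction(): pick the target's todo list and wait queue.
 * If a specific target thread is known (e.g. when replying to a waiting thread),
 * queue on that thread; otherwise queue on the process and wake a waiting thread. */
if (target_thread) {
    target_list = &target_thread->todo;
    target_wait = &target_thread->wait;
} else {
    target_list = &target_proc->todo;
    target_wait = &target_proc->wait;
}
/* ...then, as shown above: */
list_add_tail(&t->work.entry, target_list);
if (target_wait)
    wake_up_interruptible(target_wait);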


static int binder_thread_read(
        struct binder_proc *proc,
        struct binder_thread *thread,
        void  __user *buffer, 
        int size,
        signed long *consumed, 
        int non_block
    ){
    void __user *ptr = buffer + *consumed;//start: bwr.read_buffer + read_consumed
    void __user *end = buffer + size;//end of bwr.read_buffer
    put_user(BR_NOOP, (uint32_t __user *)ptr);//first write a BR_NOOP marker
    while (1) {
        if (!list_empty(&thread->todo))//if the thread's own todo list has work, take it
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            //otherwise take work from the process's todo list
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
        }
        switch (w->type) {
            //this type was set on the write path, when binder_transaction() queued the work
            case BINDER_WORK_TRANSACTION: {
                //根据work得到binder_transaction
                t = container_of(w, struct binder_transaction, work);
            } break;

            //also set on the write path when the work was queued; handling it just reports completion and frees w
            case BINDER_WORK_TRANSACTION_COMPLETE: {
                cmd = BR_TRANSACTION_COMPLETE;
                if (put_user(cmd, (uint32_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(uint32_t);

                binder_stat_br(proc, thread, cmd);
                list_del(&w->entry);
                kfree(w);
                binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
            } break;
        }
    }
    ...
    if (t->buffer->target_node) {
        struct binder_node *target_node = t->buffer->target_node;
        tr.target.ptr = target_node->ptr;
        tr.cookie =  target_node->cookie;
        t->saved_priority = task_nice(current);
        if (t->priority < target_node->min_priority &&
            !(t->flags & TF_ONE_WAY))
            binder_set_nice(t->priority);
        else if (!(t->flags & TF_ONE_WAY) ||
             t->saved_priority > target_node->min_priority)
            binder_set_nice(target_node->min_priority);
        cmd = BR_TRANSACTION;//we are returning from the driver to user space, so the command becomes BR_TRANSACTION
    } else {
        tr.target.ptr = NULL;
        tr.cookie = NULL;
        cmd = BR_REPLY;
    }
    //build the binder_transaction_data that is returned to user space (e.g. to service_manager.c); the ioctl then completes
    tr.data_size = t->buffer->data_size;
    tr.offsets_size = t->buffer->offsets_size;
    tr.data.ptr.buffer = (void *)t->buffer->data +proc->user_buffer_offset;
    tr.data.ptr.offsets = tr.data.ptr.buffer +ALIGN(t->buffer->data_size,sizeof(void *));
    if (put_user(cmd, (uint32_t __user *)ptr))

}
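On the user-space side, the stream of BR_* commands produced by binder_thread_read() is consumed by a loop such as servicemanager's binder_loop(); a slightly simplified version is sketched below (binder_write() and binder_parse() are the helpers from the same binder.c file):

/* slightly simplified binder_loop() from servicemanager's binder.c */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;                     /* tell the driver this thread is a looper */
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); /* blocks inside binder_thread_read() */
        if (res < 0)
            break;

        /* walk the BR_* commands (BR_NOOP, BR_TRANSACTION, ...) and dispatch to func */
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res < 0)
            break;
    }
}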

The communication model:

(communication model diagram omitted)

Communication from native back to the Java layer

Start from Binder's constructor in the Java layer:

private native final void init(); 

public Binder() {
    init();
    ...
}

Following it down:

static void android_os_Binder_init(JNIEnv* env, jobject obj)
{
    JavaBBinderHolder* jbh = new JavaBBinderHolder();
    jbh->incStrong((void*)android_os_Binder_init);
    env->SetLongField(obj, gBinderOffsets.mObject, (jlong)jbh);//store the JavaBBinderHolder in the Java Binder's mObject field (gBinderOffsets.mObject)
}

Let's look at JavaBBinderHolder:

class JavaBBinderHolder : public RefBase
{
public:
    sp<JavaBBinder> get(JNIEnv* env, jobject obj)
    {
        ...
        sp<JavaBBinder> b = mBinder.promote();
        if (b == NULL) {
            b = new JavaBBinder(env, obj);//[1.1]
            mBinder = b;
        }
        return b;
    }
    ...
private:
    Mutex           mLock;
    wp<JavaBBinder> mBinder;
};

In JavaBBinder (shown further below) we will see gBinderOffsets.mExecTransact, which refers to a Java-layer method; here is the code that assigns it:

static int int_register_android_os_Binder(JNIEnv* env)
{
    jclass clazz = FindClassOrDie(env, kBinderPathName);

    gBinderOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gBinderOffsets.mExecTransact = GetMethodIDOrDie(env, clazz, "execTransact", "(IJJI)Z");
    gBinderOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");

    return RegisterMethodsOrDie(
        env, kBinderPathName,
        gBinderMethods, NELEM(gBinderMethods));
}

In other words, when int_register_android_os_Binder() runs, the method ID of the Java-layer execTransact() method is stored in gBinderOffsets.mExecTransact.

class JavaBBinder : public BBinder
{
public:
    JavaBBinder(JNIEnv* env, jobject object)
        : mVM(jnienv_to_javavm(env)), mObject(env->NewGlobalRef(object))
    {
        android_atomic_inc(&gNumLocalRefs);
        incRefsCreated(env);
    }
   ...
protected:
   ...
    virtual status_t onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
    {
        JNIEnv* env = javavm_to_jnienv(mVM);
        IPCThreadState* thread_state = IPCThreadState::self();
        const int32_t strict_policy_before = thread_state->getStrictModePolicy();
        //this call invokes the Java execTransact() method, passing the transaction data across
        jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
            code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);

        if (code == SYSPROPS_TRANSACTION) {
            BBinder::onTransact(code, data, reply, flags);
        }
        return res != JNI_FALSE ? NO_ERROR : UNKNOWN_TRANSACTION;
    }
        ...
private:
    JavaVM* const   mVM;
    jobject const   mObject;//the corresponding Java-layer Binder object
};

At this point the Java-layer Binder service and the native-layer BBinder are connected: when a call comes up from the driver and reaches onTransact(), it lands in the Java-layer execTransact() method.

One detail to note: onTransact() is a BBinder virtual function, and JavaBBinder overrides it, so invoking it ends up in execTransact(). But where does onTransact() actually get called? The chain is:

BpBinder::transact()->
IPCThreadState::transact()->
IPCThreadState::waitForResponse()->
IPCThreadState::executeCommand()

Finally, execTransact() in turn calls Binder's onTransact() method.

Summary of this part:

We started from Binder's constructor: a Binder service may be one that SystemServer registers or an anonymous service you create yourself, but either way the constructor runs. The constructor calls the native init() method, and init() creates a JavaBBinderHolder; that holder's get() method returns a JavaBBinder, which extends BBinder and overrides BBinder's onTransact(). The override calls the Java-layer execTransact(), and that is what ties the Java layer to the native layer.

Then we analyzed how a call coming up from the driver reaches BBinder's onTransact(). The call order is:
BpBinder::transact() -> IPCThreadState::transact() -> IPCThreadState::waitForResponse() -> IPCThreadState::executeCommand()
In the previous article we saw that BpBinder is BpServiceManager's member mRemote, i.e. what the native layer obtains through sp<IServiceManager> sm(defaultServiceManager());.

BpBinder talks to the driver via BpBinder::transact() -> IPCThreadState::self()->transact(). On the receiving side, that path ends up in IPCThreadState::executeCommand(), which calls:

sp<BBinder> b((BBinder*)tr.cookie);
error = b->transact(tr.code, buffer, &reply, tr.flags);

Here b->transact() calls onTransact(). Note that b is a BBinder, and as we saw above JavaBBinder extends BBinder, so JavaBBinder's onTransact() is invoked. In that override we saw the call to execTransact(), and Binder.java's execTransact() in turn calls Binder.java's onTransact(), which is the onTransact() generated in our AIDL Stub class.

Finally, we also looked from the Java layer at how a call travels down. In the AIDL-generated file we can see:

//inside the generated proxy's sayhello():
mRemote.transact(Stub.TRANSACTION_sayhello, _data, _reply, 0);

Here mRemote is the BinderProxy, whose native counterpart is the BpBinder.

And with that we have covered the whole flow, from the Java layer down to the driver and back.
