Binder Inter-Process Communication: A Second Summary

Copyright notice: this is an original article by the author, licensed under CC 4.0 BY-SA. Please include the original link and this notice when reposting.
Original link: https://blog.csdn.net/jltxgcy/article/details/76286238

    0x00 Android system startup, seen from the process perspective


    1. SystemServer: builder of the Java-layer Binder machinery. It starts with one main thread plus one child thread handling Binder IPC requests, and another child thread handling Handler (inter-thread message) requests.

    It registers ActivityManagerService with ServiceManager, so that application processes can reach ActivityManagerService through Binder IPC:

ActivityManagerService m = mSelf;  
  
ServiceManager.addService("activity", m);  
    2. servicemanager: the "DNS" of Binder (name-to-handle lookup), implemented in native code.
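    For example, a native client resolves a service name to an IBinder handle through servicemanager roughly like this (a minimal sketch; it assumes the usual libbinder headers such as <binder/IServiceManager.h> and the IMediaPlayerService interface registered further below):

sp<IServiceManager> sm = defaultServiceManager();               // proxy for handle 0, i.e. servicemanager
sp<IBinder> binder = sm->getService(String16("media.player"));  // name -> IBinder proxy (a BpBinder)
sp<IMediaPlayerService> player = interface_cast<IMediaPlayerService>(binder);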

    3. Zygote: the incubator of all application processes; SystemServer sends its process-creation requests to Zygote over a socket, not over Binder.

    4. MediaServer: builder of the C++-layer (native) Binder machinery. It too starts with one main thread plus one child thread handling Binder IPC requests.

sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
AudioFlinger::instantiate();
MediaPlayerService::instantiate();
CameraService::instantiate();
AudioPolicyService::instantiate();
registerExtensions();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
void MediaPlayerService::instantiate() {
    defaultServiceManager()->addService(
            String16("media.player"), new MediaPlayerService());
}
     At the native (C++) layer, MediaPlayerService registers itself with servicemanager via addService.

    0x01 One synchronous Binder IPC transaction


    Client: handling the commands returned by the Binder driver

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = mIn.readInt32();

        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;

        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;
            goto finish;

        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;
            goto finish;

        case BR_ACQUIRE_RESULT:
            {
                LOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
                const int32_t result = mIn.readInt32();
                if (!acquireResult) continue;
                *acquireResult = result ? NO_ERROR : INVALID_OPERATION;
            }
            goto finish;

        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                LOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t),
                            freeBuffer, this);
                    } else {
                        err = *static_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(size_t), this);
                    continue;
                }
            }
            goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }

finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }

    return err;
}
    Client: sending commands to the Binder driver. In user space the call chain is transact() → writeTransactionData() → waitForResponse() → talkWithDriver() → ioctl(BINDER_WRITE_READ); inside the kernel, binder_thread_write then consumes the BC_* commands:
int  
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  
            void __user *buffer, int size, signed long *consumed)//note: consumed is a pointer  
{  
    uint32_t cmd;  
    void __user *ptr = buffer + *consumed;//start position  
    void __user *end = buffer + size;//end position  
  
    while (ptr < end && thread->return_error == BR_OK) {  
        if (get_user(cmd, (uint32_t __user *)ptr))//here cmd is BC_TRANSACTION  
            return -EFAULT;  
        ptr += sizeof(uint32_t);//advance ptr past the command just read  
        ......  
        switch (cmd) {  
        ......  
        case BC_TRANSACTION:  
        case BC_REPLY: {  
            struct binder_transaction_data tr;  
  
            if (copy_from_user(&tr, ptr, sizeof(tr)))//read the IPC data into the binder_transaction_data struct tr  
                return -EFAULT;  
            ptr += sizeof(tr);  
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);//call binder_transaction to handle the BC_TRANSACTION command sent by the process  
            break;  
        }  
                .......  
        *consumed = ptr - buffer;//record how much of the buffer has been consumed so far  
    }  
    return 0;  
}  
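    Of that chain, writeTransactionData is the piece that actually packs the command and a binder_transaction_data into mOut, which talkWithDriver later flushes to the driver (abridged from an AOSP version contemporaneous with the excerpts above):

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.handle = handle;   // which remote object (BpBinder handle) the call targets
    tr.code = code;              // the RPC code, e.g. ADD_SERVICE_TRANSACTION
    tr.flags = binderFlags;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();                         // flat data written into the Parcel
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);   // offsets of binder objects inside the data
        tr.data.ptr.offsets = data.ipcObjects();
    }
    ......
    mOut.writeInt32(cmd);        // e.g. BC_TRANSACTION
    mOut.write(&tr, sizeof(tr)); // flushed to the driver in talkWithDriver()
    return NO_ERROR;
}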


    Server: handling the commands returned by the Binder driver
void IPCThreadState::joinThreadPool(bool isMain)//default value is true  
{  
    .........  
  
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);//isMain is true, so BC_ENTER_LOOPER  
      
    ........  
          
    status_t result;  
    do {  
        int32_t cmd;  
          
        .......  
        result = talkWithDriver();//register this thread into the Binder thread pool, then wait for IPC requests in an endless loop  
        if (result >= NO_ERROR) {  
            size_t IN = mIn.dataAvail();  
            if (IN < sizeof(int32_t)) continue;  
            cmd = mIn.readInt32();  
            ........  
  
  
            result = executeCommand(cmd);//handle the incoming IPC request  
        }  
          
       .........  
        if(result == TIMED_OUT && !isMain) {//never true here, because isMain is true  
            break;  
        }  
    } while (result != -ECONNREFUSED && result != -EBADF);  
  
    ........  
      
    mOut.writeInt32(BC_EXIT_LOOPER);//leave the Binder thread pool  
    talkWithDriver(false);  
}  
status_t IPCThreadState::executeCommand(int32_t cmd)
{
   BBinder* obj;
   RefBase::weakref_type* refs;
   status_t result = NO_ERROR;

   switch (cmd) {
   case BR_ERROR:
       result = mIn.readInt32();
       break;

   case BR_OK:
       break;

   case BR_ACQUIRE:
       ......
       break;

   case BR_RELEASE:
       ......
       break;

   case BR_INCREFS:
       ......
       break;

   case BR_DECREFS:
       ......
       break;

   case BR_ATTEMPT_ACQUIRE:
       ......
       break;

   case BR_TRANSACTION:
       {
           binder_transaction_data tr;
           result = mIn.read(&tr, sizeof(tr));
           LOG_ASSERT(result == NO_ERROR,
               "Not enough command data for brTRANSACTION");
           if (result != NO_ERROR) break;

           Parcel buffer;
           buffer.ipcSetDataReference(
               reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
               tr.data_size,
               reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
               tr.offsets_size/sizeof(size_t), freeBuffer, this);

           const pid_t origPid = mCallingPid;
           const uid_t origUid = mCallingUid;

           mCallingPid = tr.sender_pid;
           mCallingUid = tr.sender_euid;

           int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
           if (gDisableBackgroundScheduling) {
               if (curPrio > ANDROID_PRIORITY_NORMAL) {
                   // We have inherited a reduced priority from the caller, but do not
                   // want to run in that state in this process.  The driver set our
                   // priority already (though not our scheduling class), so bounce
                   // it back to the default before invoking the transaction.
                   setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
               }
           } else {
               if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
                   // We want to use the inherited priority from the caller.
                   // Ensure this thread is in the background scheduling class,
                   // since the driver won't modify scheduling classes for us.
                   // The scheduling group is reset to default by the caller
                   // once this method returns after the transaction is complete.
                   androidSetThreadSchedulingGroup(mMyThreadId,
                                                   ANDROID_TGROUP_BG_NONINTERACT);
               }
           }

           //LOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);

           Parcel reply;
           IF_LOG_TRANSACTIONS() {
               TextOutput::Bundle _b(alog);
               alog << "BR_TRANSACTION thr " << (void*)pthread_self()
                   << " / obj " << tr.target.ptr << " / code "
                   << TypeCode(tr.code) << ": " << indent << buffer
                   << dedent << endl
                   << "Data addr = "
                   << reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
                   << ", offsets addr="
                   << reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
           }
           if (tr.target.ptr) {
               sp<BBinder> b((BBinder*)tr.cookie);
               const status_t error = b->transact(tr.code, buffer, &reply, 0);
               if (error < NO_ERROR) reply.setError(error);

           } else {
               const status_t error = the_context_object->transact(tr.code, buffer, &reply, 0);
               if (error < NO_ERROR) reply.setError(error);
           }

           //LOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
           //     mCallingPid, origPid, origUid);

           if ((tr.flags & TF_ONE_WAY) == 0) {
               LOG_ONEWAY("Sending reply to %d!", mCallingPid);
               sendReply(reply, 0);
           } else {
               LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
           }

           mCallingPid = origPid;
           mCallingUid = origUid;

           IF_LOG_TRANSACTIONS() {
               TextOutput::Bundle _b(alog);
               alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
                   << tr.target.ptr << ": " << indent << reply << dedent << endl;
            }

        }
        break;

    case BR_DEAD_BINDER:
        {
            ......
        } break;

    case BR_CLEAR_DEATH_NOTIFICATION_DONE:
        {
            BpBinder *proxy = (BpBinder*)mIn.readInt32();
            proxy->getWeakRefs()->decWeak(proxy);
        } break;

    case BR_FINISHED:
        result = TIMED_OUT;
        break;

    case BR_NOOP:
        break;

    case BR_SPAWN_LOOPER:
        mProcess->spawnPooledThread(false);
        break;

    default:
        printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
        result = UNKNOWN_ERROR;
        break;
    }

    if (result != NO_ERROR) {
        mLastError = result;
    }

    return result;
}
    Server: sending commands to the Binder driver

    As on the client side, these are processed by binder_thread_write: the server calls sendReply() → waitForResponse() → talkWithDriver() → binder_thread_write to send the BC_REPLY command to the Binder driver.
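    Abridged, sendReply simply packages the reply Parcel under a BC_REPLY command and reuses the same request path, except that it does not wait for reply data itself:

status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
    status_t err;
    status_t statusBuffer;

    // Same helper as the request path, but with BC_REPLY and no target handle.
    err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
    if (err < NO_ERROR) return err;

    // Passing NULL for reply: we only wait for BR_TRANSACTION_COMPLETE, not for data.
    return waitForResponse(NULL, NULL);
}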


    0x02 The roles of ProcessState and IPCThreadState in Binder IPC

    SystemServer and every application process create a process-wide ProcessState singleton in onZygoteInit(); this is where the Binder driver is opened and mmap'ed.

virtual void onZygoteInit()  
    {  
        sp<ProcessState> proc = ProcessState::self();  
        if (proc->supportsProcesses()) {  
            LOGV("App process: starting thread pool.\n");  
            proc->startThreadPool();  
        }         
    }
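    ProcessState::self() constructs that singleton; its constructor is where /dev/binder is opened and mapped into the process (abridged sketch from the same era of AOSP; BINDER_VM_SIZE is roughly 1 MB minus two pages, and open_driver also issues the BINDER_VERSION and BINDER_SET_MAX_THREADS ioctls):

ProcessState::ProcessState()
    : mDriverFD(open_driver())        // open("/dev/binder", O_RDWR), check version, set max threads
    , mVMStart(MAP_FAILED)
    , mThreadPoolStarted(false)
    ......
{
    if (mDriverFD >= 0) {
        // Map the binder device into this process; the driver will place received
        // transaction buffers inside this read-only region.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            close(mDriverFD);
            mDriverFD = -1;
        }
    }
}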

    IPCThreadState is unique per thread within a process (one instance per thread, kept in thread-local storage); talkWithDriver, joinThreadPool and executeCommand are all methods of this class.
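    "Per thread" here means each thread gets its own instance through thread-local storage (abridged):

IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;           // this thread already has its instance
        return new IPCThreadState;   // the constructor calls pthread_setspecific(gTLS, this)
    }
    ......
    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) {
        pthread_key_create(&gTLS, threadDestructor);
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}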

    Let's now look at the roles IPCThreadState and ProcessState play during a Binder IPC call.


    On the client side, BpBinder uses IPCThreadState, and the mProcess member of IPCThreadState points to the ProcessState singleton.
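    Concretely, every remote call goes from the BpBinder proxy to the calling thread's IPCThreadState (abridged):

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it never comes back to life.
    if (mAlive) {
        // mHandle identifies the remote object inside the Binder driver.
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}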



    On the server side, IPCThreadState in turn uses BBinder (the BR_TRANSACTION case shown above calls BBinder::transact); again, its mProcess points to ProcessState.


    0x03 The Binder thread pool

    1. The Binder main thread: created during process start-up, when startThreadPool() calls spawnPooledThread(true). Numbering starts at 1, so the main Binder thread is named binder_1, and this thread never exits.
    2. Ordinary Binder threads: the Binder driver decides, based on whether any Binder thread is currently idle, whether a new one is needed, and has user space call back into spawnPooledThread(false) (isMain = false), as sketched after the code below; such threads are named binder_x.
    3. Other Binder threads: threads that never go through spawnPooledThread but call IPCThreadState::self()->joinThreadPool() directly, adding the current thread to the Binder thread pool. For example, the main threads of mediaserver, system_server and servicemanager are all Binder threads of this kind.

void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);    // synchronize across threads
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}
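    Both startThreadPool and the BR_SPAWN_LOOPER handler end up in spawnPooledThread, which creates a PoolThread whose threadLoop simply calls joinThreadPool (a sketch; the exact thread-name format, e.g. "Binder_1" versus "Binder:<pid>_1", varies between Android versions):

void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        // The name is derived from an increasing per-process sequence number.
        int32_t s = android_atomic_add(1, &mThreadPoolSeq);
        char name[32];
        snprintf(name, sizeof(name), "Binder_%X", s);
        sp<Thread> t = new PoolThread(isMain);   // PoolThread::threadLoop() just calls
                                                 // IPCThreadState::self()->joinThreadPool(mIsMain)
        t->run(name);
    }
}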
    For ordinary Binder threads, binder_thread_read decides whether a new Binder thread is needed; if so, the Binder driver returns BR_SPAWN_LOOPER.

binder_thread_read(){
  ...
retry:
    // the thread is idle when both its todo list and its transaction stack are empty
    wait_for_proc_work = thread->transaction_stack == NULL &&
        list_empty(&thread->todo);

    if (thread->return_error != BR_OK && ptr < end) {
        ...
        put_user(thread->return_error, (uint32_t __user *)ptr);
        ptr += sizeof(uint32_t);
        goto done; // on error, jump straight to done
    }

    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++; // one more idle thread available
    binder_unlock(__func__);

    if (wait_for_proc_work) {
        if (non_block) {
            ...
        } else
            // if the process todo list has no work, sleep and wait
            ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        if (non_block) {
            ...
        } else
            // if the thread todo list has no work, sleep and wait
            ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    }

    binder_lock(__func__);
    if (wait_for_proc_work)
        proc->ready_threads--; // one fewer idle thread
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    if (ret)
        return ret; // for non-blocking calls, return immediately

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        // first try to take work from the thread's own todo list
        if (!list_empty(&thread->todo)) {
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        // if the thread todo list is empty, take work from the process todo list
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        } else {
            ... // no work at all, go back to retry
        }

        switch (w->type) {
            case BINDER_WORK_TRANSACTION: ...  break;
            case BINDER_WORK_TRANSACTION_COMPLETE:...  break;
            case BINDER_WORK_NODE: ...    break;
            case BINDER_WORK_DEAD_BINDER:
            case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
            case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
                struct binder_ref_death *death;
                uint32_t cmd;

                death = container_of(w, struct binder_ref_death, work);
                if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
                  cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
                else
                  cmd = BR_DEAD_BINDER;
                put_user(cmd, (uint32_t __user *)ptr);
                ptr += sizeof(uint32_t);
                put_user(death->cookie, (void * __user *)ptr);
                ptr += sizeof(void *);
                ...
                if (cmd == BR_DEAD_BINDER)
                  goto done; // the driver is delivering a death notification to the client, go to done
                break;
        }

        if (!t)
            continue; // only BINDER_WORK_TRANSACTION falls through to the code below
        ...
        break;
    }

done:
    *consumed = ptr - buffer;
    // conditions for requesting a new thread
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED))) {
        proc->requested_threads++;
        // emit BR_SPAWN_LOOPER, asking user space to spawn a new thread
        put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer);
    }
    return 0;
}
    Handling the BR_SPAWN_LOOPER command and spawning a new Binder thread:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    status_t result = NO_ERROR;
    switch ((uint32_t)cmd) {
      ...
      case BR_SPAWN_LOOPER:
          // spawn a new (non-main) binder thread
          mProcess->spawnPooledThread(false);
          break;
      ...
    }
    return result;
}


    0x04 How the target process or thread is found

    Client--->Server:

    if (target_thread) {//in this client-to-server case target_thread is NULL  
        ........  
        target_list = &target_thread->todo;  
        target_wait = &target_thread->wait;  
    } else {  
        target_list = &target_proc->todo;//target_list and target_wait point to the todo list and wait queue of the target process target_proc  
        target_wait = &target_proc->wait;  
    }  
    If a target thread can be found, the work is queued on that thread's todo list first; otherwise it is queued on the target process's todo list.

    On the receiving side, a thread with nothing to process sleeps on the process's wait queue.

    So when "the process" is woken, what actually gets woken is one of the threads sleeping on the process wait queue.
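    After the target list has been chosen, binder_transaction queues the work and wakes the target (abridged from the driver):

t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);          /* hand the transaction to the target thread/process */

tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);     /* tell the sender "request accepted" */

if (target_wait)
    wake_up_interruptible(target_wait);              /* wake one thread sleeping on the wait queue */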

    When, then, is a specific thread woken directly? Consider the following scenario:

    When thread T1 of process P1 sends a request to process P2, the driver first checks whether T1 is itself in the middle of handling a request from some thread of P2 that it has not yet replied to. This typically happens when both processes hold Binder entities and send requests to each other. If the driver finds such a thread in P2, say T2, it asks T2 to handle T1's request: since T2 sent a request to T1 and has not yet received the reply, T2 must be (or will shortly be) blocked reading the return packet, so it might as well do some useful work instead of sitting idle. Moreover, if T2 is not a thread-pool thread, this also offloads work from the thread pool and lowers its utilization.
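    This optimization lives in binder_transaction: before falling back to the process todo queue, the driver walks the sender's transaction_stack looking for a thread in the target process that is blocked waiting on us (abridged):

    if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
        struct binder_transaction *tmp = thread->transaction_stack;
        ......
        while (tmp) {
            if (tmp->from && tmp->from->proc == target_proc)
                target_thread = tmp->from;   /* reuse the thread in target_proc that is waiting on us */
            tmp = tmp->from_parent;
        }
    }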

    Server--->Client:

    The target thread is found via thread->transaction_stack->from, i.e. the client thread that initiated the call (usually the client's main thread).
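    On the reply path (BC_REPLY), binder_transaction does not look up any handle at all; it pops the sender's transaction_stack and takes the originating thread as the target (abridged):

    if (reply) {
        in_reply_to = thread->transaction_stack;      /* the transaction we are replying to */
        ......
        thread->transaction_stack = in_reply_to->to_parent;
        target_thread = in_reply_to->from;            /* the client thread blocked in waitForResponse */
        ......
        target_proc = target_thread->proc;
    }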


    0x05 Client--->Server: why is the data copied only once?

    When the client sends data to the server, the data ends up in a kernel buffer that belongs to the server: the client copies it from its own user space directly into that kernel buffer (kernel address space). The server can then read the buffer from its own user address space, because the kernel address space and the server's user address space are both mapped onto the same buffer, separated only by a fixed offset, user_buffer_offset. So passing data from client to server takes exactly one copy.

    Sender side:

t->buffer = binder_alloc_buf(target_proc, tr->data_size,  
    tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));//allocate a binder_buffer from the target process's mapped area 
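    The single copy then happens right here in binder_transaction: the sender's user-space Parcel data is copied straight into the buffer just allocated from the target process's mmap'ed region (abridged):

offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

/* the one and only data copy: client user space -> kernel buffer owned by the target process */
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size))
    goto err_copy_data_failed;
/* also copy the offsets array describing the flat_binder_objects inside the data */
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size))
    goto err_copy_data_failed;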
    Receiver side:

tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;//the kernel-space and user-space addresses of the buffer differ by a fixed value, stored in the member user_buffer_offset 
    
