A Simple Understanding of Android Binder, Part 2

In the previous post, servicemanager opened /dev/binder, entered its for loop, and is now blocked waiting for commands in wait_event_interruptible(thread->wait, binder_has_thread_work(thread)). So when does a command actually arrive?

servicemanager is the manager of all the other services: a service registers itself with servicemanager via addService, and a client later obtains that service from servicemanager via getService. Naturally a service must be added before it can be looked up, so let's start with what addService actually does:


addService:

Where does this addService call come from? Let's take mediaserver as the example; many articles online use it, and this post borrows from 大运天成赖搏击's blog:

http://www.cnblogs.com/linucos/archive/2012/05/24/2516852.html

I'll only walk through it briefly here; see the link above for the details.

frameworks/av/media/mediaserver/main_mediaserver.cpp

 40 int main(int argc, char** argv)
 41 {
 42     signal(SIGPIPE, SIG_IGN);
 43     char value[PROPERTY_VALUE_MAX];
 44     bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
 45     pid_t childPid;
 46     // FIXME The advantage of making the process containing media.log service the parent process of
 47     // the process that contains all the other real services, is that it allows us to collect more
 48     // detailed information such as signal numbers, stop and continue, resource usage, etc.
 49     // But it is also more complex.  Consider replacing this by independent processes, and using
 50     // binder on death notification instead.
 51     if (doLog && (childPid = fork()) != 0) {
........ // this branch is not taken
118     } else {
119         // all other services
120         if (doLog) {
121             prctl(PR_SET_PDEATHSIG, SIGKILL);   // if parent media.log dies before me, kill me also
122             setpgid(0, 0);                      // but if I die first, don't kill my parent
123         }
124         sp<ProcessState> proc(ProcessState::self());
125         sp<IServiceManager> sm = defaultServiceManager();
126         ALOGI("ServiceManager: %p", sm.get());
127         AudioFlinger::instantiate();
128         MediaPlayerService::instantiate();
129         CameraService::instantiate();
130         AudioPolicyService::instantiate();
131         registerExtensions();
132         ProcessState::self()->startThreadPool();
133         IPCThreadState::self()->joinThreadPool();
134     }
135 }

sp<ProcessState> proc(ProcessState::self());

This call opens /dev/binder: mDriverFD(open_driver()),

and mmaps part of it: mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
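For reference, here is a simplified sketch of what ProcessState's constructor does (abridged from frameworks/native/libs/binder/ProcessState.cpp of this era; most members and error logging are omitted, so treat it as a sketch rather than the complete code):

#define BINDER_VM_SIZE ((1*1024*1024) - (4096*2))   // ~1MB minus two pages

ProcessState::ProcessState()
    : mDriverFD(open_driver())   // open("/dev/binder", O_RDWR) plus BINDER_VERSION / BINDER_SET_MAX_THREADS ioctls
    , mVMStart(MAP_FAILED)
    , mThreadPoolStarted(false)
{
    if (mDriverFD >= 0) {
        // Map part of the driver into this process so that transaction data
        // received later can be read straight out of the kernel buffer.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ,
                        MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            close(mDriverFD);
            mDriverFD = -1;
        }
    }
}

Because ProcessState is a per-process singleton, the device is opened and mapped exactly once per process.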
sp<IServiceManager> sm = defaultServiceManager();

Inside defaultServiceManager() the interesting line is:

gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));

ProcessState::self() returns the same gProcess that the line sp<ProcessState> proc(ProcessState::self()); created above, so this is gProcess->getContextObject(0), which reduces to:

gDefaultServiceManager = interface_cast<IServiceManager>(new BpBinder(0));

First look at new BpBinder(0):

BpBinder::BpBinder(int32_t handle) : mHandle(handle)
{
    IPCThreadState::self()->incWeakHandle(handle);
}

IPCThreadState::self() essentially boils down to new IPCThreadState the first time it runs on a thread, and the constructor is:

678 IPCThreadState::IPCThreadState()
 679     : mProcess(ProcessState::self()),
 680       mMyThreadId(androidGetTid()),
 681       mStrictModePolicy(0), 
 682       mLastTransactionBinderFlags(0)
 683 {       
 684     pthread_setspecific(gTLS, this);
 685     clearCaller();
 686     mIn.setDataCapacity(256);// mIn and mOut are the two 256-byte Parcels used to receive and send data
 687     mOut.setDataCapacity(256);
 688 }

Now back to gDefaultServiceManager = interface_cast<IServiceManager>(new BpBinder(0));

interface_cast is, as far as I can tell, just there so the same mechanism works for any interface class; substituting it in gives:

gDefaultServiceManager = IServiceManager::asInterface(new BpBinder(0));
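The template itself, from frameworks/native/include/binder/IInterface.h, is roughly just a one-line forwarder:

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}

So interface_cast<IServiceManager>(...) is nothing more than a call to IServiceManager::asInterface(...).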

frameworks/native/include/binder/IServiceManager.h

30 class IServiceManager : public IInterface
 31 {
 32 public:
 33     DECLARE_META_INTERFACE(ServiceManager);
 34 
.........
39     virtual sp<IBinder>         getService( const String16& name) const = 0;
49     virtual status_t            addService( const String16& name,
 50                                             const sp<IBinder>& service,
 51                                             bool allowIsolated = false) = 0;
.........
};

This declares a handful of methods; getService and addService are the two that matter here.

frameworks/native/include/binder/IInterface.h

 74 #define DECLARE_META_INTERFACE(INTERFACE)                               \
 75     static const android::String16 descriptor;                          \
 76     static android::sp<I##INTERFACE> asInterface(                       \
 77             const android::sp<android::IBinder>& obj);                  \
 78     virtual const android::String16& getInterfaceDescriptor() const;    \
 79     I##INTERFACE();                                                     \
 80     virtual ~I##INTERFACE();                                            \
 81 

It also declares a name, descriptor, which for this interface ends up being "android.os.IServiceManager".
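DECLARE_META_INTERFACE only declares asInterface; its counterpart IMPLEMENT_META_INTERFACE (in the same IInterface.h, quoted here roughly from memory) provides the definition, and this is where the Bp object gets created:

#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                       \
    const android::String16 I##INTERFACE::descriptor(NAME);             \
    const android::String16&                                            \
            I##INTERFACE::getInterfaceDescriptor() const {              \
        return I##INTERFACE::descriptor;                                \
    }                                                                   \
    android::sp<I##INTERFACE> I##INTERFACE::asInterface(                \
            const android::sp<android::IBinder>& obj)                   \
    {                                                                   \
        android::sp<I##INTERFACE> intr;                                 \
        if (obj != NULL) {                                              \
            intr = static_cast<I##INTERFACE*>(                          \
                obj->queryLocalInterface(                               \
                        I##INTERFACE::descriptor).get());               \
            if (intr == NULL) {                                         \
                /* obj is a BpBinder, it has no local interface, */     \
                /* so wrap it in a proxy object */                      \
                intr = new Bp##INTERFACE(obj);                          \
            }                                                           \
        }                                                               \
        return intr;                                                    \
    }                                                                   \
    I##INTERFACE::I##INTERFACE() { }                                    \
    I##INTERFACE::~I##INTERFACE() { }

IServiceManager.cpp instantiates it as IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager"), so asInterface on a BpBinder ends up doing new BpServiceManager(obj).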

So after the macro expands, gDefaultServiceManager = IServiceManager::asInterface(new BpBinder(0)) = new BpServiceManager(new BpBinder(0));

In other words, sp<IServiceManager> sm = defaultServiceManager(); really returns a BpServiceManager whose remote() is a BpBinder constructed with handle 0. Now back to MediaPlayerService:

207 void MediaPlayerService::instantiate() {
 208     defaultServiceManager()->addService(
 209             String16("media.player"), new MediaPlayerService());
 210 }  

The defaultServiceManager() here is the same sm as before:

 34 sp<IServiceManager> defaultServiceManager()
 35 {
 36     if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
  .....
}

At last we get to addService:

156     virtual status_t addService(const String16& name, const sp<IBinder>& service,
157             bool allowIsolated)
158     {
159         Parcel data, reply;
160         data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());// descriptor = "android.os.IServiceManager"
161         data.writeString16(name);// name = "media.player"
162         data.writeStrongBinder(service);// service = the new MediaPlayerService object; this one matters, writeStrongBinder is examined separately below
163         data.writeInt32(allowIsolated ? 1 : 0);
164         status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);// remote() = the BpBinder with handle 0
165         return err == NO_ERROR ? reply.readExceptionCode() : err;
166     }

remote() is the BpBinder created earlier, so remote()->transact() lands in BpBinder::transact():

159 status_t BpBinder::transact(
160     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
161 {
162     // Once a binder has died, it will never come back to life.
163     if (mAlive) {
164         status_t status = IPCThreadState::self()->transact(
165             mHandle, code, data, reply, flags);
166         if (status == DEAD_OBJECT) mAlive = 0;
167         return status;
168     }
169 
170     return DEAD_OBJECT;
171 }

Now let's look at writeStrongBinder:
 707 status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
 708 {
 709     return flatten_binder(ProcessState::self(), val, this);
 710 }
There are two overloads of flatten_binder; the only difference is that one takes the binder as an sp (strong pointer) and the other as a wp (weak pointer). For background on strong/weak pointers see
http://blog.csdn.net/luoshengyang/article/details/6786239


status_t flatten_binder(const sp<ProcessState>& proc,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        // BBinder::localBinder() simply returns this. Our binder is the
        // new MediaPlayerService (a BnMediaPlayerService, i.e. a BBinder),
        // so local is the MediaPlayerService object itself.
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.handle = handle;
            obj.cookie = NULL;
        } else {
            obj.type = BINDER_TYPE_BINDER;      // this is the branch we take
            obj.binder = local->getWeakRefs();  // the weak-ref object of MediaPlayerService
            obj.cookie = local;                 // the MediaPlayerService object itself
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
    }

    return finish_flatten_binder(binder, obj, out);
}
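The flat_binder_object filled in above is the wire format of a binder object inside a Parcel. Roughly, from the binder.h of this era (field types simplified):

struct flat_binder_object {
    unsigned long  type;     // BINDER_TYPE_BINDER / WEAK_BINDER / HANDLE / WEAK_HANDLE / FD
    unsigned long  flags;    // here: 0x7f priority mask | FLAT_BINDER_FLAG_ACCEPTS_FDS
    union {
        void        *binder; // used when sending a local object (our case)
        signed long  handle; // used when sending a reference to a remote object
    };
    void           *cookie;  // extra data for local objects: the BBinder itself
};

Note that binder and handle share one union slot: the same field carries either a local object pointer or a handle, and the kernel rewrites one into the other as the object crosses a process boundary, which is exactly what binder_transaction will do below.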



546 status_t IPCThreadState::transact(int32_t handle,
 547                                   uint32_t code, const Parcel& data,
 548                                   Parcel* reply, uint32_t flags)
 549 {

.......
564         err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
// this writes the BC_TRANSACTION command plus the transaction data into mOut; exactly what goes in there is the important part, see writeTransactionData below
....
572     if ((flags & TF_ONE_WAY) == 0) {
// flags defaults to 0, see frameworks/native/include/binder/BpBinder.h
580         if (reply) {
 581             err = waitForResponse(reply);
 582         }
....
}

896 status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
 897     int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
 898 {
 899     binder_transaction_data tr;
 900 
 901     tr.target.handle = handle;// handle = 0
 902     tr.code = code;// code = ADD_SERVICE_TRANSACTION
 903     tr.flags = binderFlags;// flags = 0
 904     tr.cookie = 0;
 905     tr.sender_pid = 0;
 906     tr.sender_euid = 0;
 907 
 908     const status_t err = data.errorCheck();// data here is the Parcel filled in addService (the one flatten_binder wrote into)
 909     if (err == NO_ERROR) {
 910         tr.data_size = data.ipcDataSize();
 911         tr.data.ptr.buffer = data.ipcData();
 912         tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
 913         tr.data.ptr.offsets = data.ipcObjects();
 914     } else if (statusBuffer) {
 915         tr.flags |= TF_STATUS_CODE;
 916         *statusBuffer = err;
 917         tr.data_size = sizeof(status_t);
 918         tr.data.ptr.buffer = statusBuffer;
 919         tr.offsets_size = 0;
 920         tr.data.ptr.offsets = NULL;
 921     } else {
 922         return (mLastError = err);
 923     }
 924 
 925     mOut.writeInt32(cmd);// cmd = BC_TRANSACTION
 926     mOut.write(&tr, sizeof(tr));
 927 
 928     return NO_ERROR;
 929 }
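The binder_transaction_data filled in here is what actually gets handed to the driver. A rough sketch of its layout (again from the pre-uapi binder.h, simplified):

struct binder_transaction_data {
    union {
        size_t  handle;       // target handle; 0 means servicemanager
        void   *ptr;          // target node pointer (used when replying to a local object)
    } target;
    void         *cookie;
    unsigned int  code;       // e.g. ADD_SERVICE_TRANSACTION

    unsigned int  flags;      // e.g. TF_ONE_WAY
    pid_t         sender_pid; // filled in by the driver
    uid_t         sender_euid;
    size_t        data_size;    // size of the Parcel's data
    size_t        offsets_size; // size of the array of flat_binder_object offsets
    union {
        struct {
            const void *buffer;  // userspace address of the Parcel data
            const void *offsets; // offsets of the flat_binder_objects inside buffer
        } ptr;
        uint8_t buf[8];
    } data;
};

So the Parcel contents are not copied at this point; only their userspace addresses and sizes are recorded, and the single copy_from_user happens later in the driver's binder_transaction.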


waitForResponse(reply);

 704 status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
 705 {       
 706     int32_t cmd;
 707     int32_t err;
 708         
 709     while (1) {
 710         if ((err=talkWithDriver()) < NO_ERROR) break;// finally, this is where we actually talk to the driver
 711         err = mIn.errorCheck();
 712         if (err < NO_ERROR) break;
 713         if (mIn.dataAvail() == 0) continue;
 714      .......
}

talkWithDriver() is where the real action is; let's see how it talks to the driver:

 795 status_t IPCThreadState::talkWithDriver(bool doReceive)
 796 {       
 797     if (mProcess->mDriverFD <= 0) {
 798         return -EBADF;
 799     }   
 800     
 801     binder_write_read bwr;
 802         
 803     // Is the read buffer empty?
 804     const bool needRead = mIn.dataPosition() >= mIn.dataSize();// mIn has not been filled yet (everything is 0 except the 256-byte capacity), so needRead is true
 805         
 806     // We don't want to write anything if we are still reading
 807     // from data left in the input buffer and the caller
 808     // has requested to read the next data.
 809     const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;// outAvail = mOut.dataSize() = sizeof(int32_t) + sizeof(binder_transaction_data)
 810         
 811     bwr.write_size = outAvail;
 812     bwr.write_buffer = (long unsigned int)mOut.data();
 813                 
 814     // This is what we'll read.
 815     if (doReceive && needRead) {
 816         bwr.read_size = mIn.dataCapacity();
 817         bwr.read_buffer = (long unsigned int)mIn.data();
 818     } else {
 819         bwr.read_size = 0;// not taken: doReceive defaults to true and needRead is true, so the branch above (read_size = 256) is what actually runs
 820         bwr.read_buffer = 0;
 821     }
 822 
 823     IF_LOG_COMMANDS() {
 824         TextOutput::Bundle _b(alog);
 825         if (outAvail != 0) {
 826             alog << "Sending commands to driver: " << indent;
 827             const void* cmds = (const void*)bwr.write_buffer;//cmds=BC_TRANSACTION
 828             const void* end = ((const uint8_t*)cmds)+bwr.write_size;
 829             alog << HexDump(cmds, bwr.write_size) << endl;
 830             while (cmds < end) cmds = printCommand(alog, cmds);
 831             alog << dedent;
 832         }
 833         alog << "Size of receive buffer: " << bwr.read_size
 834             << ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
 835     }
 836 
 837     // Return immediately if there is nothing to do.
 838     if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
 839 
 840     bwr.write_consumed = 0;
 841     bwr.read_consumed = 0;
 842     status_t err;
 843     do {
 844         IF_LOG_COMMANDS() {
 845             alog << "About to read/write, write size = " << mOut.dataSize() << endl;
 846         }
 847 #if defined(HAVE_ANDROID_OS)
 848         if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)// enters the driver's ioctl; see the ioctl analysis below
 849             err = NO_ERROR;
 850         else
 851             err = -errno;
 852 #else
 853         err = INVALID_OPERATION;
 854 #endif
 855         if (mProcess->mDriverFD <= 0) {
 856             err = -EBADF;
 857         }
 858         IF_LOG_COMMANDS() {
 859             alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
 860         }
 861     } while (err == -EINTR);
 862 
 863     IF_LOG_COMMANDS() {
 864         alog << "Our err: " << (void*)err << ", write consumed: "
 865             << bwr.write_consumed << " (of " << mOut.dataSize()
 866                         << "), read consumed: " << bwr.read_consumed << endl;
 867     }
 868 
 869     if (err >= NO_ERROR) {
 870         if (bwr.write_consumed > 0) {
 871             if (bwr.write_consumed < (ssize_t)mOut.dataSize())
 872                 mOut.remove(0, bwr.write_consumed);
 873             else
 874                 mOut.setDataSize(0);
 875         }
 876         if (bwr.read_consumed > 0) {
 877             mIn.setDataSize(bwr.read_consumed);
 878             mIn.setDataPosition(0);
 879         }
 880         IF_LOG_COMMANDS() {
 881             TextOutput::Bundle _b(alog);
 882             alog << "Remaining data size: " << mOut.dataSize() << endl;
 883             alog << "Received commands from driver: " << indent;
 884             const void* cmds = mIn.data();
 885             const void* end = mIn.data() + mIn.dataSize();
 886             alog << HexDump(cmds, mIn.dataSize()) << endl;
 887             while (cmds < end) cmds = printReturnCommand(alog, cmds);
 888             alog << dedent;
 889         }
 890         return NO_ERROR;
 891     }
 892 
 893     return err;
 894 }
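For reference, the binder_write_read passed to the BINDER_WRITE_READ ioctl is just a pair of (buffer, size, consumed) descriptors, one per direction. Roughly:

struct binder_write_read {
    signed long    write_size;     // bytes available in write_buffer (our mOut data)
    signed long    write_consumed; // bytes the driver actually consumed
    unsigned long  write_buffer;   // userspace address of mOut.data()
    signed long    read_size;      // capacity of read_buffer (mIn)
    signed long    read_consumed;  // bytes the driver filled in
    unsigned long  read_buffer;    // userspace address of mIn.data()
};

The driver first drains write_buffer in binder_thread_write and then, if read_size is non-zero, blocks in binder_thread_read until something shows up on the thread's or process's todo list.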

ioctl: the ioctl path was covered in the previous post, so we go straight to binder_thread_write; its BC_TRANSACTION case leads into binder_transaction:

1378 static void binder_transaction(struct binder_proc *proc,
1379                    struct binder_thread *thread,
1380                    struct binder_transaction_data *tr, int reply)
1381 {
。。。。
1395     e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);// call_type = 0 (not a reply, not one-way)
1396     e->from_proc = proc->pid;// pid of the calling process (mediaserver)
1397     e->from_thread = thread->pid;// tid of the calling thread
1398     e->target_handle = tr->target.handle;// 0
1399     e->data_size = tr->data_size;// tr is the binder_transaction_data filled in writeTransactionData
1400     e->offsets_size = tr->offsets_size;// 1 * sizeof(size_t); see mObjectsSize in frameworks/native/libs/binder/Parcel.cpp
1401 
1402     if (reply) {
...... // not taken (this is not a reply)
 1445     } else {
1446         if (tr->target.handle) {
    ....... // as noted above, the handle passed in is 0, and handle 0 is reserved for servicemanager
1457         } else {
1458             target_node = binder_context_mgr_node;// covered in the previous post: binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
1459             if (target_node == NULL) {
1460                 return_error = BR_DEAD_REPLY;
1461                 goto err_no_context_mgr_node;
1462             }
1463         }
1464         e->to_node = target_node->debug_id;
1465         target_proc = target_node->proc;
1466         if (target_proc == NULL) {
1467             return_error = BR_DEAD_REPLY;
1468             goto err_dead_binder;
1469         }
1470         if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
.......// flags = 0 and thread->transaction_stack is NULL, so this is skipped; it only matters once a client is already mid-call with mediaserver (nested transactions)
                 }
 1490     }
1491     if (target_thread) {
1492         e->to_thread = target_thread->pid;
1493         target_list = &target_thread->todo;
1494         target_wait = &target_thread->wait;
1495     } else {
1496         target_list = &target_proc->todo;
1497         target_wait = &target_proc->wait;// this is the wait queue servicemanager is sleeping on, the wait_event_interruptible(thread->wait, binder_has_thread_work(thread)) from the previous post
1498     }
1499     e->to_proc = target_proc->pid;
1500 
1501     /* TODO: reuse incoming transaction for reply */
1502     t = kzalloc(sizeof(*t), GFP_KERNEL);
1503     if (t == NULL) {
1504         return_error = BR_FAILED_REPLY;
1505         goto err_alloc_t_failed;
1506     }
1507     binder_stats_created(BINDER_STAT_TRANSACTION);// binder_stats.obj_created[type]++; just debug statistics
1508 
1509     tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1510     if (tcomplete == NULL) {
1511         return_error = BR_FAILED_REPLY;
1512         goto err_alloc_tcomplete_failed;
1513     }
1514     binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);// binder_stats.obj_created[type]++; debug statistics again
1515 
1516     t->debug_id = ++binder_last_id;
1517     e->debug_id = t->debug_id;
1518 
1536     if (!reply && !(tr->flags & TF_ONE_WAY))
1537         t->from = thread;// taken: this is neither a reply nor one-way
1538     else
1539         t->from = NULL;
1540     t->sender_euid = proc->tsk->cred->euid;
1541     t->to_proc = target_proc;
1542     t->to_thread = target_thread;
1543     t->code = tr->code;//ADD_SERVICE_TRANSACTION
1544     t->flags = tr->flags;//0
1545     t->priority = task_nice(current);
1546     t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1547         tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1548     if (t->buffer == NULL) {
1549         return_error = BR_FAILED_REPLY;
1550         goto err_binder_alloc_buf_failed;
1551     }
1552     t->buffer->allow_user_free = 0;
1553     t->buffer->debug_id = t->debug_id;
1554     t->buffer->transaction = t;
1555     t->buffer->target_node = target_node;
1556     if (target_node)
1557         binder_inc_node(target_node, 1, 0, NULL);// take a reference on the node so it does not get freed underneath us
1558 
1559     offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
1560 
         // the two copy_from_user calls below copy the data described by bwr in talkWithDriver,
         // i.e. the Parcel built by addService in IServiceManager.cpp, into the buffer just allocated from the target process's binder area
1561     if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
1562         binder_user_error("binder: %d:%d got transaction with invalid "
1563             "data ptr\n", proc->pid, thread->pid);
1564         return_error = BR_FAILED_REPLY;
1565         goto err_copy_data_failed;
1566     }
1567     if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
1568         binder_user_error("binder: %d:%d got transaction with invalid "
1569             "offsets ptr\n", proc->pid, thread->pid);
1570         return_error = BR_FAILED_REPLY;
1571         goto err_copy_data_failed;
1572     }
1573     if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
1574         binder_user_error("binder: %d:%d got transaction with "
1575             "invalid offsets size, %zd\n",
1576             proc->pid, thread->pid, tr->offsets_size);
1577         return_error = BR_FAILED_REPLY;
1578         goto err_bad_offset;
1579     }
1580     off_end = (void *)offp + tr->offsets_size;
1581     for (; offp < off_end; offp++) {
1582         struct flat_binder_object *fp;
1583         if (*offp > t->buffer->data_size - sizeof(*fp) ||
1584             t->buffer->data_size < sizeof(*fp) ||
1585             !IS_ALIGNED(*offp, sizeof(void *))) {
1586             binder_user_error("binder: %d:%d got transaction with "
1587                 "invalid offset, %zd\n",
1588                 proc->pid, thread->pid, *offp);
1589             return_error = BR_FAILED_REPLY;
1590             goto err_bad_offset;
1591         }
1592         fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1593         switch (fp->type) {
1594         case BINDER_TYPE_BINDER:
1595         case BINDER_TYPE_WEAK_BINDER: {// per flatten_binder above, our type is BINDER_TYPE_BINDER
1596             struct binder_ref *ref;
1597             struct binder_node *node = binder_get_node(proc, fp->binder);// NULL the first time this binder is sent; it would be found if the same service object were passed down again
1598             if (node == NULL) {
1599                 node = binder_new_node(proc, fp->binder, fp->cookie);// fp->binder/fp->cookie carry the weak-ref/object pointers of the MediaPlayerService created in instantiate()
1600                 if (node == NULL) {
1601                     return_error = BR_FAILED_REPLY;
1602                     goto err_binder_new_node_failed;
1603                 }
1604                 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1605                 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1606             }
1607             if (fp->cookie != node->cookie) {
1608                 binder_user_error("binder: %d:%d sending u%p "
1609                     "node %d, cookie mismatch %p != %p\n",
1610                     proc->pid, thread->pid,
1611                     fp->binder, node->debug_id,
1612                     fp->cookie, node->cookie);
1613                 goto err_binder_get_ref_for_node_failed;
1614             }
1615             ref = binder_get_ref_for_node(target_proc, node);// create (or find) a reference to this node in servicemanager's proc; a client that later asks servicemanager
             // for "media.player" gets handed a reference that leads back to this node, i.e. to the new MediaPlayerService registered here (the client receives a handle, not the raw address)
1616             if (ref == NULL) {
1617                 return_error = BR_FAILED_REPLY;
1618                 goto err_binder_get_ref_for_node_failed;
1619             }
1620             if (fp->type == BINDER_TYPE_BINDER)
1621                 fp->type = BINDER_TYPE_HANDLE;
1622             else
1623                 fp->type = BINDER_TYPE_WEAK_HANDLE;
1624             fp->handle = ref->desc;
1625             binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1626                        &thread->todo);
1627 
1628             binder_debug(BINDER_DEBUG_TRANSACTION,
1629                      "        node %d u%p -> ref %d desc %d\n",
1630                      node->debug_id, node->ptr, ref->debug_id,
1631                      ref->desc);
1632         } break;
1633         case BINDER_TYPE_HANDLE:
1634         case BINDER_TYPE_WEAK_HANDLE: {
1635             struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1636             if (ref == NULL) {
1637                 binder_user_error("binder: %d:%d got "
1638                     "transaction with invalid "
1639                     "handle, %ld\n", proc->pid,
1640                     thread->pid, fp->handle);
1641                 return_error = BR_FAILED_REPLY;
1642                 goto err_binder_get_ref_failed;
1643             }
1644             if (ref->node->proc == target_proc) {
1645                 if (fp->type == BINDER_TYPE_HANDLE)
1646                     fp->type = BINDER_TYPE_BINDER;
1647                 else
1648                     fp->type = BINDER_TYPE_WEAK_BINDER;
1649                 fp->binder = ref->node->ptr;
1650                 fp->cookie = ref->node->cookie;
1651                 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1652                 binder_debug(BINDER_DEBUG_TRANSACTION,
1653                          "        ref %d desc %d -> node %d u%p\n",
1654                          ref->debug_id, ref->desc, ref->node->debug_id,
1655                          ref->node->ptr);
1656             } else {
1657                 struct binder_ref *new_ref;
1658                 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1659                 if (new_ref == NULL) {
1660                     return_error = BR_FAILED_REPLY;
1661                     goto err_binder_get_ref_for_node_failed;
1662                 }
1663                 fp->handle = new_ref->desc;
1664                 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1665                 binder_debug(BINDER_DEBUG_TRANSACTION,
1666                          "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1667                          ref->debug_id, ref->desc, new_ref->debug_id,
1668                          new_ref->desc, ref->node->debug_id);
1669             }
1670         } break;
1718     if (reply) {
1719         BUG_ON(t->buffer->async_transaction != 0);
1720         binder_pop_transaction(target_thread, in_reply_to);
1721     } else if (!(t->flags & TF_ONE_WAY)) {
1722         BUG_ON(t->buffer->async_transaction != 0);
1723         t->need_reply = 1;
1724         t->from_parent = thread->transaction_stack;
1725         thread->transaction_stack = t;
1726     } else {
1727         BUG_ON(target_node == NULL);
1728         BUG_ON(t->buffer->async_transaction != 1);
1729         if (target_node->has_async_transaction) {
1730             target_list = &target_node->async_todo;
1731             target_wait = NULL;
1732         } else
1733             target_node->has_async_transaction = 1;
1734     }
1735     t->work.type = BINDER_WORK_TRANSACTION;
1736     list_add_tail(&t->work.entry, target_list);// queue t on target_list (servicemanager's todo list)
1737     tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1738     list_add_tail(&tcomplete->entry, &thread->todo);
1739     if (target_wait)
1740         wake_up_interruptible(target_wait);// wake up servicemanager
1741     return;
.......
1784 }

In the previous post, servicemanager was left waiting at

2257             ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));

We will leave the mediaserver (MediaPlayerService) process alone for now and switch over to the servicemanager process:


2194 static int binder_thread_read(struct binder_proc *proc,
2195                   struct binder_thread *thread,
2196                   void  __user *buffer, int size,
2197                   signed long *consumed, int non_block)
2198 {

。。。。。。
2257             ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
。。。。。。
2267     while (1) {
2268         uint32_t cmd;
2269         struct binder_transaction_data tr;
2270         struct binder_work *w;
2271         struct binder_transaction *t = NULL;
2272 
2273         if (!list_empty(&thread->todo))// the work that binder_transaction queued above went onto target_proc->todo (target_thread was NULL), so it is picked up via proc->todo just below;
// these lists live in kernel space and binder_context_mgr_node is a global, so work queued from the mediaserver process is still there when the servicemanager process runs. This is the core reason binder can cross processes.
 2274             w = list_first_entry(&thread->todo, struct binder_work, entry);
2275         else if (!list_empty(&proc->todo) && wait_for_proc_work)
2276             w = list_first_entry(&proc->todo, struct binder_work, entry);
2277         else {
2278             if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
2279                 goto retry;
2280             break;
2281         }
2282 
2283         if (end - ptr < sizeof(tr) + 4)
2284             break;
2286         switch (w->type) {//t->work.type = BINDER_WORK_TRANSACTION;
2287         case BINDER_WORK_TRANSACTION: {
2288             t = container_of(w, struct binder_transaction, work);
2289         } break; 
。。。。。。。


2404         BUG_ON(t->buffer == NULL);
2405         if (t->buffer->target_node) {
2406             struct binder_node *target_node = t->buffer->target_node;
2407             tr.target.ptr = target_node->ptr;// target_node is binder_context_mgr_node, whose ptr and cookie were registered as NULL in the previous post
2408             tr.cookie =  target_node->cookie;
2409             t->saved_priority = task_nice(current);
2410             if (t->priority < target_node->min_priority &&
2411                 !(t->flags & TF_ONE_WAY))
2412                 binder_set_nice(t->priority);
2413             else if (!(t->flags & TF_ONE_WAY) ||
2414                  t->saved_priority > target_node->min_priority)
2415                 binder_set_nice(target_node->min_priority);
2416             cmd = BR_TRANSACTION;// note this command; servicemanager switches on it in binder_parse
2417         } else {
2418             tr.target.ptr = NULL;
2419             tr.cookie = NULL;
2420             cmd = BR_REPLY;
2421         }
2422         tr.code = t->code;//ADD_SERVICE_TRANSACTION=3
。。。。。
}
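The part elided above is worth showing, because it explains why no second copy of the Parcel data is needed: the tail of binder_thread_read (same binder.c, quoted roughly) points tr.data straight at the kernel buffer that was mmap'd into servicemanager's address space, and only cmd plus the small binder_transaction_data header get copied out:

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        /* t->buffer was allocated from target_proc's mmap'd area in
         * binder_transaction(), so adding user_buffer_offset converts the
         * kernel address into the address servicemanager sees. */
        tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size, sizeof(void *));

        if (put_user(cmd, (uint32_t __user *)ptr))   /* cmd = BR_TRANSACTION */
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))      /* header only, not the payload */
            return -EFAULT;
        ptr += sizeof(tr);

ptr here walks through bwr.read_buffer, which is exactly the readbuf that servicemanager passed in from its loop.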



OK, we are now back in frameworks/native/cmds/servicemanager/binder.c:

370     for (;;) {
371         bwr.read_size = sizeof(readbuf);
372         bwr.read_consumed = 0;
373         bwr.read_buffer = (unsigned) readbuf;
374 
375         res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);// in the previous post servicemanager was blocked here
// now it returns with the BR_TRANSACTION work and keeps going
382         res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
。。。。
} 


194 int binder_parse(struct binder_state *bs, struct binder_io *bio,
195                  uint32_t *ptr, uint32_t size, binder_handler func)
196 {           
197     int r = 1;
198     uint32_t *end = ptr + (size / 4);
199         
200     while (ptr < end) {
201         uint32_t cmd = *ptr++;
202 #if TRACE
203         fprintf(stderr,"%s:\n", cmd_name(cmd));
204 #endif  
205         switch(cmd) {//cmd = BR_TRANSACTION;
。。。。
219         case BR_TRANSACTION: {
220             struct binder_txn *txn = (void *) ptr;
221             if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
222                 ALOGE("parse: txn too small!\n");
223                 return -1;
224             }
225             binder_dump_txn(txn);
226             if (func) {
..... // func = svcmgr_handler

227                 unsigned rdata[256/4];
228                 struct binder_io msg;
229                 struct binder_io reply;
230                 int res;
231 
232                 bio_init(&reply, rdata, sizeof(rdata), 4);
233                 bio_init_from_txn(&msg, txn);
234                 res = func(bs, txn, &msg, &reply);
235                 binder_send_reply(bs, &reply, txn->data, res);


236             }
237             ptr += sizeof(*txn) / sizeof(uint32_t);
238             break;


 

203 int svcmgr_handler(struct binder_state *bs,
204                    struct binder_txn *txn,
205                    struct binder_io *msg,
206                    struct binder_io *reply)
207 {
。。。。。
232 
233     switch(txn->code) {


。。。。。
243     case SVC_MGR_ADD_SERVICE:// from binder_thread_read above, txn->code = ADD_SERVICE_TRANSACTION = 3, which is exactly SVC_MGR_ADD_SERVICE (see the enums below)
244         s = bio_get_string16(msg, &len);
245         ptr = bio_get_ref(msg);
246         allow_isolated = bio_get_uint32(msg) ? 1 : 0;
247         if (do_add_service(bs, s, len, ptr, txn->sender_euid, allow_isolated))
248             return -1;
249         break;
。。。。
}
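Why does ADD_SERVICE_TRANSACTION on the C++ side line up with SVC_MGR_ADD_SERVICE here? Both sides simply count up from 1 (IBinder::FIRST_CALL_TRANSACTION); roughly:

// frameworks/native/include/binder/IServiceManager.h
enum {
    GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION, // 1
    CHECK_SERVICE_TRANSACTION,                                 // 2
    ADD_SERVICE_TRANSACTION,                                   // 3
    LIST_SERVICES_TRANSACTION,                                 // 4
};

// frameworks/native/cmds/servicemanager/binder.h
enum {
    SVC_MGR_GET_SERVICE = 1,
    SVC_MGR_CHECK_SERVICE,
    SVC_MGR_ADD_SERVICE,        // 3, matches ADD_SERVICE_TRANSACTION
    SVC_MGR_LIST_SERVICES,
};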


155 int do_add_service(struct binder_state *bs,
156                    uint16_t *s, unsigned len,
157                    void *ptr, unsigned uid, int allow_isolated)
158 {
.......

// we won't walk through this one; it simply records the service name (e.g. media.player) together with the binder reference it was handed, so that getService can find it later (see the sketch below).
}
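For completeness, a rough sketch of what do_add_service does (abridged; permission checks and error handling trimmed): servicemanager keeps a simple linked list of svcinfo entries mapping the UTF-16 service name to the binder reference that bio_get_ref pulled out of the message.

struct svcinfo {
    struct svcinfo *next;
    void *ptr;                  /* the binder reference (handle) for this service */
    struct binder_death death;  /* lets servicemanager hear about the service dying */
    int allow_isolated;
    unsigned len;
    uint16_t name[0];           /* the service name, e.g. "media.player" */
};

struct svcinfo *svclist;

int do_add_service(struct binder_state *bs, uint16_t *s, unsigned len,
                   void *ptr, unsigned uid, int allow_isolated)
{
    struct svcinfo *si = find_svc(s, len);   /* already registered? */
    if (si) {
        si->ptr = ptr;                       /* re-registration: update the reference */
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->len = len;
        si->ptr = ptr;
        si->allow_isolated = allow_isolated;
        si->death.func = svcinfo_death;      /* drop the entry if the service dies */
        si->death.ptr = si;
        si->next = svclist;                  /* push onto the global list */
        svclist = si;
    }
    binder_acquire(bs, ptr);                 /* hold a strong reference on the service */
    binder_link_to_death(bs, ptr, &si->death);
    return 0;
}

Later, getService (SVC_MGR_GET_SERVICE / SVC_MGR_CHECK_SERVICE) just walks this list by name and hands si->ptr back, which is how a client ends up with a handle to the MediaPlayerService node that binder_transaction created above.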



