Android Binder分析二:Native Service的注册

http://blog.csdn.net/lilian0118/article/details/23997249

这一章我们通过MediaPlayerService的注册来说明如何在Native层通过binder向ServiceManager注册一个service,以及client如何通过binder向ServiceManager获得一个service,并调用这个Service的方法。

Native Service的注册

这里以MediaPlayerService举例来说明如何在Native层注册Service,首先来看main_mediaservice.cpp的main方法:
  1. int main(int argc, char** argv)  
  2. {  
  3.     signal(SIGPIPE, SIG_IGN);  
  4.     char value[PROPERTY_VALUE_MAX];  
  5.     bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);  
  6.       
  7.         if (doLog) {  
  8.             prctl(PR_SET_PDEATHSIG, SIGKILL);   // if parent media.log dies before me, kill me also   
  9.             setpgid(0, 0);                      // but if I die first, don't kill my parent   
  10.         }  
  11.         sp<ProcessState> proc(ProcessState::self());  
  12.         sp<IServiceManager> sm = defaultServiceManager();  
  13.         ALOGI("ServiceManager: %p", sm.get());  
  14.         AudioFlinger::instantiate();  
  15.         MediaPlayerService::instantiate();  
  16.         CameraService::instantiate();  
  17.         AudioPolicyService::instantiate();  
  18.         registerExtensions();  
  19.         ProcessState::self()->startThreadPool();  
  20.         IPCThreadState::self()->joinThreadPool();  
  21. }  
// mediaserver entry point: opens the binder driver via ProcessState,
// obtains the service manager proxy, registers the native media services,
// then donates the main thread to the binder thread pool.
int main(int argc, char** argv)
{
    // Ignore SIGPIPE so writing to a dead peer cannot kill the daemon.
    signal(SIGPIPE, SIG_IGN);
    char value[PROPERTY_VALUE_MAX];
    // Under the test harness ("ro.test_harness" == 1) this process ties
    // its lifetime to its parent, media.log.
    bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
    
        if (doLog) {
            prctl(PR_SET_PDEATHSIG, SIGKILL);   // if parent media.log dies before me, kill me also
            setpgid(0, 0);                      // but if I die first, don't kill my parent
        }
        // One ProcessState per process: opens /dev/binder and mmaps the
        // transaction buffer (see ProcessState::ProcessState).
        sp<ProcessState> proc(ProcessState::self());
        // Proxy for the context manager (binder handle 0).
        sp<IServiceManager> sm = defaultServiceManager();
        ALOGI("ServiceManager: %p", sm.get());
        // Each instantiate() constructs the service and publishes it via
        // IServiceManager::addService() (see BinderService::publish).
        AudioFlinger::instantiate();
        MediaPlayerService::instantiate();
        CameraService::instantiate();
        AudioPolicyService::instantiate();
        registerExtensions();
        ProcessState::self()->startThreadPool();   // spawn binder pool threads
        IPCThreadState::self()->joinThreadPool();  // main thread also serves binder requests
}

这里首先通过ProcessState::self()获得一个ProcessState对象,ProcessState是与进程相关的对象,在一个进程中只会存在一个ProcessState对象。我们先来看ProcessState::self()和构造函数:

  1. sp<ProcessState> ProcessState::self()  
  2. {  
  3.     Mutex::Autolock _l(gProcessMutex);  
  4.     if (gProcess != NULL) {  
  5.         return gProcess;  
  6.     }  
  7.     gProcess = new ProcessState;  
  8.     return gProcess;  
  9. }  
  10.   
  11. ProcessState::ProcessState()  
  12.     : mDriverFD(open_driver())  
  13.     , mVMStart(MAP_FAILED)  
  14.     , mManagesContexts(false)  
  15.     , mBinderContextCheckFunc(NULL)  
  16.     , mBinderContextUserData(NULL)  
  17.     , mThreadPoolStarted(false)  
  18.     , mThreadPoolSeq(1)  
  19. {  
  20.     if (mDriverFD >= 0) {  
  21.         mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);  
  22.         if (mVMStart == MAP_FAILED) {  
  23.             // *sigh*   
  24.             ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");  
  25.             close(mDriverFD);  
  26.             mDriverFD = -1;  
  27.         }  
  28.     }  
  29. }  
// Returns the process-wide ProcessState singleton, creating it on first
// use. gProcess and gProcessMutex are globals (defined in Static.cpp),
// so all code in this process shares one binder fd and one mmap region.
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);  // serialize first-time construction
    if (gProcess != NULL) {
        return gProcess;
    }
    gProcess = new ProcessState;
    return gProcess;
}

// Opens /dev/binder (open_driver) and maps BINDER_VM_SIZE bytes of the
// driver's buffer into this process so transaction data can be shared
// between kernel and user space.
ProcessState::ProcessState()
    : mDriverFD(open_driver())          // fd of /dev/binder, or -1 on failure
    , mVMStart(MAP_FAILED)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // PROT_READ only: userspace never writes this mapping; the driver
        // places received transaction data here.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
        }
    }
}

gProcess的定义是在Static.cpp文件里面,当main_mediaservice的main函数第一次调用ProcessState::self()方法时,gProcess为空,所以首先会构造一个ProcessState对象。在ProcessState的构造函数中,首先会调用open_driver()方法去打开/dev/binder设备:

  1. static int open_driver()  
  2. {  
  3.     int fd = open("/dev/binder", O_RDWR);  
  4.     if (fd >= 0) {  
  5.         fcntl(fd, F_SETFD, FD_CLOEXEC);  
  6.         int vers;  
  7.         status_t result = ioctl(fd, BINDER_VERSION, &vers);  
  8.         if (result == -1) {  
  9.             ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));  
  10.             close(fd);  
  11.             fd = -1;  
  12.         }  
  13.         if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {  
  14.             ALOGE("Binder driver protocol does not match user space protocol!");  
  15.             close(fd);  
  16.             fd = -1;  
  17.         }  
  18.         size_t maxThreads = 15;  
  19.         result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);  
  20.         if (result == -1) {  
  21.             ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));  
  22.         }  
  23.     } else {  
  24.         ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));  
  25.     }  
  26.     return fd;  
  27. }  
// Opens /dev/binder, verifies the driver speaks our protocol version,
// and asks the driver to cap the binder thread pool at 15 threads.
// Returns the fd, or -1 on any failure.
static int open_driver()
{
    int fd = open("/dev/binder", O_RDWR);
    if (fd >= 0) {
        fcntl(fd, F_SETFD, FD_CLOEXEC);  // don't leak the fd across exec()
        int vers;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        // NOTE(review): if the ioctl above failed, this branch runs again
        // with fd == -1 (close(-1), and 'vers' uninitialized) — harmless
        // in practice but sloppy; quoted verbatim from AOSP.
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            ALOGE("Binder driver protocol does not match user space protocol!");
            close(fd);
            fd = -1;
        }
        size_t maxThreads = 15;
        // On the failure paths above fd is -1 and this ioctl just fails.
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
    }
    return fd;
}

打开/dev/binder设备会调用到binder驱动中的binder_open方法,在前面分析ServiceManager中我们已经分析过,这个方法首先会创建一个binder_proc对象,并初始化它的pid和task_struct结构,并把它自己链接到全局的binder_procs链表中。在成功打开/dev/binder设备后,会往binder驱动中通过ioctl发送BINDER_VERSION和BINDER_SET_MAX_THREADS两个命令,我们到binder_ioctl去分析:

  1. static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)  
  2. {  
  3.         int ret;  
  4.         struct binder_proc *proc = filp->private_data;  
  5.         struct binder_thread *thread;  
  6.         unsigned int size = _IOC_SIZE(cmd);  
  7.         void __user *ubuf = (void __user *)arg;  
  8.   
  9.         binder_lock(__func__);  
  10.         thread = binder_get_thread(proc);  
  11.         if (thread == NULL) {  
  12.                 ret = -ENOMEM;  
  13.                 goto err;  
  14.         }  
  15.   
  16.         switch (cmd) {  
  17.         case BINDER_SET_MAX_THREADS:  
  18.                 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {  
  19.                         ret = -EINVAL;  
  20.                         goto err;  
  21.                 }  
  22.                 break;  
  23.         case BINDER_VERSION:  
  24.                 if (size != sizeof(struct binder_version)) {  
  25.                         ret = -EINVAL;  
  26.                         goto err;  
  27.                 }  
  28.                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {  
  29.                         ret = -EINVAL;  
  30.                         goto err;  
  31.                 }  
  32.                 break;  
/* (Excerpt — the listing is truncated before the remaining cases and the
 * err/unlock epilogue.) Kernel-side ioctl handler for /dev/binder. */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int ret;
        struct binder_proc *proc = filp->private_data;  /* per-process state created in binder_open */
        struct binder_thread *thread;
        unsigned int size = _IOC_SIZE(cmd);
        void __user *ubuf = (void __user *)arg;

        binder_lock(__func__);
        /* Find (or create) the binder_thread for the calling task; it is
         * linked into proc's threads rb-tree. */
        thread = binder_get_thread(proc);
        if (thread == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        switch (cmd) {
        case BINDER_SET_MAX_THREADS:
                /* Copy the userspace thread cap into proc->max_threads. */
                if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
                        ret = -EINVAL;
                        goto err;
                }
                break;
        case BINDER_VERSION:
                if (size != sizeof(struct binder_version)) {
                        ret = -EINVAL;
                        goto err;
                }
                /* Report the kernel protocol version back to userspace. */
                if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
                        ret = -EINVAL;
                        goto err;
                }
                break;

与前面分析ServiceManager一样,这里首先调用binder_get_thread为mediaservice构造一个binder_thread对象,并把它链接到前面创建的binder_proc数据结构的threads红黑树上。接下来处理BINDER_SET_MAX_THREADS和BINDER_VERSION都比较简单。回到ProcessState的构造函数中,接着会调用mmap方法去分配实际的物理页面,并为用户空间和内核空间映射内存。

在main_mediaservice.cpp接着会调用defaultServiceManager()获得一个ServiceManager的binder指针,我们在后面再来分析这个方法。接着会分别实例化几个不同的service,我们这里只分析AudioFlinger和MediaPlayerService两个service。AudioFlinger继承于BinderService,如下:

  1. class AudioFlinger :  
  2.     public BinderService<AudioFlinger>,  
  3.     public BnAudioFlinger  
  4. {  
  5.     friend class BinderService<AudioFlinger>;   // for AudioFlinger()   
  6. public:  
  7.     static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }  
// (Excerpt — the class declaration continues beyond this listing.)
// AudioFlinger combines BinderService<> (which supplies the static
// publish()/instantiate() registration helpers) with BnAudioFlinger
// (the Bn-side interface implementation).
class AudioFlinger :
    public BinderService<AudioFlinger>,
    public BnAudioFlinger
{
    friend class BinderService<AudioFlinger>;   // for AudioFlinger()
public:
    // Name under which this service is registered with ServiceManager.
    static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }

BinderService是一个类模板,并实现了instantiate()方法,如下:

  1. template<typename SERVICE>  
  2. class BinderService  
  3. {  
  4. public:  
  5.     static status_t publish(bool allowIsolated = false) {  
  6.         sp<IServiceManager> sm(defaultServiceManager());  
  7.         return sm->addService(  
  8.                 String16(SERVICE::getServiceName()),  
  9.                 new SERVICE(), allowIsolated);  
  10.     }  
  11.   
  12.     static void publishAndJoinThreadPool(bool allowIsolated = false) {  
  13.         publish(allowIsolated);  
  14.         joinThreadPool();  
  15.     }  
  16.   
  17.     static void instantiate() { publish(); }  
  18.   
  19.     static status_t shutdown() { return NO_ERROR; }  
  20.   
  21. private:  
  22.     static void joinThreadPool() {  
  23.         sp<ProcessState> ps(ProcessState::self());  
  24.         ps->startThreadPool();  
  25.         ps->giveThreadPoolName();  
  26.         IPCThreadState::self()->joinThreadPool();  
  27.     }  
  28. };  
// Helper base class: SERVICE must provide a static getServiceName().
// publish() constructs one SERVICE instance and registers it with the
// service manager under that name.
template<typename SERVICE>
class BinderService
{
public:
    // Register a new SERVICE with the service manager.
    // Returns the status of IServiceManager::addService().
    static status_t publish(bool allowIsolated = false) {
        sp<IServiceManager> sm(defaultServiceManager());
        return sm->addService(
                String16(SERVICE::getServiceName()),
                new SERVICE(), allowIsolated);
    }

    // Convenience: publish, then block this thread in the binder pool.
    static void publishAndJoinThreadPool(bool allowIsolated = false) {
        publish(allowIsolated);
        joinThreadPool();
    }

    static void instantiate() { publish(); }

    static status_t shutdown() { return NO_ERROR; }

private:
    // Start the pool and add the calling thread to it (does not return
    // until the thread pool exits).
    static void joinThreadPool() {
        sp<ProcessState> ps(ProcessState::self());
        ps->startThreadPool();
        ps->giveThreadPoolName();
        IPCThreadState::self()->joinThreadPool();
    }
};

instantiate()方法调用publish()函数实现向ServiceManager 注册服务。在介绍defaultServiceManager()函数之前,我们先来看一下刚刚讲到的几个类的关系:




从上图可以看到,IServiceManager是继承于IInterface类,而在IInterface.h中有两个重要的宏定义,DECLARE_META_INTERFACE和IMPLEMENT_META_INTERFACE,这两个宏声明和定义了两个比较重要的成员:descriptor和asInterface,后面我们在使用的过程中再来详细解释每个函数。

我们先来看defaultServiceManager()函数:

  1. sp<IServiceManager> defaultServiceManager()  
  2. {  
  3.     if (gDefaultServiceManager != NULL) return gDefaultServiceManager;  
  4.       
  5.     {  
  6.         AutoMutex _l(gDefaultServiceManagerLock);  
  7.         while (gDefaultServiceManager == NULL) {  
  8.             gDefaultServiceManager = interface_cast<IServiceManager>(  
  9.                 ProcessState::self()->getContextObject(NULL));  
  10.             if (gDefaultServiceManager == NULL)  
  11.                 sleep(1);  
  12.         }  
  13.     }  
  14.       
  15.     return gDefaultServiceManager;  
  16. }  
// Returns the process-wide IServiceManager proxy; gDefaultServiceManager
// is a global defined in Static.cpp, so this is built once per process.
sp<IServiceManager> defaultServiceManager()
{
    // Fast path: already created.
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    
    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            // getContextObject(NULL) yields BpBinder(0); interface_cast
            // wraps it in a BpServiceManager.
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
            // NULL means the context manager is not up yet (the ping in
            // getStrongProxyForHandle failed) — retry once a second.
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }
    
    return gDefaultServiceManager;
}

和ProcessState一样,这里的gDefaultServiceManager也是定义在Static.cpp中,所以在一个进程中只会存在一份实例。当第一次调用defaultServiceManager()函数时,会调用ProcessState的getContextObject方法去获取一个Bpbinder(首先我们要有个概念,BpBinder就是一个代理binder,BnBinder才是真正实现服务的地方),我们先来看getContextObject的实现:

  1. sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)  
  2. {  
  3.     return getStrongProxyForHandle(0);  
  4. }  
  5.   
  6. sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)  
  7. {  
  8.     sp<IBinder> result;  
  9.   
  10.     AutoMutex _l(mLock);  
  11.   
  12.     handle_entry* e = lookupHandleLocked(handle);  
  13.   
  14.     if (e != NULL) {  
  15.         IBinder* b = e->binder;  
  16.         if (b == NULL || !e->refs->attemptIncWeak(this)) {  
  17.             if (handle == 0) {  
  18.                 Parcel data;  
  19.                 status_t status = IPCThreadState::self()->transact(  
  20.                         0, IBinder::PING_TRANSACTION, data, NULL, 0);  
  21.                 if (status == DEAD_OBJECT)  
  22.                    return NULL;  
  23.             }  
  24.   
  25.             b = new BpBinder(handle);   
  26.             e->binder = b;  
  27.             if (b) e->refs = b->getWeakRefs();  
  28.             result = b;  
  29.         } else {  
  30.             result.force_set(b);  
  31.             e->refs->decWeak(this);  
  32.         }  
  33.     }  
  34.   
  35.     return result;  
  36. }  
  37.   
  38. ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)  
  39. {  
  40.     const size_t N=mHandleToObject.size();  
  41.     if (N <= (size_t)handle) {  
  42.         handle_entry e;  
  43.         e.binder = NULL;  
  44.         e.refs = NULL;  
  45.         status_t err = mHandleToObject.insertAt(e, N, handle+1-N);  
  46.         if (err < NO_ERROR) return NULL;  
  47.     }  
  48.     return &mHandleToObject.editItemAt(handle);  
  49. }  
// The context manager (ServiceManager) always has handle 0 in the binder
// driver, so 'caller' is ignored and we build a proxy for handle 0.
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
    return getStrongProxyForHandle(0);
}

// Returns a BpBinder proxy for 'handle', creating and caching it in
// mHandleToObject on first use. For handle 0 (the context manager) a
// PING_TRANSACTION is sent first to check that the service manager is
// registered and alive.
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    // Finds (or grows the table to create) the slot for this handle.
    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        // Need a new proxy if none is cached, or the cached one is being
        // destroyed (attemptIncWeak fails).
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                // Special case for the context manager: ping it before
                // handing out a proxy; DEAD_OBJECT means it is gone.
                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                   return NULL;
            }

            b = new BpBinder(handle); 
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            // Cached proxy is alive: adopt it directly, then release the
            // weak reference acquired by attemptIncWeak above.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }

    return result;
}

// Returns the handle_entry slot for 'handle' in mHandleToObject, growing
// the vector with empty (NULL binder/refs) entries when 'handle' is past
// its current size. Caller must hold mLock (hence the "Locked" suffix).
// Returns NULL only if the insert fails.
ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
    const size_t N=mHandleToObject.size();
    if (N <= (size_t)handle) {
        // Grow the table so index 'handle' exists.
        handle_entry e;
        e.binder = NULL;
        e.refs = NULL;
        status_t err = mHandleToObject.insertAt(e, N, handle+1-N);
        if (err < NO_ERROR) return NULL;
    }
    return &mHandleToObject.editItemAt(handle);
}

getContextObject会直接调用getStrongProxyForHandle()方法去获取一个BpBinder,传入的handle id是0,在ServiceManager那章讲过,在binder驱动中ServiceManager的handle值为0,所以这里即是要获得ServiceManager这个BpBinder,我们后面慢慢来分析。接着看getStrongProxyForHandle的实现,先通过lookupHandleLocked(0)去查找在mHandleToObject数组中有没有存在handle等于0的BpBinder,如果不存在就新建一个entry,并把它的binder和refs都设为NULL。回到getStrongProxyForHandle中,因为binder等于NULL并且handle等于0,所以调用IPCThreadState的transact方法来测试ServiceManager是否已经注册或者ServiceManager是否还存活着。关于这里给ServiceManager发送PING_TRANSACTION来检查ServiceManager是否注册的代码,我们后面分析注册Service时一起来分析,先假设这里ServiceManager已经注册到系统中了并且是存活着。接着会创建一个BpBinder(0)并返回。

回到defaultServiceManager()函数,ProcessState::self()->getContextObject(NULL)其实就是返回一个BpBinder(0),然后我们来看interface_cast 的实现,这个模板函数是定义在IIterface.h中:

  1. template<typename INTERFACE>  
  2. inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)  
  3. {  
  4.     return INTERFACE::asInterface(obj);  
  5. }  
// Converts a generic IBinder (here a BpBinder) into a typed interface
// proxy by delegating to INTERFACE::asInterface(), which the
// IMPLEMENT_META_INTERFACE macro generates.
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}
它直接调用IServiceManager的asInterface方法,asInterface就是我们前面讲到的DECLARE_META_INTERFACE和IMPLEMENT_META_INTERFACE宏所声明和定义的函数之一,我们先来看这两个宏的定义:

  1. #define DECLARE_META_INTERFACE(INTERFACE)                               \   
  2.     static const android::String16 descriptor;                          \  
  3.     static android::sp<I##INTERFACE> asInterface(                       \  
  4.             const android::sp<android::IBinder>& obj);                  \  
  5.     virtual const android::String16& getInterfaceDescriptor() const;    \  
  6.     I##INTERFACE();                                                     \  
  7.     virtual ~I##INTERFACE();                                            \  
  8.   
  9.   
  10. #define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                       \   
  11.     const android::String16 I##INTERFACE::descriptor(NAME);             \  
  12.     const android::String16&                                            \  
  13.             I##INTERFACE::getInterfaceDescriptor() const {              \  
  14.         return I##INTERFACE::descriptor;                                \  
  15.     }                                                                   \  
  16.     android::sp<I##INTERFACE> I##INTERFACE::asInterface(                \  
  17.             const android::sp<android::IBinder>& obj)                   \  
  18.     {                                                                   \  
  19.         android::sp<I##INTERFACE> intr;                                 \  
  20.         if (obj != NULL) {                                              \  
  21.             intr = static_cast<I##INTERFACE*>(                          \  
  22.                 obj->queryLocalInterface(                               \  
  23.                         I##INTERFACE::descriptor).get());               \  
  24.             if (intr == NULL) {                                         \  
  25.                 intr = new Bp##INTERFACE(obj);                          \  
  26.             }                                                           \  
  27.         }                                                               \  
  28.         return intr;                                                    \  
  29.     }                                                                   \  
  30.     I##INTERFACE::I##INTERFACE() { }                                    \  
  31.     I##INTERFACE::~I##INTERFACE() { }                                   \  
// Goes inside the I<INTERFACE> class declaration: declares the static
// 'descriptor' string, the asInterface() factory, getInterfaceDescriptor(),
// and the ctor/dtor. '##' glues the 'I' prefix onto INTERFACE.
#define DECLARE_META_INTERFACE(INTERFACE)                               \
    static const android::String16 descriptor;                          \
    static android::sp<I##INTERFACE> asInterface(                       \
            const android::sp<android::IBinder>& obj);                  \
    virtual const android::String16& getInterfaceDescriptor() const;    \
    I##INTERFACE();                                                     \
    virtual ~I##INTERFACE();                                            \


// Goes in the .cpp: defines the members declared above. asInterface()
// first asks the IBinder for a local implementation via
// queryLocalInterface(); for a remote BpBinder that returns NULL, so a
// Bp##INTERFACE proxy is constructed around it instead.
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                       \
    const android::String16 I##INTERFACE::descriptor(NAME);             \
    const android::String16&                                            \
            I##INTERFACE::getInterfaceDescriptor() const {              \
        return I##INTERFACE::descriptor;                                \
    }                                                                   \
    android::sp<I##INTERFACE> I##INTERFACE::asInterface(                \
            const android::sp<android::IBinder>& obj)                   \
    {                                                                   \
        android::sp<I##INTERFACE> intr;                                 \
        if (obj != NULL) {                                              \
            intr = static_cast<I##INTERFACE*>(                          \
                obj->queryLocalInterface(                               \
                        I##INTERFACE::descriptor).get());               \
            if (intr == NULL) {                                         \
                intr = new Bp##INTERFACE(obj);                          \
            }                                                           \
        }                                                               \
        return intr;                                                    \
    }                                                                   \
    I##INTERFACE::I##INTERFACE() { }                                    \
    I##INTERFACE::~I##INTERFACE() { }                                   \

DECLARE_META_INTERFACE宏声明了4个函数,其中包含构造和析构函数;另外包含asInterface和getInterfaceDescriptor。这两个宏都带有参数,其中INTERFACE为接口的类名,例如IServiceManager.cpp中,就定义INTERFACE为ServiceManager;NAME为"android.os.IServiceManager",通过宏定义中的"##"将INTERFACE名字前面加上“I",如IServiceManager.cpp中的定义:

  1. DECLARE_META_INTERFACE(ServiceManager);  
  2.   
  3. IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");  
// From IServiceManager.h (declaration) and IServiceManager.cpp
// (definition): INTERFACE = ServiceManager, so the generated names are
// IServiceManager::descriptor / asInterface / BpServiceManager.
DECLARE_META_INTERFACE(ServiceManager);

IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");

我们将上面的两个宏展开,就可以得到如下的代码:

  1. static const android::String16 descriptor;                            
  2. static android::sp<IServiceManager> asInterface(                         
  3.         const android::sp<android::IBinder>& obj);                    
  4. virtual const android::String16& getInterfaceDescriptor() const;      
  5. IServiceManager();                                                       
  6. virtual ~IServiceManager();                                              
  7.   
  8.                     
  9. const android::String16 IServiceManager::descriptor("android.os.IServiceManager");               
  10. const android::String16&                                              
  11.         IServiceManager::getInterfaceDescriptor() const {                
  12.     return IServiceManager::descriptor;                                  
  13. }                                                                     
  14. android::sp<IServiceManager> IServiceManager::asInterface(                  
  15.         const android::sp<android::IBinder>& obj)                     
  16. {                                                                     
  17.     android::sp<IServiceManager> intr;                                   
  18.     if (obj != NULL) {                                                
  19.         intr = static_cast<IServiceManager*>(                            
  20.             obj->queryLocalInterface(                                 
  21.                     IServiceManager::descriptor).get());                 
  22.         if (intr == NULL) {                                           
  23.             intr = new BpServiceManager(obj);                            
  24.         }                                                             
  25.     }                                                                 
  26.     return intr;                                                      
  27. }                                                                     
  28. IServiceManager::IServiceManager() { }                                      
  29. IServiceManager::~IServiceManager() { }   
    // Hand-expanded result of the two macros for INTERFACE = ServiceManager
    // (illustrative only: the declarations belong inside class
    // IServiceManager, the definitions in IServiceManager.cpp).
    static const android::String16 descriptor;                          
    static android::sp<IServiceManager> asInterface(                       
            const android::sp<android::IBinder>& obj);                  
    virtual const android::String16& getInterfaceDescriptor() const;    
    IServiceManager();                                                     
    virtual ~IServiceManager();                                            

                      
    const android::String16 IServiceManager::descriptor("android.os.IServiceManager");             
    const android::String16&                                            
            IServiceManager::getInterfaceDescriptor() const {              
        return IServiceManager::descriptor;                                
    }                                                                   
    // Factory: for a remote BpBinder, queryLocalInterface() returns NULL
    // (see IBinder::queryLocalInterface below), so a BpServiceManager
    // proxy wrapping 'obj' is returned.
    android::sp<IServiceManager> IServiceManager::asInterface(                
            const android::sp<android::IBinder>& obj)                   
    {                                                                   
        android::sp<IServiceManager> intr;                                 
        if (obj != NULL) {                                              
            intr = static_cast<IServiceManager*>(                          
                obj->queryLocalInterface(                               
                        IServiceManager::descriptor).get());               
            if (intr == NULL) {                                         
                intr = new BpServiceManager(obj);                          
            }                                                           
        }                                                               
        return intr;                                                    
    }                                                                   
    IServiceManager::IServiceManager() { }                                    
    IServiceManager::~IServiceManager() { } 

所以在defaultServiceManager()函数调用interface_cast<IServiceManager>(BpBinder(0)),其实就是调用上面的asInterface(BpBinder(0))。在Binder.cpp中,我们看到queryLocalInterface()的定义如下:

  1. sp<IInterface>  IBinder::queryLocalInterface(const String16& descriptor)  
  2. {  
  3.     return NULL;  
  4. }  
// Default IBinder implementation: a plain IBinder/BpBinder hosts no local
// interface, so this returns NULL and asInterface() falls through to
// constructing a Bp proxy. (The local Bn side presumably overrides this
// to return itself — that override is not shown in this article.)
sp<IInterface>  IBinder::queryLocalInterface(const String16& descriptor)
{
    return NULL;
}

所以前面的defaultServiceManager()可以改写为:

  1. sp<IServiceManager> defaultServiceManager()  
  2. {  
  3.     if (gDefaultServiceManager != NULL) return gDefaultServiceManager;  
  4.       
  5.     {  
  6.         AutoMutex _l(gDefaultServiceManagerLock);  
  7.         while (gDefaultServiceManager == NULL) {  
  8.             gDefaultServiceManager = new BpServiceManager(  
  9.                 BpBinder(0));  
  10.             if (gDefaultServiceManager == NULL)  
  11.                 sleep(1);  
  12.         }  
  13.     }  
  14.       
  15.     return gDefaultServiceManager;  
  16. }  
// The same function with interface_cast/getContextObject substituted by
// their results — illustrative pseudo-code, not real source: it makes
// explicit that the singleton is a BpServiceManager wrapping BpBinder(0).
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    
    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            gDefaultServiceManager = new BpServiceManager(
                BpBinder(0));
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }
    
    return gDefaultServiceManager;
}

至此,gDefaultServiceManager其实就是一个BpServiceManager,先来看一下上面的类图关系:



我们来看BpServiceManager的构造函数:

  1. class BpServiceManager : public BpInterface<IServiceManager>  
  2. {  
  3. public:  
  4.     BpServiceManager(const sp<IBinder>& impl)  
  5.         : BpInterface<IServiceManager>(impl)  
  6.     {  
  7.     }  
  8.   
  9. template<typename INTERFACE>  
  10. inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)  
  11.     : BpRefBase(remote)  
  12. {  
  13. }  
  14.   
  15. BpRefBase::BpRefBase(const sp<IBinder>& o)  
  16.     : mRemote(o.get()), mRefs(NULL), mState(0)  
  17. {  
  18.     extendObjectLifetime(OBJECT_LIFETIME_WEAK);  
  19.   
  20.     if (mRemote) {  
  21.         mRemote->incStrong(this);           // Removed on first IncStrong().   
  22.         mRefs = mRemote->createWeak(this);  // Held for our entire lifetime.   
  23.     }  
  24. }  
// Note: this listing juxtaposes three constructors from different classes
// (BpServiceManager, BpInterface<>, BpRefBase) to show the call chain;
// the BpServiceManager class body is not closed here.
class BpServiceManager : public BpInterface<IServiceManager>
{
public:
    // Forwards the remote binder (BpBinder(0)) up the chain:
    // BpInterface -> BpRefBase, which stores it in mRemote.
    BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
    {
    }

template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
    : BpRefBase(remote)
{
}

// Keeps a raw pointer to the remote IBinder in mRemote, pinned by a
// strong reference plus a weak ref held for BpRefBase's lifetime.
BpRefBase::BpRefBase(const sp<IBinder>& o)
    : mRemote(o.get()), mRefs(NULL), mState(0)
{
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);

    if (mRemote) {
        mRemote->incStrong(this);           // Removed on first IncStrong().
        mRefs = mRemote->createWeak(this);  // Held for our entire lifetime.
    }
}

通过一系列的调用,最后把BpBinder(0)记录在mRemote变量中,并增加它的强弱指针引用计数。回到BinderService的instantiate()方法,sm即是BpServiceManager(BpBinder(0)),接着调用它的addService方法,将BinderService的publish方法展开如下:

  1. static status_t publish(bool allowIsolated = false) {  
  2.     sp<IServiceManager> sm(defaultServiceManager());  
  3.     return sm->addService(  
  4.             String16("media.audio_flinger"),  
  5.             new AudioFlinger (), false);  
  6. }  
    // Registers this service with servicemanager under its published name.
    // `allowIsolated` controls whether isolated (sandboxed) processes may
    // look the service up.
    static status_t publish(bool allowIsolated = false) {
        sp<IServiceManager> sm(defaultServiceManager());
        // Forward allowIsolated instead of hard-coding false, so a caller's
        // publish(true) actually takes effect (matches the real
        // BinderService<>::publish in frameworks/native). Default behavior
        // is unchanged.
        return sm->addService(
                String16("media.audio_flinger"),
                new AudioFlinger (), allowIsolated);
    }

接着来看addService的实现:

  1. virtual status_t addService(const String16& name, const sp<IBinder>& service,  
  2.         bool allowIsolated)  
  3. {  
  4.     Parcel data, reply;  
  5.     data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());  
  6.     data.writeString16(name);  
  7.     data.writeStrongBinder(service);  
  8.     data.writeInt32(allowIsolated ? 1 : 0);  
  9.     status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);  
  10.     return err == NO_ERROR ? reply.readExceptionCode() : err;  
  11. }  
    // Proxy-side addService: marshals the request into a Parcel and sends
    // ADD_SERVICE_TRANSACTION to servicemanager through the remote binder.
    // Returns the transport error, or the reply's exception code on success.
    virtual status_t addService(const String16& name, const sp<IBinder>& service,
            bool allowIsolated)
    {
        Parcel data, reply;
        // RPC header: strict-mode policy word + interface descriptor.
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        // The service object itself is flattened as a flat_binder_object.
        data.writeStrongBinder(service);
        data.writeInt32(allowIsolated ? 1 : 0);
        const status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        if (err != NO_ERROR)
            return err;
        return reply.readExceptionCode();
    }

首先定义两个Parcel对象,一个用于存储发送的数据,一个用于接收response。首先来看writeInterfaceToken()方法,我们知道IServiceManager::getInterfaceDescriptor()会返回"android.os.IServiceManager":

  1. status_t Parcel::writeInterfaceToken(const String16& interface)  
  2. {  
  3.     writeInt32(IPCThreadState::self()->getStrictModePolicy() |  
  4.                STRICT_MODE_PENALTY_GATHER);  
  5.     // currently the interface identification token is just its name as a string   
  6.     return writeString16(interface);  
  7. }  
// Writes the RPC header for a call on `interface`: the caller's strict-mode
// policy word followed by the interface descriptor string.
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    const int32_t policy = IPCThreadState::self()->getStrictModePolicy();
    writeInt32(policy | STRICT_MODE_PENALTY_GATHER);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);
}

首先向Parcel中写入strict mode,这个会被binder驱动用于做RPC检验,接着会把"android.os.IServiceManager"和"media.audio_flinger"也写入到Parcel对象中。下面来看一下writeStrongBinder方法,参数是AudioFlinger对象:

  1. status_t Parcel::writeStrongBinder(const sp<IBinder>& val)  
  2. {  
  3.     return flatten_binder(ProcessState::self(), val, this);  
  4. }  
  5.   
  6. status_t flatten_binder(const sp<ProcessState>& proc,  
  7.     const sp<IBinder>& binder, Parcel* out)  
  8. {  
  9.     flat_binder_object obj;  
  10.       
  11.     obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;  
  12.     if (binder != NULL) {  
  13.         IBinder *local = binder->localBinder();  
  14.         if (!local) {  
  15.             BpBinder *proxy = binder->remoteBinder();  
  16.             if (proxy == NULL) {  
  17.                 ALOGE("null proxy");  
  18.             }  
  19.             const int32_t handle = proxy ? proxy->handle() : 0;  
  20.             obj.type = BINDER_TYPE_HANDLE;  
  21.             obj.handle = handle;  
  22.             obj.cookie = NULL;  
  23.         } else {  
  24.             obj.type = BINDER_TYPE_BINDER;  
  25.             obj.binder = local->getWeakRefs();  
  26.             obj.cookie = local;  
  27.         }  
  28.     } else {  
  29.         obj.type = BINDER_TYPE_BINDER;  
  30.         obj.binder = NULL;  
  31.         obj.cookie = NULL;  
  32.     }  
  33.       
  34.     return finish_flatten_binder(binder, obj, out);  
  35. }  
// Flattens a strong IBinder reference into this parcel on behalf of the
// current process.
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    const sp<ProcessState> proc(ProcessState::self());
    return flatten_binder(proc, val, this);
}

// Converts an IBinder into the flat_binder_object wire representation and
// appends it to `out`.  A local (BBinder) object is shipped as its raw
// pointers (the driver creates a node for it); a remote (BpBinder) object
// is shipped as its driver handle; a NULL reference becomes an empty
// local-object record.
status_t flatten_binder(const sp<ProcessState>& proc,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;

    if (binder == NULL) {
        // NULL reference: empty local-binder record.
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = NULL;
        obj.cookie = NULL;
        return finish_flatten_binder(binder, obj, out);
    }

    IBinder *local = binder->localBinder();
    if (local != NULL) {
        // Object lives in this process: send the weak-ref table and the
        // object pointer itself as the cookie.
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = local->getWeakRefs();
        obj.cookie = local;
    } else {
        // Object lives elsewhere: send the handle our proxy holds.
        BpBinder *proxy = binder->remoteBinder();
        if (proxy == NULL) {
            ALOGE("null proxy");
        }
        const int32_t handle = proxy ? proxy->handle() : 0;
        obj.type = BINDER_TYPE_HANDLE;
        obj.handle = handle;
        obj.cookie = NULL;
    }

    return finish_flatten_binder(binder, obj, out);
}

先来看flat_binder_object的结构,定义在binder驱动的binder.h中:

  1. struct flat_binder_object {  
  2.         /* 8 bytes for large_flat_header. */  
  3.         unsigned long           type;  
  4.         unsigned long           flags;  
  5.   
  6.         /* 8 bytes of data. */  
  7.         union {  
  8.                 void            *binder;        /* local object */  
  9.                 signed long     handle;         /* remote object */  
  10.         };  
  11.   
  12.         /* extra data associated with local object */  
  13.         void                    *cookie;  
  14. };  
// Wire representation of a binder object inside transaction data, shared
// between userspace and the binder driver (kernel ABI — layout is fixed).
// `type` selects which union member is meaningful: BINDER_TYPE_BINDER
// carries a local object pointer in `binder`, BINDER_TYPE_HANDLE carries a
// driver handle id in `handle`.
struct flat_binder_object {
        /* 8 bytes for large_flat_header. */
        unsigned long           type;
        unsigned long           flags;

        /* 8 bytes of data. */
        union {
                void            *binder;        /* local object */
                signed long     handle;         /* remote object */
        };

        /* extra data associated with local object */
        void                    *cookie;
};

flat_binder_object数据结构中,根据type的不同,分别用binder和handle保存不同的对象。如果type是BINDER_TYPE_HANDLE,就表示flat_binder_object存储的是binder驱动中的一个handle id值,所以会把handle id记录在handle中;如果type是BINDER_TYPE_BINDER,表示flat_binder_object存储的是一个binder对象,所以会把binder对象放在binder中。这里的AudioFlinger对象是继承于BBinder,所以它的localBinder不会为空,就将flat_binder_object的binder置为RefBase的mRefs变量,并将cookie置为AudioFlinger本身。接着调用finish_flatten_binder将flat_binder_object写入到Parcel中。来看一下finish_flatten_binder的实现:

  1. inline static status_t finish_flatten_binder(  
  2.     const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)  
  3. {  
  4.     return out->writeObject(flat, false);  
  5. }  
  6.   
  7. status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)  
  8. {  
  9.     const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;  
  10.     const bool enoughObjects = mObjectsSize < mObjectsCapacity;  
  11.     if (enoughData && enoughObjects) {  
  12. restart_write:  
  13.         *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;  
  14.           
  15.         // Need to write meta-data?   
  16.         if (nullMetaData || val.binder != NULL) {  
  17.             mObjects[mObjectsSize] = mDataPos;  
  18.             acquire_object(ProcessState::self(), val, this);  
  19.             mObjectsSize++;  
  20.         }  
  21.           
  22.   
  23.         return finishWrite(sizeof(flat_binder_object));  
  24.     }  
  25. }  
// Final step of flattening: append the prepared flat_binder_object to the
// parcel.  Meta-data is not recorded for NULL binders (second arg false).
inline static status_t finish_flatten_binder(
    const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)
{
    const bool nullMetaData = false;
    return out->writeObject(flat, nullMetaData);
}

// Copies a flat_binder_object into the parcel at the current write position
// and records its byte offset in mObjects, so the binder driver can later
// locate and translate every binder embedded in the flat data.
// NOTE(review): this excerpt drops the original's out-of-space branch
// (grow the buffers, then "goto restart_write"); as shown, the function
// falls off the end without a return when either buffer is full — see the
// complete Parcel::writeObject in frameworks/native.
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
        
        // Need to write meta-data?
        if (nullMetaData || val.binder != NULL) {
            mObjects[mObjectsSize] = mDataPos;
            // Pin the binder referenced by this object for the parcel's
            // lifetime.
            acquire_object(ProcessState::self(), val, this);
            mObjectsSize++;
        }
        

        return finishWrite(sizeof(flat_binder_object));
    }
}

上面的代码中先检验现在Parcel中分配的数组空间是否足够,如果不足够就去扩大分配的数组;假设这里数组空间大小足够,就将上面的flat_binder_object写入到mData+mDataPos处,并在mObjects中的记录下这个数据结构写入的起始地址。因为Parcel不仅存在整形、string,还存在flat_binder_object数据结构,为了快速的找到所有的binder,这里利用mObjects数组存下写入到Parcel中的所有flat_binder_object数据结构的偏移地址,mObjectSize存写入的flat_binder_object数据结构个数。把上面所有的数据都写入到Parcel中,现在Parcel中的数据结构如下:

Strict Mode0
interface "android.os.IServiceManager"
name”media.audio_flinger"
flat_binder_objecttypeBINDER_TYPE_BINDER
flags0
binderlocal->getWeakRefs
cookielocal



接着调用remote()->transact方法,我们知道这里的remote()返回BpBinder(0),所以这里会调用BpBinder的transact方法:

  1. status_t BpBinder::transact(  
  2.     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)  
  3. {  
  4.     // Once a binder has died, it will never come back to life.   
  5.     if (mAlive) {  
  6.         status_t status = IPCThreadState::self()->transact(  
  7.             mHandle, code, data, reply, flags);  
  8.         if (status == DEAD_OBJECT) mAlive = 0;  
  9.         return status;  
  10.     }  
  11.   
  12.     return DEAD_OBJECT;  
  13. }  
// Forwards a transaction for this proxy's handle to the IPC thread state.
// A proxy whose remote object has died permanently reports DEAD_OBJECT.
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (!mAlive)
        return DEAD_OBJECT;

    const status_t status = IPCThreadState::self()->transact(
        mHandle, code, data, reply, flags);
    if (status == DEAD_OBJECT) mAlive = 0;
    return status;
}

这里会调用IPCThreadState的transact方法,这里的mHandle等于0,表示数据要发往ServiceManager,code是ADD_SERVICE_TRANSACTION,data是上面画出来的Parcel数据。来看IPCThreadState的transact的实现:

  1. status_t IPCThreadState::transact(int32_t handle,  
  2.                                   uint32_t code, const Parcel& data,  
  3.                                   Parcel* reply, uint32_t flags)  
  4. {  
  5.     status_t err = data.errorCheck();  
  6.   
  7.     flags |= TF_ACCEPT_FDS;  
  8.   
  9.       
  10.     if (err == NO_ERROR) {  
  11.         LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),  
  12.             (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");  
  13.         err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);  
  14.     }  
  15.       
  16.     if ((flags & TF_ONE_WAY) == 0) {  
  17.         if (reply) {  
  18.             err = waitForResponse(reply);  
  19.         } else {  
  20.             Parcel fakeReply;  
  21.             err = waitForResponse(&fakeReply);  
  22.         }  
  23.     } else {  
  24.         err = waitForResponse(NULL, NULL);  
  25.     }  
  26.       
  27.     return err;  
  28. }  
// Queues a BC_TRANSACTION for `handle` and, unless TF_ONE_WAY is set,
// blocks in waitForResponse until the matching reply arrives.
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();

    // We are always willing to receive file descriptors.
    flags |= TF_ACCEPT_FDS;

    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        // Stage the command + binder_transaction_data into mOut.
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if (flags & TF_ONE_WAY) {
        // Asynchronous call: nothing comes back beyond the driver ack.
        err = waitForResponse(NULL, NULL);
    } else if (reply != NULL) {
        err = waitForResponse(reply);
    } else {
        // Caller ignores the reply, but the protocol still delivers one;
        // drain it into a scratch parcel.
        Parcel fakeReply;
        err = waitForResponse(&fakeReply);
    }

    return err;
}

首先做参数检查,如果参数无误,就调用writeTransactionData将数据写入到IPCThreadState的mOut这个Parcel对象中等待发送出去。在IPCThreadState中有两个Parcel对象,一个是mOut;一个是mIn。分别用于记录发往binder驱动的数据和接收binder驱动写给上层的数据,我们后面分析会看到。先来看writeTransactionData的实现:

  1. status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,  
  2.     int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)  
  3. {  
  4.     binder_transaction_data tr;  
  5.   
  6.     tr.target.handle = handle;  
  7.     tr.code = code;  
  8.     tr.flags = binderFlags;  
  9.     tr.cookie = 0;  
  10.     tr.sender_pid = 0;  
  11.     tr.sender_euid = 0;  
  12.       
  13.     const status_t err = data.errorCheck();  
  14.     if (err == NO_ERROR) {  
  15.         tr.data_size = data.ipcDataSize();  
  16.         tr.data.ptr.buffer = data.ipcData();  
  17.         tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);  
  18.         tr.data.ptr.offsets = data.ipcObjects();  
  19.     } else if (statusBuffer) {  
  20.         tr.flags |= TF_STATUS_CODE;  
  21.         *statusBuffer = err;  
  22.         tr.data_size = sizeof(status_t);  
  23.         tr.data.ptr.buffer = statusBuffer;  
  24.         tr.offsets_size = 0;  
  25.         tr.data.ptr.offsets = NULL;  
  26.     } else {  
  27.         return (mLastError = err);  
  28.     }  
  29.       
  30.     mOut.writeInt32(cmd);  
  31.     mOut.write(&tr, sizeof(tr));  
  32.       
  33.     return NO_ERROR;  
  34. }  
// Packs a binder_transaction_data describing `data` (or, on marshalling
// error, the error code itself via `statusBuffer`) and appends `cmd`
// followed by the struct to mOut, ready for the next talkWithDriver().
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    // Destination and request identity; pids/uids are filled in by the
    // driver, not by us.
    tr.target.handle = handle;
    tr.cookie = 0;
    tr.code = code;
    tr.flags = binderFlags;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err != NO_ERROR) {
        if (!statusBuffer)
            return (mLastError = err);
        // Ship the error status itself as the transaction payload.
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = statusBuffer;
        tr.offsets_size = 0;
        tr.data.ptr.offsets = NULL;
    } else {
        // Point the kernel at the parcel's flat data and at the offset
        // table locating every flat_binder_object inside it.
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}

首先声明一个binder_transaction_data数据结构,它的定义是在binder驱动的binder.h中:

  1. struct binder_transaction_data {  
  2.         /* The first two are only used for bcTRANSACTION and brTRANSACTION, 
  3.          * identifying the target and contents of the transaction. 
  4.          */  
  5.         union {  
  6.                 size_t  handle; /* target descriptor of command transaction */  
  7.                 void    *ptr;   /* target descriptor of return transaction */  
  8.         } target;  
  9.         void            *cookie;        /* target object cookie */  
  10.         unsigned int    code;           /* transaction command */  
  11.   
  12.         /* General information about the transaction. */  
  13.         unsigned int    flags;  
  14.         pid_t           sender_pid;  
  15.         uid_t           sender_euid;  
  16.         size_t          data_size;      /* number of bytes of data */  
  17.         size_t          offsets_size;   /* number of bytes of offsets */  
  18.   
  19.         /* If this transaction is inline, the data immediately 
  20.          * follows here; otherwise, it ends with a pointer to 
  21.          * the data buffer. 
  22.          */  
  23.         union {  
  24.                 struct {  
  25.                         /* transaction data */  
  26.                         const void      *buffer;  
  27.                         /* offsets from buffer to flat_binder_object structs */  
  28.                         const void      *offsets;  
  29.                 } ptr;  
  30.                 uint8_t buf[8];  
  31.         } data;  
  32. };  
// Payload of BC_TRANSACTION / BC_REPLY (and BR_TRANSACTION / BR_REPLY),
// shared between userspace and the binder driver (kernel ABI — layout is
// fixed).  `target` is a handle when sent into the driver and an object
// pointer when delivered back up; `data.ptr` points at the flat parcel
// bytes plus the offset table of embedded flat_binder_objects.
struct binder_transaction_data {
        /* The first two are only used for bcTRANSACTION and brTRANSACTION,
         * identifying the target and contents of the transaction.
         */
        union {
                size_t  handle; /* target descriptor of command transaction */
                void    *ptr;   /* target descriptor of return transaction */
        } target;
        void            *cookie;        /* target object cookie */
        unsigned int    code;           /* transaction command */

        /* General information about the transaction. */
        unsigned int    flags;
        pid_t           sender_pid;
        uid_t           sender_euid;
        size_t          data_size;      /* number of bytes of data */
        size_t          offsets_size;   /* number of bytes of offsets */

        /* If this transaction is inline, the data immediately
         * follows here; otherwise, it ends with a pointer to
         * the data buffer.
         */
        union {
                struct {
                        /* transaction data */
                        const void      *buffer;
                        /* offsets from buffer to flat_binder_object structs */
                        const void      *offsets;
                } ptr;
                uint8_t buf[8];
        } data;
};

binder_transaction_data结构中的target记录着这个数据要发往哪里,如果从用户层发往binder驱动,就设置handle为要发往的那个service在binder驱动中的handle id;如果由kernel发回上层,则设置ptr为要发送的binder的weak refs,cookie设置为binder本身。回到writeTransactionData中,将所有的数据记录在binder_transaction_data的buffer指针;所有的flat_binder_object数据结构偏移地址保存在offsets上;data_size记录整个Pacel对象的大小;offsets_size记录保存的flat_binder_object个数。接着把上面的cmd和binder_transaction_data写入到mOut对象中。这里mOut对象中的数据结构组织如下:

cmdBC_TRANSACTION
binder_transaction_datatarget(handle)0
cookie0
codeADD_SERVICE_TRANSACTION
flags0
sender_pid0
sender_euid0
data_size 
offsets_size 
bufferStrict Mode                       0
interface                           "android.os.IServiceManager"
name                                  ”media.audio_flinger"
flat_binder_object           type     BINDER_TYPE_BINDER
                                           flags      0
                                           binder   local->getWeakRefs
                                           cookie   local
offsets26


回到IPCThreadState的transact方法,接着会调用waitForResponse与binder驱动交互并获取reply结果。TF_ONE_WAY表示这是一个异步消息或者不需要等待回复,这里没有设置。

  1. status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)  
  2. {  
  3.     int32_t cmd;  
  4.     int32_t err;  
  5.   
  6.     while (1) {  
  7.         if ((err=talkWithDriver()) < NO_ERROR) break;  
  8.         err = mIn.errorCheck();  
  9.         if (err < NO_ERROR) break;  
  10.         if (mIn.dataAvail() == 0) continue;  
  11.           
  12.         cmd = mIn.readInt32();  
  13.           
  14.         IF_LOG_COMMANDS() {  
  15.             alog << "Processing waitForResponse Command: "  
  16.                 << getReturnString(cmd) << endl;  
  17.         }  
  18.   
  19.         switch (cmd) {  
  20.         case BR_TRANSACTION_COMPLETE:  
  21.             if (!reply && !acquireResult) goto finish;  
  22.             break;  
  23.           
  24.         case BR_DEAD_REPLY:  
  25.             err = DEAD_OBJECT;  
  26.             goto finish;  
  27.   
  28.         case BR_FAILED_REPLY:  
  29.             err = FAILED_TRANSACTION;  
  30.             goto finish;  
  31.           
  32.         case BR_REPLY:  
  33.             {  
  34.                 binder_transaction_data tr;  
  35.                 err = mIn.read(&tr, sizeof(tr));  
  36.                 ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");  
  37.                 if (err != NO_ERROR) goto finish;  
  38.   
  39.                 if (reply) {  
  40.                     if ((tr.flags & TF_STATUS_CODE) == 0) {  
  41.                         reply->ipcSetDataReference(  
  42.                             reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),  
  43.                             tr.data_size,  
  44.                             reinterpret_cast<const size_t*>(tr.data.ptr.offsets),  
  45.                             tr.offsets_size/sizeof(size_t),  
  46.                             freeBuffer, this);  
  47.                     } else {  
  48.   
  49.                     }  
  50.                 } else {  
  51.   
  52.                 }  
  53.             }  
  54.             goto finish;  
  55.   
  56.         default:  
  57.             err = executeCommand(cmd);  
  58.             if (err != NO_ERROR) goto finish;  
  59.             break;  
  60.         }  
  61.     }  
  62.   
  63. finish:  
  64.     if (err != NO_ERROR) {  
  65.         if (acquireResult) *acquireResult = err;  
  66.         if (reply) reply->setError(err);  
  67.         mLastError = err;  
  68.     }  
  69.       
  70.     return err;  
  71. }  
// Drives a pending transaction to completion: loops talking to the binder
// driver and dispatching the BR_* commands it queues into mIn, until the
// final reply (or an error) arrives.  For a synchronous call, `reply`
// receives the BR_REPLY payload; `acquireResult` (when non-NULL) receives
// the status of an acquire-style request.
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        // One BINDER_WRITE_READ round-trip: flush mOut, refill mIn.
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        // Driver produced nothing this round; go back and talk again.
        if (mIn.dataAvail() == 0) continue;
        
        cmd = mIn.readInt32();
        
        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            // Write side acknowledged; only a one-way call (neither reply
            // nor acquireResult wanted) is finished here.
            if (!reply && !acquireResult) goto finish;
            break;
        
        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;
            goto finish;

        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;
            goto finish;
        
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        // Normal reply: hand the kernel-owned buffer to the
                        // reply parcel; freeBuffer releases it when the
                        // parcel is done with it.
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t),
                            freeBuffer, this);
                    } else {
                        // Status-code reply: branch elided in this excerpt.

                    }
                } else {
                    // Caller discarded the reply: branch elided here.

                }
            }
            goto finish;

        default:
            // All other BR_* commands go through the generic dispatcher.
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }

finish:
    if (err != NO_ERROR) {
        // Propagate the failure to whichever outputs the caller provided.
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }
    
    return err;
}

上面的代码中,循环的调用talkWithDriver与binder驱动交互,并获取reply,直至获取到的回复cmd是BR_REPLY或者出错才退出。首先来看talkWithDriver如何把数据发往binder驱动并从binder驱动中获取reply:

  1. status_t IPCThreadState::talkWithDriver(bool doReceive)  
  2. {  
  3.     binder_write_read bwr;  
  4.       
  5.     // Is the read buffer empty?   
  6.     const bool needRead = mIn.dataPosition() >= mIn.dataSize();  
  7.       
  8.     const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;  
  9.       
  10.     bwr.write_size = outAvail;  
  11.     bwr.write_buffer = (long unsigned int)mOut.data();  
  12.   
  13.     // This is what we'll read.   
  14.     if (doReceive && needRead) {  
  15.         bwr.read_size = mIn.dataCapacity();  
  16.         bwr.read_buffer = (long unsigned int)mIn.data();  
  17.     } else {  
  18.         bwr.read_size = 0;  
  19.         bwr.read_buffer = 0;  
  20.     }  
  21.       
  22.     // Return immediately if there is nothing to do.   
  23.     if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;  
  24.   
  25.     bwr.write_consumed = 0;  
  26.     bwr.read_consumed = 0;  
  27.     status_t err;  
  28.     do {  
  29.   
  30.         if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)  
  31.             err = NO_ERROR;  
  32.         else  
  33.             err = -errno;  
  34.               
  35.     } while (err == -EINTR);  
  36.   
  37.     if (err >= NO_ERROR) {  
  38.         if (bwr.write_consumed > 0) {  
  39.             if (bwr.write_consumed < (ssize_t)mOut.dataSize())  
  40.                 mOut.remove(0, bwr.write_consumed);  
  41.             else  
  42.                 mOut.setDataSize(0);  
  43.         }  
  44.         if (bwr.read_consumed > 0) {  
  45.             mIn.setDataSize(bwr.read_consumed);  
  46.             mIn.setDataPosition(0);  
  47.         }  
  48.   
  49.         return NO_ERROR;  
  50.     }  
  51.       
  52.     return err;  
  53. }  
// Performs one BINDER_WRITE_READ ioctl: sends whatever is staged in mOut
// and (optionally) blocks to fill mIn with commands from the driver.
// `doReceive` presumably defaults to true in the class declaration (not
// visible in this excerpt) — TODO confirm against IPCThreadState.h.
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;
    
    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    
    // Only write when mIn holds no unconsumed commands (or when we are not
    // receiving at all); pending replies must be processed first.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    
    bwr.write_size = outAvail;
    bwr.write_buffer = (long unsigned int)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (long unsigned int)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    
    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {

        // Retry if the ioctl was interrupted by a signal.
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
            
    } while (err == -EINTR);

    if (err >= NO_ERROR) {
        // Drop the bytes the driver actually consumed from mOut...
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < (ssize_t)mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        // ...and expose the bytes it produced through mIn.
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }

        return NO_ERROR;
    }
    
    return err;
}

talkWithDriver首先声明一个binder_write_read数据结构,前面我们已经介绍过这个数据结构了,它是用来和binder驱动交互的数据类型。再判断mIn中是否还有未读出的数据。如果mIn中有未读的数据,并且这是一个同步的请求,需要等待binder的回复,则先将write_size置为0,不往binder驱动中写入数据和读出数据,先处理完mIn中的reply。因为这里是第一次进入到talkWithDriver,所以这里的mIn初始化为空。将write_buffer指向上面的mOut数据,read_buffer指向mIn数据,并设置write_size和read_size。binder驱动会判断write_size和read_size分别执行write和read请求。接着调用ioctl向binder驱动写入数据,这里的write_size和read_size都不为0,mOut中带有BC_TRANSACTION 的命令和binder_transaction_data数据。在binder_ioctl中,先调用binder_thread_write去处理写请求,再调用binder_thread_read处理读请求。先来看binder_thread_write的实现:

  1. int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  
  2.                         void __user *buffer, int size, signed long *consumed)  
  3. {  
  4.         uint32_t cmd;  
  5.         void __user *ptr = buffer + *consumed;  
  6.         void __user *end = buffer + size;  
  7.   
  8.         while (ptr < end && thread->return_error == BR_OK) {  
  9.                 if (get_user(cmd, (uint32_t __user *)ptr))  
  10.                         return -EFAULT;  
  11.                 ptr += sizeof(uint32_t);  
  12.                 trace_binder_command(cmd);  
  13.                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {  
  14.                         binder_stats.bc[_IOC_NR(cmd)]++;  
  15.                         proc->stats.bc[_IOC_NR(cmd)]++;  
  16.                         thread->stats.bc[_IOC_NR(cmd)]++;  
  17.                 }  
  18.                 switch (cmd) {  
  19.   
  20.                 case BC_TRANSACTION:  
  21.                 case BC_REPLY: {  
  22.                         struct binder_transaction_data tr;  
  23.   
  24.                         if (copy_from_user(&tr, ptr, sizeof(tr)))  
  25.                                 return -EFAULT;  
  26.                         ptr += sizeof(tr);  
  27.                         binder_transaction(proc, thread, &tr, cmd == BC_REPLY);  
  28.                         break;  
  29.                 }  
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        void __user *buffer, int size, signed long *consumed)
{
        uint32_t cmd;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;

        while (ptr < end && thread->return_error == BR_OK) {
                if (get_user(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
                trace_binder_command(cmd);
                if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
                        binder_stats.bc[_IOC_NR(cmd)]++;
                        proc->stats.bc[_IOC_NR(cmd)]++;
                        thread->stats.bc[_IOC_NR(cmd)]++;
                }
                switch (cmd) {

                case BC_TRANSACTION:
                case BC_REPLY: {
                        struct binder_transaction_data tr;

                        if (copy_from_user(&tr, ptr, sizeof(tr)))
                                return -EFAULT;
                        ptr += sizeof(tr);
                        binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
                        break;
                }

处理BC_TRANSACTION和BC_REPLY是在同一个case语句,都是先从buffer中获取到binder_transaction_data数据结构后,这里tr内容是:

binder_transaction_data:
    target(handle)    0
    cookie            0
    code              ADD_SERVICE_TRANSACTION
    flags             0
    sender_pid        0
    sender_euid       0
    data_size         (数据区大小)
    offsets_size      (偏移区大小)
    buffer:   Strict Mode Policy          0
              interface                   "android.os.IServiceManager"
              name                        "media.audio_flinger"
              flat_binder_object          type     BINDER_TYPE_BINDER
                                          flags    0
                                          binder   local->getWeakRefs()
                                          cookie   local
    offsets           26 (flat_binder_object在buffer中的偏移)


调用binder_transaction来处理:

  1. static void binder_transaction(struct binder_proc *proc,  
  2.                                struct binder_thread *thread,  
  3.                                struct binder_transaction_data *tr, int reply)  
  4. {  
  5.         struct binder_transaction *t;  
  6.         struct binder_work *tcomplete;  
  7.         size_t *offp, *off_end;  
  8.         struct binder_proc *target_proc;  
  9.         struct binder_thread *target_thread = NULL;  
  10.         struct binder_node *target_node = NULL;  
  11.         struct list_head *target_list;  
  12.         wait_queue_head_t *target_wait;  
  13.         struct binder_transaction *in_reply_to = NULL;  
  14.         struct binder_transaction_log_entry *e;  
  15.         uint32_t return_error;  
  16.   
  17.         if (reply) {  
  18.   
  19.         } else {  
  20.                 if (tr->target.handle) {  
  21.   
  22.                 } else {  
  23.                         target_node = binder_context_mgr_node;  
  24.                         if (target_node == NULL) {  
  25.   
  26.                         }  
  27.                 }  
  28.                 e->to_node = target_node->debug_id;  
  29.                 target_proc = target_node->proc;  
  30.                 if (target_proc == NULL) {  
  31.   
  32.                 }  
  33.                 if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {  
  34.   
  35.                 }  
  36.                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {  
  37.   
  38.                 }  
  39.         }  
  40.         if (target_thread) {  
  41.   
  42.         } else {  
  43.                 target_list = &target_proc->todo;  
  44.                 target_wait = &target_proc->wait;  
  45.         }  
  46.         e->to_proc = target_proc->pid;  
  47.   
  48.         /* TODO: reuse incoming transaction for reply */  
  49.         t = kzalloc(sizeof(*t), GFP_KERNEL);  
  50.         if (t == NULL) {  
  51.                 return_error = BR_FAILED_REPLY;  
  52.                 goto err_alloc_t_failed;  
  53.         }  
  54.         binder_stats_created(BINDER_STAT_TRANSACTION);  
  55.   
  56.         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);  
  57.         if (tcomplete == NULL) {  
  58.                 return_error = BR_FAILED_REPLY;  
  59.                 goto err_alloc_tcomplete_failed;  
  60.         }  
  61.         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);  
  62.   
  63.         t->debug_id = ++binder_last_id;  
  64.         e->debug_id = t->debug_id;  
  65.   
  66.         if (!reply && !(tr->flags & TF_ONE_WAY))  
  67.                 t->from = thread;  
  68.         else  
  69.                 t->from = NULL;  
  70.         t->sender_euid = proc->tsk->cred->euid;  
  71.         t->to_proc = target_proc;  
  72.         t->to_thread = target_thread;  
  73.         t->code = tr->code;  
  74.         t->flags = tr->flags;  
  75.         t->priority = task_nice(current);  
  76.   
  77.         t->buffer = binder_alloc_buf(target_proc, tr->data_size,  
  78.                 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));  
  79.         if (t->buffer == NULL) {  
  80.   
  81.         }  
  82.         t->buffer->allow_user_free = 0;  
  83.         t->buffer->debug_id = t->debug_id;  
  84.         t->buffer->transaction = t;  
  85.         t->buffer->target_node = target_node;  
  86.         trace_binder_transaction_alloc_buf(t->buffer);  
  87.         if (target_node)  
  88.                 binder_inc_node(target_node, 1, 0, NULL);  
  89.   
  90.         offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));  
  91.   
  92.         if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {  
  93.   
  94.         }  
  95.         if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {  
  96.   
  97.         }  
  98.         if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {  
  99.   
  100.         }  
  101.         off_end = (void *)offp + tr->offsets_size;  
  102.         for (; offp < off_end; offp++) {  
  103.                 struct flat_binder_object *fp;  
  104.                 if (*offp > t->buffer->data_size - sizeof(*fp) ||  
  105.                     t->buffer->data_size < sizeof(*fp) ||  
  106.                     !IS_ALIGNED(*offp, sizeof(void *))) {  
  107.   
  108.                 }  
  109.                 fp = (struct flat_binder_object *)(t->buffer->data + *offp);  
  110.                 switch (fp->type) {  
  111.                 case BINDER_TYPE_BINDER:  
  112.                 case BINDER_TYPE_WEAK_BINDER: {  
  113.                         struct binder_ref *ref;  
  114.                         struct binder_node *node = binder_get_node(proc, fp->binder);  
  115.                         if (node == NULL) {  
  116.                                 node = binder_new_node(proc, fp->binder, fp->cookie);  
  117.                                 if (node == NULL) {  
  118.                                         return_error = BR_FAILED_REPLY;  
  119.                                         goto err_binder_new_node_failed;  
  120.                                 }  
  121.                                 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;  
  122.                                 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);  
  123.                         }  
  124.                         if (fp->cookie != node->cookie) {  
  125.                                 binder_user_error("binder: %d:%d sending u%p "  
  126.                                         "node %d, cookie mismatch %p != %p\n",  
  127.                                         proc->pid, thread->pid,  
  128.                                         fp->binder, node->debug_id,  
  129.                                         fp->cookie, node->cookie);  
  130.                                 goto err_binder_get_ref_for_node_failed;  
  131.                         }  
  132.                         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {  
  133.                                 return_error = BR_FAILED_REPLY;  
  134.                                 goto err_binder_get_ref_for_node_failed;  
  135.                         }  
  136.                         ref = binder_get_ref_for_node(target_proc, node);  
  137.                         if (ref == NULL) {  
  138.                                 return_error = BR_FAILED_REPLY;  
  139.                                 goto err_binder_get_ref_for_node_failed;  
  140.                         }  
  141.                         if (fp->type == BINDER_TYPE_BINDER)  
  142.                                 fp->type = BINDER_TYPE_HANDLE;  
  143.                         else  
  144.                                 fp->type = BINDER_TYPE_WEAK_HANDLE;  
  145.                         fp->handle = ref->desc;  
  146.                         binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,  
  147.                                        &thread->todo);  
  148.   
  149.                 } break;  
  150.                 case BINDER_TYPE_HANDLE:  
  151.                 case BINDER_TYPE_WEAK_HANDLE: {  
  152.                          
  153.                 } break;  
  154.   
  155.                 case BINDER_TYPE_FD: {  
  156.                           
  157.                 } break;  
  158.   
  159.                 default:  
  160.   
  161.                 }  
  162.         }  
  163.         if (reply) {  
  164.                 
  165.         } else if (!(t->flags & TF_ONE_WAY)) {  
  166.                 BUG_ON(t->buffer->async_transaction != 0);  
  167.                 t->need_reply = 1;  
  168.                 t->from_parent = thread->transaction_stack;  
  169.                 thread->transaction_stack = t;  
  170.         } else {  
  171.   
  172.         }  
  173.         t->work.type = BINDER_WORK_TRANSACTION;  
  174.         list_add_tail(&t->work.entry, target_list);  
  175.         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;  
  176.         list_add_tail(&tcomplete->entry, &thread->todo);  
  177.         if (target_wait)  
  178.                 wake_up_interruptible(target_wait);  
  179.         return;  
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply)
{
        struct binder_transaction *t;
        struct binder_work *tcomplete;
        size_t *offp, *off_end;
        struct binder_proc *target_proc;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
        struct list_head *target_list;
        wait_queue_head_t *target_wait;
        struct binder_transaction *in_reply_to = NULL;
        struct binder_transaction_log_entry *e;
        uint32_t return_error;

        if (reply) {

        } else {
                if (tr->target.handle) {

                } else {
                        target_node = binder_context_mgr_node;
                        if (target_node == NULL) {

                        }
                }
                e->to_node = target_node->debug_id;
                target_proc = target_node->proc;
                if (target_proc == NULL) {

                }
                if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {

                }
                if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {

                }
        }
        if (target_thread) {

        } else {
                target_list = &target_proc->todo;
                target_wait = &target_proc->wait;
        }
        e->to_proc = target_proc->pid;

        /* TODO: reuse incoming transaction for reply */
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (t == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_alloc_t_failed;
        }
        binder_stats_created(BINDER_STAT_TRANSACTION);

        tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
        if (tcomplete == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_alloc_tcomplete_failed;
        }
        binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

        t->debug_id = ++binder_last_id;
        e->debug_id = t->debug_id;

        if (!reply && !(tr->flags & TF_ONE_WAY))
                t->from = thread;
        else
                t->from = NULL;
        t->sender_euid = proc->tsk->cred->euid;
        t->to_proc = target_proc;
        t->to_thread = target_thread;
        t->code = tr->code;
        t->flags = tr->flags;
        t->priority = task_nice(current);

        t->buffer = binder_alloc_buf(target_proc, tr->data_size,
                tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
        if (t->buffer == NULL) {

        }
        t->buffer->allow_user_free = 0;
        t->buffer->debug_id = t->debug_id;
        t->buffer->transaction = t;
        t->buffer->target_node = target_node;
        trace_binder_transaction_alloc_buf(t->buffer);
        if (target_node)
                binder_inc_node(target_node, 1, 0, NULL);

        offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

        if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {

        }
        if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {

        }
        if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {

        }
        off_end = (void *)offp + tr->offsets_size;
        for (; offp < off_end; offp++) {
                struct flat_binder_object *fp;
                if (*offp > t->buffer->data_size - sizeof(*fp) ||
                    t->buffer->data_size < sizeof(*fp) ||
                    !IS_ALIGNED(*offp, sizeof(void *))) {

                }
                fp = (struct flat_binder_object *)(t->buffer->data + *offp);
                switch (fp->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
                        struct binder_ref *ref;
                        struct binder_node *node = binder_get_node(proc, fp->binder);
                        if (node == NULL) {
                                node = binder_new_node(proc, fp->binder, fp->cookie);
                                if (node == NULL) {
                                        return_error = BR_FAILED_REPLY;
                                        goto err_binder_new_node_failed;
                                }
                                node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                                node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
                        }
                        if (fp->cookie != node->cookie) {
                                binder_user_error("binder: %d:%d sending u%p "
                                        "node %d, cookie mismatch %p != %p\n",
                                        proc->pid, thread->pid,
                                        fp->binder, node->debug_id,
                                        fp->cookie, node->cookie);
                                goto err_binder_get_ref_for_node_failed;
                        }
                        if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                                return_error = BR_FAILED_REPLY;
                                goto err_binder_get_ref_for_node_failed;
                        }
                        ref = binder_get_ref_for_node(target_proc, node);
                        if (ref == NULL) {
                                return_error = BR_FAILED_REPLY;
                                goto err_binder_get_ref_for_node_failed;
                        }
                        if (fp->type == BINDER_TYPE_BINDER)
                                fp->type = BINDER_TYPE_HANDLE;
                        else
                                fp->type = BINDER_TYPE_WEAK_HANDLE;
                        fp->handle = ref->desc;
                        binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                                       &thread->todo);

                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
                       
                } break;

                case BINDER_TYPE_FD: {
                        
                } break;

                default:

                }
        }
        if (reply) {
              
        } else if (!(t->flags & TF_ONE_WAY)) {
                BUG_ON(t->buffer->async_transaction != 0);
                t->need_reply = 1;
                t->from_parent = thread->transaction_stack;
                thread->transaction_stack = t;
        } else {

        }
        t->work.type = BINDER_WORK_TRANSACTION;
        list_add_tail(&t->work.entry, target_list);
        tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
        list_add_tail(&tcomplete->entry, &thread->todo);
        if (target_wait)
                wake_up_interruptible(target_wait);
        return;

首先,传入到binder_transaction的reply参数为false,只有在cmd等于BC_REPLY时才为true;接着看tr->target.handle,因为我们现在要请求ServiceManager为我们服务,handle id肯定是0,从上面的binder_transaction_data第一个数据也可以看到,所以代码中会设置target_node = binder_context_mgr_node,binder_context_mgr_node就是我们在启动ServiceManager时构造的。target_proc设置为ServiceManager的上下文信息;因为当前thread(即注册AudioFlinger服务的线程)的transaction_stack为空,所以不会进到if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) 这个if中;因为target_thread也为空,所以会设置target_list和target_wait为ServiceManager的todo和wait列表。接下来就会去分配这次事务的binder_transaction结构,我们先来看binder_transaction数据结构:

  1. struct binder_transaction {  
  2.         int debug_id;  
  3.         struct binder_work work;       //连接binder_proc的todo链表中   
  4.         struct binder_thread *from;    //从哪个thread调用的   
  5.         struct binder_transaction *from_parent;  
  6.         struct binder_proc *to_proc;   //被调用的binder_proc信息   
  7.         struct binder_thread *to_thread;    //被调用的binder_thread   
  8.         struct binder_transaction *to_parent;  //   
  9.         unsigned need_reply:1;  
  10.         /* unsigned is_dead:1; */       /* not used at the moment */  
  11.   
  12.         struct binder_buffer *buffer;   //这次事务的数据   
  13.         unsigned int    code;           //这次事务的cmd类型   
  14.         unsigned int    flags;          //这次事务的flag参数   
  15.         long    priority;   
  16.         long    saved_priority;  
  17.         uid_t   sender_euid;  
  18. };  
/*
 * One in-flight binder transaction between two processes.  Allocated in
 * binder_transaction(), queued on the target's todo list via 'work', and
 * consumed on the target side by binder_thread_read().
 */
struct binder_transaction {
        int debug_id;                  // unique id (assigned from ++binder_last_id) for debug logging
        struct binder_work work;       // list entry used to queue this transaction on a binder_proc/binder_thread todo list
        struct binder_thread *from;    // calling thread (NULL for replies and TF_ONE_WAY calls)
        struct binder_transaction *from_parent;  // previous top of the caller's transaction_stack
        struct binder_proc *to_proc;   // target process (binder_proc) of this transaction
        struct binder_thread *to_thread;    // target thread, if one has been chosen (may be NULL)
        struct binder_transaction *to_parent;  // presumably the target-side stack link — not set in the paths shown here
        unsigned need_reply:1;         // set for synchronous (non-TF_ONE_WAY) requests awaiting a reply
        /* unsigned is_dead:1; */       /* not used at the moment */

        struct binder_buffer *buffer;   // kernel buffer holding the payload copied from userspace
        unsigned int    code;           // transaction code (e.g. ADD_SERVICE_TRANSACTION)
        unsigned int    flags;          // flags copied from tr->flags (TF_ONE_WAY etc.)
        long    priority;               // sender's nice value at send time (task_nice(current))
        long    saved_priority;         // NOTE(review): presumably the target's priority saved for later restore — confirm
        uid_t   sender_euid;            // effective uid of the sending task
};

因为当前事务是需要等待回复的(没有设置TF_ONE_WAY的flag),ServiceManager处理完这个transaction后,需要通知注册AudioFlinger服务的线程,这里设置t->from = thread。接着调用binder_alloc_buf从free_buffer红黑树中分配内存,并将用户空间的数据拷贝到内核空间。如果在传入的binder_transaction_data数据中有binder类型的数据,接下来就会一个个处理binder数据。因为这次注册AudioFlinger服务传入的binder type是BINDER_TYPE_BINDER,我们来看这个case分支。因为是第一次注册AudioFlinger服务,所以通过binder_get_node查找当前binder_proc中的nodes红黑树,会返回NULL,这里就先调用binder_new_node创建一个binder_node,在前面讲ServiceManager启动过程中已经讲过binder_new_node和binder_node数据结构了。接着调用binder_get_ref_for_node为刚创建的binder_node再创建一个binder_ref对象,binder_ref对象中的desc数据就是后面我们在获取binder中返回的handle id值:

  1. static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,  
  2.                                                   struct binder_node *node)  
  3. {  
  4.         struct rb_node *n;  
  5.         struct rb_node **p = &proc->refs_by_node.rb_node;  
  6.         struct rb_node *parent = NULL;  
  7.         struct binder_ref *ref, *new_ref;  
  8.   
  9.         while (*p) {  
  10.                 parent = *p;  
  11.                 ref = rb_entry(parent, struct binder_ref, rb_node_node);  
  12.   
  13.                 if (node < ref->node)  
  14.                         p = &(*p)->rb_left;  
  15.                 else if (node > ref->node)  
  16.                         p = &(*p)->rb_right;  
  17.                 else  
  18.                         return ref;  
  19.         }  
  20.         new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);  
  21.         if (new_ref == NULL)  
  22.                 return NULL;  
  23.         binder_stats_created(BINDER_STAT_REF);  
  24.         new_ref->debug_id = ++binder_last_id;  
  25.         new_ref->proc = proc;  
  26.         new_ref->node = node;  
  27.         rb_link_node(&new_ref->rb_node_node, parent, p);  
  28.         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);  
  29.   
  30.         new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;  
  31.         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {  
  32.                 ref = rb_entry(n, struct binder_ref, rb_node_desc);  
  33.                 if (ref->desc > new_ref->desc)  
  34.                         break;  
  35.                 new_ref->desc = ref->desc + 1;  
  36.         }  
  37.   
  38.         p = &proc->refs_by_desc.rb_node;  
  39.         while (*p) {  
  40.                 parent = *p;  
  41.                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);  
  42.   
  43.                 if (new_ref->desc < ref->desc)  
  44.                         p = &(*p)->rb_left;  
  45.                 else if (new_ref->desc > ref->desc)  
  46.                         p = &(*p)->rb_right;  
  47.                 else  
  48.                         BUG();  
  49.         }  
  50.         rb_link_node(&new_ref->rb_node_desc, parent, p);  
  51.         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);  
  52.         if (node) {  
  53.                 hlist_add_head(&new_ref->node_entry, &node->refs);  
  54.   
  55.                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,  
  56.                              "binder: %d new ref %d desc %d for "  
  57.                              "node %d\n", proc->pid, new_ref->debug_id,  
  58.                              new_ref->desc, node->debug_id);  
  59.         } else {  
  60.                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,  
  61.                              "binder: %d new ref %d desc %d for "  
  62.                              "dead node\n", proc->pid, new_ref->debug_id,  
  63.                               new_ref->desc);  
  64.         }  
  65.         return new_ref;  
  66. }  
/*
 * Look up -- or create -- the binder_ref that 'proc' uses to refer to
 * 'node'.  The returned ref's 'desc' is the handle id later handed back
 * to userspace (0 is reserved for the context manager's node).
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
                                                  struct binder_node *node)
{
        struct rb_node *n;
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref, *new_ref;

        /* Search refs_by_node (keyed by node pointer); if this process
         * already has a ref to 'node', reuse it. */
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_node);

                if (node < ref->node)
                        p = &(*p)->rb_left;
                else if (node > ref->node)
                        p = &(*p)->rb_right;
                else
                        return ref;
        }
        /* No existing ref: allocate and initialize a new one, then link
         * it into refs_by_node at the insertion point found above. */
        new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (new_ref == NULL)
                return NULL;
        binder_stats_created(BINDER_STAT_REF);
        new_ref->debug_id = ++binder_last_id;
        new_ref->proc = proc;
        new_ref->node = node;
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

        /* Pick a unique desc: 0 only for the context manager's node;
         * otherwise scan refs_by_desc (sorted ascending by desc) for the
         * lowest unused value >= 1. */
        new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->desc > new_ref->desc)
                        break;
                new_ref->desc = ref->desc + 1;
        }

        /* Insert into refs_by_desc, keyed by desc; a duplicate desc is a
         * kernel bug by construction of the loop above. */
        p = &proc->refs_by_desc.rb_node;
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_desc);

                if (new_ref->desc < ref->desc)
                        p = &(*p)->rb_left;
                else if (new_ref->desc > ref->desc)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_ref->rb_node_desc, parent, p);
        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
        /* Hook the ref onto the node's own refs list so every reference
         * to this node can be enumerated from the node side. */
        if (node) {
                hlist_add_head(&new_ref->node_entry, &node->refs);

                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                             "binder: %d new ref %d desc %d for "
                             "node %d\n", proc->pid, new_ref->debug_id,
                             new_ref->desc, node->debug_id);
        } else {
                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                             "binder: %d new ref %d desc %d for "
                             "dead node\n", proc->pid, new_ref->debug_id,
                              new_ref->desc);
        }
        return new_ref;
}

首先去target_proc(ServiceManager)中的refs_by_node红黑树中查找有没有node对应的binder_refs对象,如果有则返回,这里会返回空并创建一个新的binder_refs对象,并设置它的一些变量。接着查找refs_by_desc红黑树,为刚创建的binder_refs分配一个独一无二的desc值并把这个binder_refs插入到refs_by_desc红黑树中。

回到binder_transaction中,接下来会把传入的flat_binder_object结构中的type和handle改变,还记得在最开始设置flat_binder_object的type = BINDER_TYPE_BINDER,handle(binder) = binder.getWeakRefs();这里将type改为BINDER_TYPE_HANDLE,将handle设为ref->desc(这里就是一个id值)。所以后面ServiceManager再来处理这个binder_transaction结构时,只能得到在Binder驱动中的desc值,通过这个desc值可以从binder驱动中获取到实际binder_node节点。在处理完这些后,先将刚创建的binder_transaction加入到注册AudioFlinger服务的线程的transaction_stack中,表示当前thread有事务正在等待处理。然后将刚创建的binder_transaction添加到ServiceManager所在的binder_proc的todo列表中等待处理。并创建一个binder_work结构的tcomplete对象添加到注册AudioFlinger服务的线程的binder_proc的todo列表中,表示事务已经发送完成了。最后调用wake_up_interruptible去唤醒ServiceManager。

当处理完binder_thread_write后,就会调用binder_thread_read来处理读请求了,首先来看binder_thread_read的实现:

  1. static int binder_thread_read(struct binder_proc *proc,  
  2.                               struct binder_thread *thread,  
  3.                               void  __user *buffer, int size,  
  4.                               signed long *consumed, int non_block)  
  5. {  
  6.         void __user *ptr = buffer + *consumed;  
  7.         void __user *end = buffer + size;  
  8.   
  9.         int ret = 0;  
  10.         int wait_for_proc_work;  
  11.   
  12.         if (*consumed == 0) {  
  13.                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))  
  14.                         return -EFAULT;  
  15.                 ptr += sizeof(uint32_t);  
  16.         }  
  17.   
  18. retry:  
  19.         wait_for_proc_work = thread->transaction_stack == NULL &&  
  20.                                 list_empty(&thread->todo);  
  21.   
  22.   
  23.         thread->looper |= BINDER_LOOPER_STATE_WAITING;  
  24.         if (wait_for_proc_work)  
  25.                 proc->ready_threads++;  
  26.   
  27.         binder_unlock(__func__);  
  28.   
  29.         if (wait_for_proc_work) {  
  30.   
  31.         } else {  
  32.                 if (non_block) {  
  33.   
  34.                 } else  
  35.                         ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));  
  36.         }  
  37.   
  38.         binder_lock(__func__);  
  39.   
  40.         if (wait_for_proc_work)  
  41.                 proc->ready_threads--;  
  42.         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;  
  43.   
  44.         if (ret)  
  45.                 return ret;  
  46.   
  47.         while (1) {  
  48.                 uint32_t cmd;  
  49.                 struct binder_transaction_data tr;  
  50.                 struct binder_work *w;  
  51.                 struct binder_transaction *t = NULL;  
  52.   
  53.                 if (!list_empty(&thread->todo))  
  54.                         w = list_first_entry(&thread->todo, struct binder_work, entry);  
  55.                 else if (!list_empty(&proc->todo) && wait_for_proc_work)  
  56.                         w = list_first_entry(&proc->todo, struct binder_work, entry);  
  57.                 else {                                                                                                                                                                                                                                             if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */  
  58.                                 goto retry;  
  59.                         break;  
  60.   
  61.                 }  
  62.   
  63.                 if (end - ptr < sizeof(tr) + 4)  
  64.                         break;  
  65.   
  66.                 switch (w->type) {  
  67.                 case BINDER_WORK_TRANSACTION: {  
  68.                         t = container_of(w, struct binder_transaction, work);  
  69.                 } break;  
  70.                 case BINDER_WORK_TRANSACTION_COMPLETE: {  
  71.                         cmd = BR_TRANSACTION_COMPLETE;  
  72.                         if (put_user(cmd, (uint32_t __user *)ptr))  
  73.                                 return -EFAULT;  
  74.                         ptr += sizeof(uint32_t);  
  75.   
  76.                         binder_stat_br(proc, thread, cmd);  
  77.                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,  
  78.                                      "binder: %d:%d BR_TRANSACTION_COMPLETE\n",  
  79.                                      proc->pid, thread->pid);  
  80.   
  81.                         list_del(&w->entry);  
  82.                         kfree(w);  
  83.                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);  
  84.                 } break;  
  85.                 case BINDER_WORK_NODE: {  
  86.      
  87.                 } break;  
  88.                 }  
  89.   
  90.                 if (!t)  
  91.                         continue;  
  92.   
  93.         }  
  94.   
  95. done:  
  96.   
  97.         *consumed = ptr - buffer;  
  98.         return 0;  
  99. }  
/* Abridged binder_thread_read(): drains this thread's todo list into the
 * caller's user-space read buffer.  In this excerpt only the
 * BINDER_WORK_TRANSACTION / BINDER_WORK_TRANSACTION_COMPLETE paths are
 * kept; the branches the article elided are left as empty bodies. */
static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              void  __user *buffer, int size,
                              signed long *consumed, int non_block)
{
        void __user *out = buffer + *consumed;  /* next write position */
        void __user *out_end = buffer + size;   /* end of user buffer */

        int err = 0;
        int wait_for_proc_work;

        /* A fresh read buffer always starts with a BR_NOOP marker. */
        if (*consumed == 0) {
                if (put_user(BR_NOOP, (uint32_t __user *)out))
                        return -EFAULT;
                out += sizeof(uint32_t);
        }

retry:
        /* Wait on the process-wide queue only when this thread has no
         * transaction in flight and no private todo work. */
        wait_for_proc_work = thread->transaction_stack == NULL &&
                                list_empty(&thread->todo);

        thread->looper |= BINDER_LOOPER_STATE_WAITING;
        if (wait_for_proc_work)
                proc->ready_threads++;

        binder_unlock(__func__);

        if (wait_for_proc_work) {
                /* elided in this excerpt */
        } else {
                if (non_block) {
                        /* elided in this excerpt */
                } else
                        err = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
        }

        binder_lock(__func__);

        if (wait_for_proc_work)
                proc->ready_threads--;
        thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

        if (err)
                return err;

        for (;;) {
                uint32_t cmd;
                struct binder_transaction_data tr;
                struct binder_work *w;
                struct binder_transaction *t = NULL;

                /* Prefer thread-local work; fall back to process work. */
                if (!list_empty(&thread->todo))
                        w = list_first_entry(&thread->todo, struct binder_work, entry);
                else if (!list_empty(&proc->todo) && wait_for_proc_work)
                        w = list_first_entry(&proc->todo, struct binder_work, entry);
                else {
                        /* No work queued: if only the BR_NOOP was written,
                         * go back to sleep; otherwise return what we have. */
                        if (out - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
                                goto retry;
                        break;

                }

                /* Need room for a command word plus a transaction_data. */
                if (out_end - out < sizeof(tr) + 4)
                        break;

                switch (w->type) {
                case BINDER_WORK_TRANSACTION: {
                        t = container_of(w, struct binder_transaction, work);
                } break;
                case BINDER_WORK_TRANSACTION_COMPLETE: {
                        /* Echo BR_TRANSACTION_COMPLETE to user space and
                         * free the work item. */
                        cmd = BR_TRANSACTION_COMPLETE;
                        if (put_user(cmd, (uint32_t __user *)out))
                                return -EFAULT;
                        out += sizeof(uint32_t);

                        binder_stat_br(proc, thread, cmd);
                        binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
                                     "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
                                     proc->pid, thread->pid);

                        list_del(&w->entry);
                        kfree(w);
                        binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
                } break;
                case BINDER_WORK_NODE: {
                        /* elided in this excerpt */
                } break;
                }

                if (!t)
                        continue;

        }

done:

        /* Report how many bytes were written into the read buffer. */
        *consumed = out - buffer;
        return 0;
}

binder_thread_read首先向user space写入BR_NOOP命令。因为此时的transaction_stack和todo链表都不为空,所以wait_for_proc_work为false,并且wait_event_freezable会直接返回。接下来从todo链表中取出头一个元素即tcomplete对象。前面看到tcomplete对象的type是BINDER_WORK_TRANSACTION_COMPLETE,处理它的case只是很简单的往userspace的buffer写入一个BR_TRANSACTION_COMPLETE命令,并将这个tcomplete对象从todo链表中删除。因为tcomplete对象没有要处理的binder_transaction数据结构,也就是上面的t是空,会继续while循环,最终在else处跳出整个大循环。

回到talkWithDriver函数中,通过ioctl执行完命令并返回0后,看接下来的处理流程:

  1. if (err >= NO_ERROR) {  
  2.     if (bwr.write_consumed > 0) {  
  3.         if (bwr.write_consumed < (ssize_t)mOut.dataSize())  
  4.             mOut.remove(0, bwr.write_consumed);  
  5.         else  
  6.             mOut.setDataSize(0);  
  7.     }  
  8.     if (bwr.read_consumed > 0) {  
  9.         mIn.setDataSize(bwr.read_consumed);  
  10.         mIn.setDataPosition(0);  
  11.     }  
  12.     return NO_ERROR;  
  13. }  
    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < (ssize_t)mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }

上面的write_consumed会在binder_thread_write中被置为已处理命令的大小,而read_consumed会在binder_thread_read中被置为8(因为有BR_NOOP和BR_TRANSACTION_COMPLETE两个命令)。所以上面的代码中首先将mOut的大小设置为0,并将mIn的大小设置为8。回到waitForResponse函数中,首先从mIn中读出BR_NOOP命令,这个命令什么也不做。然后waitForResponse接着调用talkWithDriver,这次进入到talkWithDriver时mIn中还有个命令没有处理,所以设置write_size和read_size都为0,通过ioctl发送到binder驱动后,binder驱动什么也不做,直接返回。接着再从mIn中读出BR_TRANSACTION_COMPLETE命令,BR_TRANSACTION_COMPLETE也是什么都不做,waitForResponse再调用talkWithDriver函数等待ServiceManager执行完ADD_SERVICE后的返回。这时通过ioctl发送到binder驱动的binder_write_read对象的write_size为0,read_size不为0。所以调用binder_thread_read函数去处理读指令,又因为当前thread的transaction_stack不为空,所以最后调用wait_event_freezable(thread->wait, binder_has_thread_work(thread))等待。


回到ServiceManager中,它会在wait_event_freezable_exclusive中等待客户端的请求,首先来看前面分析过的代码:

  1. static int binder_thread_read(struct binder_proc *proc,  
  2.                               struct binder_thread *thread,  
  3.                               void  __user *buffer, int size,  
  4.                               signed long *consumed, int non_block)  
  5. {  
  6.         void __user *ptr = buffer + *consumed;  
  7.         void __user *end = buffer + size;  
  8.   
  9.         int ret = 0;  
  10.         int wait_for_proc_work;  
  11.   
  12.         if (*consumed == 0) {  
  13.                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))  
  14.                         return -EFAULT;  
  15.                 ptr += sizeof(uint32_t);  
  16.         }  
  17.   
  18. retry:  
  19.         wait_for_proc_work = thread->transaction_stack == NULL &&  
  20.                                 list_empty(&thread->todo);  
  21.   
  22.         if (thread->return_error != BR_OK && ptr < end) {  
  23.   
  24.         }  
  25.   
  26.   
  27.         thread->looper |= BINDER_LOOPER_STATE_WAITING;  
  28.         if (wait_for_proc_work)  
  29.                 proc->ready_threads++;  
  30.   
  31.         binder_unlock(__func__);  
  32.   
  33.         trace_binder_wait_for_work(wait_for_proc_work,  
  34.                                    !!thread->transaction_stack,  
  35.                                    !list_empty(&thread->todo));  
  36.         if (wait_for_proc_work) {  
  37.   
  38.                 binder_set_nice(proc->default_priority);  
  39.                 if (non_block) {  
  40.   
  41.                 } else  
  42.                         ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));  
  43.         } else {  
  44.   
  45.         }  
  46.   
  47.         binder_lock(__func__);  
  48.   
  49.         if (wait_for_proc_work)  
  50.                 proc->ready_threads--;  
  51.         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;  
  52.   
  53.         if (ret)  
  54.                 return ret;  
  55.   
  56.         while (1) {  
  57.                 uint32_t cmd;  
  58.                 struct binder_transaction_data tr;  
  59.                 struct binder_work *w;  
  60.                 struct binder_transaction *t = NULL;  
  61.   
  62.                 if (!list_empty(&thread->todo))  
  63.                         w = list_first_entry(&thread->todo, struct binder_work, entry);  
  64.                 else if (!list_empty(&proc->todo) && wait_for_proc_work)  
  65.                         w = list_first_entry(&proc->todo, struct binder_work, entry);  
  66.                 else {  
  67.                         if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */  
  68.                                 goto retry;  
  69.                         break;  
  70.                 }  
  71.   
  72.                 if (end - ptr < sizeof(tr) + 4)  
  73.                         break;  
  74.   
  75.                 switch (w->type) {  
  76.                 case BINDER_WORK_TRANSACTION: {  
  77.                         t = container_of(w, struct binder_transaction, work);  
  78.                 } break;  
  79.                 }  
  80.   
  81.                 if (!t)  
  82.                         continue;  
  83.   
  84.                 BUG_ON(t->buffer == NULL);  
  85.                 if (t->buffer->target_node) {  
  86.                         struct binder_node *target_node = t->buffer->target_node;  
  87.                         tr.target.ptr = target_node->ptr;  
  88.                         tr.cookie =  target_node->cookie;  
  89.                         t->saved_priority = task_nice(current);  
  90.                         if (t->priority < target_node->min_priority &&  
  91.                             !(t->flags & TF_ONE_WAY))  
  92.                                 binder_set_nice(t->priority);  
  93.                         else if (!(t->flags & TF_ONE_WAY) ||  
  94.                                  t->saved_priority > target_node->min_priority)  
  95.                                 binder_set_nice(target_node->min_priority);  
  96.                         cmd = BR_TRANSACTION;  
  97.                 } else {  
  98.   
  99.                 }  
  100.                 tr.code = t->code;  
  101.                 tr.flags = t->flags;  
  102.                 tr.sender_euid = t->sender_euid;  
  103.   
  104.                 if (t->from) {  
  105.                         struct task_struct *sender = t->from->proc->tsk;  
  106.                         tr.sender_pid = task_tgid_nr_ns(sender,  
  107.                                                         current->nsproxy->pid_ns);  
  108.                 } else {  
  109.   
  110.                 }  
  111.   
  112.                 tr.data_size = t->buffer->data_size;  
  113.                 tr.offsets_size = t->buffer->offsets_size;  
  114.                 tr.data.ptr.buffer = (void *)t->buffer->data +  
  115.                                         proc->user_buffer_offset;  
  116.                 tr.data.ptr.offsets = tr.data.ptr.buffer +  
  117.                                         ALIGN(t->buffer->data_size,  
  118.                                             sizeof(void *));  
  119.   
  120.                 if (put_user(cmd, (uint32_t __user *)ptr))  
  121.                         return -EFAULT;  
  122.                 ptr += sizeof(uint32_t);  
  123.                 if (copy_to_user(ptr, &tr, sizeof(tr)))  
  124.                         return -EFAULT;  
  125.                 ptr += sizeof(tr);  
  126.   
  127.   
  128.                 list_del(&t->work.entry);  
  129.                 t->buffer->allow_user_free = 1;  
  130.                 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {  
  131.                         t->to_parent = thread->transaction_stack;  
  132.                         t->to_thread = thread;  
  133.                         thread->transaction_stack = t;  
  134.                 } else {  
  135.   
  136.                 }  
  137.                 break;  
  138.         }  
  139.   
  140. done:  
  141.   
  142.         *consumed = ptr - buffer;  
  143.         if (proc->requested_threads + proc->ready_threads == 0 &&  
  144.             proc->requested_threads_started < proc->max_threads &&  
  145.             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |  
  146.              BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */  
  147.              /*spawn a new thread if we leave this out */) {  
  148.                 proc->requested_threads++;  
  149.                 binder_debug(BINDER_DEBUG_THREADS,  
  150.                              "binder: %d:%d BR_SPAWN_LOOPER\n",  
  151.                              proc->pid, thread->pid);  
  152.                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))  
  153.                         return -EFAULT;  
  154.                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);  
  155.         }  
  156.         return 0;  
  157. }  
/*
 * Abridged binder_thread_read() as executed on the ServiceManager side:
 * the thread has no pending work, so it sleeps on the process-wide wait
 * queue until a client transaction arrives, then copies BR_TRANSACTION
 * plus a binder_transaction_data describing the request up to user space.
 * Branches the article elided are left as empty bodies.
 */
static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              void  __user *buffer, int size,
                              signed long *consumed, int non_block)
{
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;

        int ret = 0;
        int wait_for_proc_work;

        /* A fresh read buffer always starts with a BR_NOOP marker. */
        if (*consumed == 0) {
                if (put_user(BR_NOOP, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
        }

retry:
        /* Wait on the process-wide queue only when this thread has no
         * transaction in flight and no private todo work. */
        wait_for_proc_work = thread->transaction_stack == NULL &&
                                list_empty(&thread->todo);

        if (thread->return_error != BR_OK && ptr < end) {
                /* error-reporting path elided in this excerpt */
        }


        thread->looper |= BINDER_LOOPER_STATE_WAITING;
        if (wait_for_proc_work)
                proc->ready_threads++;

        binder_unlock(__func__);

        trace_binder_wait_for_work(wait_for_proc_work,
                                   !!thread->transaction_stack,
                                   !list_empty(&thread->todo));
        if (wait_for_proc_work) {

                binder_set_nice(proc->default_priority);
                if (non_block) {
                        /* non-blocking path elided in this excerpt */
                } else
                        /* ServiceManager parks here until a client queues
                         * work on proc->todo. */
                        ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
        } else {
                /* thread-local wait path elided in this excerpt */
        }

        binder_lock(__func__);

        if (wait_for_proc_work)
                proc->ready_threads--;
        thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

        if (ret)
                return ret;

        while (1) {
                uint32_t cmd;
                struct binder_transaction_data tr;
                struct binder_work *w;
                struct binder_transaction *t = NULL;

                /* Prefer thread-local work; fall back to process work. */
                if (!list_empty(&thread->todo))
                        w = list_first_entry(&thread->todo, struct binder_work, entry);
                else if (!list_empty(&proc->todo) && wait_for_proc_work)
                        w = list_first_entry(&proc->todo, struct binder_work, entry);
                else {
                        if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                                goto retry;
                        break;
                }

                /* Need room for a command word plus a transaction_data. */
                if (end - ptr < sizeof(tr) + 4)
                        break;

                switch (w->type) {
                case BINDER_WORK_TRANSACTION: {
                        t = container_of(w, struct binder_transaction, work);
                } break;
                }

                if (!t)
                        continue;

                BUG_ON(t->buffer == NULL);
                if (t->buffer->target_node) {
                        /* Request targets a node this process hosts — here
                         * the ServiceManager's binder_context_mgr_node.
                         * Possibly boost priority for a synchronous call. */
                        struct binder_node *target_node = t->buffer->target_node;
                        tr.target.ptr = target_node->ptr;
                        tr.cookie =  target_node->cookie;
                        t->saved_priority = task_nice(current);
                        if (t->priority < target_node->min_priority &&
                            !(t->flags & TF_ONE_WAY))
                                binder_set_nice(t->priority);
                        else if (!(t->flags & TF_ONE_WAY) ||
                                 t->saved_priority > target_node->min_priority)
                                binder_set_nice(target_node->min_priority);
                        cmd = BR_TRANSACTION;
                } else {
                        /* reply (BR_REPLY) path elided in this excerpt */
                }
                tr.code = t->code;
                tr.flags = t->flags;
                tr.sender_euid = t->sender_euid;

                if (t->from) {
                        /* Synchronous call: record the sender's pid so the
                         * target knows whom to answer. */
                        struct task_struct *sender = t->from->proc->tsk;
                        tr.sender_pid = task_tgid_nr_ns(sender,
                                                        current->nsproxy->pid_ns);
                } else {

                }

                /* Hand user space direct pointers into the mmap'ed kernel
                 * buffer (shifted by user_buffer_offset) — the payload
                 * itself is not copied again. */
                tr.data_size = t->buffer->data_size;
                tr.offsets_size = t->buffer->offsets_size;
                tr.data.ptr.buffer = (void *)t->buffer->data +
                                        proc->user_buffer_offset;
                tr.data.ptr.offsets = tr.data.ptr.buffer +
                                        ALIGN(t->buffer->data_size,
                                            sizeof(void *));

                /* Command word first, then the descriptor. */
                if (put_user(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
                if (copy_to_user(ptr, &tr, sizeof(tr)))
                        return -EFAULT;
                ptr += sizeof(tr);


                /* A synchronous BR_TRANSACTION stays on this thread's
                 * transaction_stack until the matching BC_REPLY arrives. */
                list_del(&t->work.entry);
                t->buffer->allow_user_free = 1;
                if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
                        t->to_parent = thread->transaction_stack;
                        t->to_thread = thread;
                        thread->transaction_stack = t;
                } else {

                }
                break;
        }

done:

        *consumed = ptr - buffer;
        /* Ask user space to spawn another looper thread when the pool has
         * no spare threads left (BR_SPAWN_LOOPER overwrites the BR_NOOP). */
        if (proc->requested_threads + proc->ready_threads == 0 &&
            proc->requested_threads_started < proc->max_threads &&
            (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
             BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
             /*spawn a new thread if we leave this out */) {
                proc->requested_threads++;
                binder_debug(BINDER_DEBUG_THREADS,
                             "binder: %d:%d BR_SPAWN_LOOPER\n",
                             proc->pid, thread->pid);
                if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
                        return -EFAULT;
                binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
        }
        return 0;
}

首先从todo链表中取出前面添加的binder_transaction数据结构,t->buffer->target_node即是binder_context_mgr_node。binder_transaction_data结构我们在前面讲过,这里复制binder_transaction结构中的target_node、code、flags、sender_euid、data_size、offsets_size、buffer和offsets到binder_transaction_data结构中。另外,因为注册AudioFlinger的线程需要等待ServiceManager的回复,所以在sender_pid中记录注册线程的pid号。然后拷贝BR_TRANSACTION命令和binder_transaction_data结构到用户空间,并将binder_transaction数据结构从todo链表中删除。因为cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)为true,表示ServiceManager在处理完这个事务后需要给binder驱动回复,所以这里先将binder_transaction数据结构放在transaction_stack链表中。在done这个标志处,计算是否需要让Service去创建线程来处理事务,以后再来分析这一点。

回到ServiceManager的binder_loop处,返回到用户层的数据如下:

cmdBR_TRANSACTION
binder_transaction_datatarget(ptr)binder_context_mgr_node.local().getWeakRefs()
cookiebinder_context_mgr_node.local()
codeADD_SERVICE_TRANSACTION
flags0
sender_pid注册AudioFlinger的线程的pid
sender_euid0
data_size 
offsets_size 
bufferStrict Mode                       0
interface                           "android.os.IServiceManager"
name                                  "media.audio_flinger"
flat_binder_object          type     BINDER_TYPE_HANDLE
                                           flags      0
                                           handle  ref->desc
                                           cookie   local
offsets26



首先调用binder_parse去解析收到的指令:

  1. int binder_parse(struct binder_state *bs, struct binder_io *bio,  
  2.                  uint32_t *ptr, uint32_t size, binder_handler func)  
  3. {  
  4.     int r = 1;  
  5.     uint32_t *end = ptr + (size / 4);  
  6.   
  7.     while (ptr < end) {  
  8.         uint32_t cmd = *ptr++;  
  9.         switch(cmd) {  
  10.         case BR_TRANSACTION: {  
  11.             struct binder_txn *txn = (void *) ptr;  
  12.             if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {  
  13.                 ALOGE("parse: txn too small!\n");  
  14.                 return -1;  
  15.             }  
  16.             binder_dump_txn(txn);  
  17.             if (func) {  
  18.                 unsigned rdata[256/4];  
  19.                 struct binder_io msg;  
  20.                 struct binder_io reply;  
  21.                 int res;  
  22.   
  23.                 bio_init(&reply, rdata, sizeof(rdata), 4);  
  24.                 bio_init_from_txn(&msg, txn);  
  25.                 res = func(bs, txn, &msg, &reply);  
  26.                 binder_send_reply(bs, &reply, txn->data, res);  
  27.             }  
  28.             ptr += sizeof(*txn) / sizeof(uint32_t);  
  29.             break;  
  30.         }  
  31.         default:  
  32.             ALOGE("parse: OOPS %d\n", cmd);  
  33.             return -1;  
  34.         }  
  35.     }  
  36.   
  37.     return r;  
  38. }  
/* Consume the BR_* commands the kernel wrote into [ptr, ptr+size): for
 * each BR_TRANSACTION, wrap the payload in binder_io structures, run the
 * supplied handler and send its reply back to the driver.  Any other
 * command aborts parsing with -1 (this excerpt keeps only that case). */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int result = 1;
    uint32_t *limit = ptr + (size / 4);

    /* Walk the buffer one command word at a time. */
    while (ptr < limit) {
        uint32_t cmd = *ptr++;

        if (cmd == BR_TRANSACTION) {
            struct binder_txn *txn = (void *) ptr;

            /* Reject a truncated transaction descriptor. */
            if ((limit - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                /* reply writes into rdata; msg wraps the inbound payload. */
                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                /* Return the result and release the kernel buffer. */
                binder_send_reply(bs, &reply, txn->data, res);
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
        } else {
            /* Unexpected command in this abridged excerpt. */
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return result;
}

这里的cmd是前面设置的BR_TRANSACTION,然后txn指向上面的binder_transaction_data结构,来看一下binder_txn的结构,它是和binder_transaction_data数据结构完全一一对应的。

  1. struct binder_txn  
  2. {  
  3.     void *target;  
  4.     void *cookie;  
  5.     uint32_t code;  
  6.     uint32_t flags;  
  7.   
  8.     uint32_t sender_pid;  
  9.     uint32_t sender_euid;  
  10.   
  11.     uint32_t data_size;  
  12.     uint32_t offs_size;  
  13.     void *data;  
  14.     void *offs;  
  15. };  
/* User-space mirror of the kernel's binder_transaction_data: field order
 * and sizes must line up exactly, since the kernel copies one over the
 * other verbatim. */
struct binder_txn
{
    void *target;          /* destination binder (compared against svcmgr_handle) */
    void *cookie;          /* destination cookie */
    uint32_t code;         /* RPC code, e.g. SVC_MGR_ADD_SERVICE */
    uint32_t flags;        /* TF_* transaction flags */

    uint32_t sender_pid;   /* pid of the calling thread */
    uint32_t sender_euid;  /* effective uid of the caller */

    uint32_t data_size;    /* payload size in bytes */
    uint32_t offs_size;    /* size in bytes of the offsets array */
    void *data;            /* payload buffer */
    void *offs;            /* offsets of flat_binder_objects inside data */
};

这里首先调用bio_init和bio_init_from_txn去初始化msg和reply两个binder_io数据结构,调用bio_init_from_txn后,然后msg的数据内容如下:

dataStrict Mode                       0
interface                           "android.os.IServiceManager"
name                                  "media.audio_flinger"
flat_binder_object          type     BINDER_TYPE_HANDLE
                                           flags      0
                                           handle  ref->desc
                                           cookie   local
offs26
data_availdata_size
offs_availoffs_size
data0 
offs0 
flagsBIO_F_SHARED
unused 


再调用svcmgr_handler去处理具体的事务:

  1. int svcmgr_handler(struct binder_state *bs,  
  2.                    struct binder_txn *txn,  
  3.                    struct binder_io *msg,  
  4.                    struct binder_io *reply)  
  5. {  
  6.     struct svcinfo *si;  
  7.     uint16_t *s;  
  8.     unsigned len;  
  9.     void *ptr;  
  10.     uint32_t strict_policy;  
  11.     int allow_isolated;  
  12.   
  13.     if (txn->target != svcmgr_handle)  
  14.         return -1;  
  15.   
  16.     strict_policy = bio_get_uint32(msg);  
  17.     s = bio_get_string16(msg, &len);  
  18.     if ((len != (sizeof(svcmgr_id) / 2)) ||  
  19.         memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {  
  20.         fprintf(stderr,"invalid id %s\n", str8(s));  
  21.         return -1;  
  22.     }  
  23.   
  24.     switch(txn->code) {  
  25.     case SVC_MGR_ADD_SERVICE:  
  26.         s = bio_get_string16(msg, &len);  
  27.         ptr = bio_get_ref(msg);  
  28.         allow_isolated = bio_get_uint32(msg) ? 1 : 0;  
  29.         if (do_add_service(bs, s, len, ptr, txn->sender_euid, allow_isolated))  
  30.             return -1;  
  31.         break;  
  32.     }  
  33.   
  34.     bio_put_uint32(reply, 0);  
  35.     return 0;  
  36. }  
/* ServiceManager's transaction handler (abridged to SVC_MGR_ADD_SERVICE).
 * Validates the target and the RPC interface header, registers the
 * service described in msg, and writes a 0 status word into reply.
 * Returns 0 on success, -1 on any validation or registration failure. */
int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *str16;
    unsigned len;
    void *ref;
    uint32_t strict_policy;
    int allow_isolated;

    /* Only transactions addressed to the service manager are valid. */
    if (txn->target != svcmgr_handle)
        return -1;

    /* RPC header: strict-mode policy word, then the interface token,
     * which must be "android.os.IServiceManager". */
    strict_policy = bio_get_uint32(msg);
    str16 = bio_get_string16(msg, &len);
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, str16, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(str16));
        return -1;
    }

    if (txn->code == SVC_MGR_ADD_SERVICE) {
        /* Payload: service name, binder reference, allow_isolated flag. */
        str16 = bio_get_string16(msg, &len);
        ref = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, str16, len, ref, txn->sender_euid, allow_isolated))
            return -1;
    }

    /* Status word 0 == success. */
    bio_put_uint32(reply, 0);
    return 0;
}

我们前面讲过在数据的开始会写入strict mode和"android.os.IServiceManager",这里会读出这两个数据用于RPC检查。处理SVC_MGR_ADD_SERVICE时首先从msg取出"media.audio_flinger"字串,ptr则取出flat_binder_object中的handle值,然后调用do_add_service来添加service。

  1. int do_add_service(struct binder_state *bs,  
  2.                    uint16_t *s, unsigned len,  
  3.                    void *ptr, unsigned uid, int allow_isolated)  
  4. {  
  5.     struct svcinfo *si;  
  6.     if (!ptr || (len == 0) || (len > 127))  
  7.         return -1;  
  8.   
  9.     if (!svc_can_register(uid, s)) {  
  10.         ALOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",  
  11.              str8(s), ptr, uid);  
  12.         return -1;  
  13.     }  
  14.   
  15.     si = find_svc(s, len);  
  16.     if (si) {  
  17.         if (si->ptr) {  
  18.             ALOGE("add_service('%s',%p) uid=%d - ALREADY REGISTERED, OVERRIDE\n",  
  19.                  str8(s), ptr, uid);  
  20.             svcinfo_death(bs, si);  
  21.         }  
  22.         si->ptr = ptr;  
  23.     } else {  
  24.         si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));  
  25.         if (!si) {  
  26.             ALOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",  
  27.                  str8(s), ptr, uid);  
  28.             return -1;  
  29.         }  
  30.         si->ptr = ptr;  
  31.         si->len = len;  
  32.         memcpy(si->name, s, (len + 1) * sizeof(uint16_t));  
  33.         si->name[len] = '\0';  
  34.         si->death.func = svcinfo_death;  
  35.         si->death.ptr = si;  
  36.         si->allow_isolated = allow_isolated;  
  37.         si->next = svclist;  
  38.         svclist = si;  
  39.     }  
  40.   
  41.     binder_acquire(bs, ptr);  
  42.     binder_link_to_death(bs, ptr, &si->death);  
  43.     return 0;  
  44. }  
/* Register (or re-register) a service named s (len UTF-16 chars) backed
 * by the binder reference ptr.  uid is the caller's euid, used for the
 * permission check.  Returns 0 on success, -1 on failure. */
int do_add_service(struct binder_state *bs,
                   uint16_t *s, unsigned len,
                   void *ptr, unsigned uid, int allow_isolated)
{
    struct svcinfo *svc;

    /* Reject a null reference and an empty or oversized name. */
    if (!ptr || (len == 0) || (len > 127))
        return -1;

    /* Caller must be root/system or explicitly whitelisted for s. */
    if (!svc_can_register(uid, s)) {
        ALOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",
             str8(s), ptr, uid);
        return -1;
    }

    svc = find_svc(s, len);
    if (svc) {
        /* Re-registration: drop the old binder's death notice before
         * overriding its reference. */
        if (svc->ptr) {
            ALOGE("add_service('%s',%p) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s), ptr, uid);
            svcinfo_death(bs, svc);
        }
        svc->ptr = ptr;
    } else {
        /* First registration: allocate a record (name stored inline)
         * and push it onto the global svclist. */
        svc = malloc(sizeof(*svc) + (len + 1) * sizeof(uint16_t));
        if (!svc) {
            ALOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",
                 str8(s), ptr, uid);
            return -1;
        }
        svc->ptr = ptr;
        svc->len = len;
        memcpy(svc->name, s, (len + 1) * sizeof(uint16_t));
        svc->name[len] = '\0';
        svc->death.func = svcinfo_death;
        svc->death.ptr = svc;
        svc->allow_isolated = allow_isolated;
        svc->next = svclist;
        svclist = svc;
    }

    /* Pin the reference and subscribe to its death notification. */
    binder_acquire(bs, ptr);
    binder_link_to_death(bs, ptr, &svc->death);
    return 0;
}

首先调用svc_can_register查看是否能注册这个service,能注册service的条件是注册AudioFlinger服务的进程uid是0或者是AID_SYSTEM,或者在allowed数组中有指定。再调用find_svc通过服务名字去查找是否已经注册,这里是第一次注册,所以返回空。然后创建一个svcinfo对象,并设置它的一些变量,其中ptr保存的是flat_binder_object中的handle值。最后将创建的svcinfo对象加入到全局的svclist链表中。

在svcmgr_handler方法的最后,通过bio_put_uint32(reply, 0)写一个0到reply中。回到binder_parse方法中会调用binder_send_reply向binder驱动返回执行结果,如下:

  1. void binder_send_reply(struct binder_state *bs,  
  2.                        struct binder_io *reply,  
  3.                        void *buffer_to_free,  
  4.                        int status)  
  5. {  
  6.     struct {  
  7.         uint32_t cmd_free;  
  8.         void *buffer;  
  9.         uint32_t cmd_reply;  
  10.         struct binder_txn txn;  
  11.     } __attribute__((packed)) data;  
  12.   
  13.     data.cmd_free = BC_FREE_BUFFER;  
  14.     data.buffer = buffer_to_free;  
  15.     data.cmd_reply = BC_REPLY;  
  16.     data.txn.target = 0;  
  17.     data.txn.cookie = 0;  
  18.     data.txn.code = 0;  
  19.     if (status) {  
  20.         data.txn.flags = TF_STATUS_CODE;  
  21.         data.txn.data_size = sizeof(int);  
  22.         data.txn.offs_size = 0;  
  23.         data.txn.data = &status;  
  24.         data.txn.offs = 0;  
  25.     } else {  
  26.         data.txn.flags = 0;  
  27.         data.txn.data_size = reply->data - reply->data0;  
  28.         data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);  
  29.         data.txn.data = reply->data0;  
  30.         data.txn.offs = reply->offs0;  
  31.     }  
  32.     binder_write(bs, &data, sizeof(data));  
  33. }  
/*
 * Send the outcome of a handled transaction back to the binder driver.
 *
 * Two commands are packed into one write: BC_FREE_BUFFER hands the
 * incoming transaction buffer (buffer_to_free) back to the driver, and
 * BC_REPLY carries either the reply payload built in 'reply' or, when
 * status is non-zero, just the status code flagged with TF_STATUS_CODE.
 */
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       void *buffer_to_free,
                       int status)
{
    /* The layout must match the driver's command stream exactly:
     * command word, its pointer argument, a second command word,
     * then a binder_txn -- hence the packed attribute. */
    struct {
        uint32_t cmd_free;
        void *buffer;
        uint32_t cmd_reply;
        struct binder_txn txn;
    } __attribute__((packed)) msg;

    msg.cmd_free = BC_FREE_BUFFER;
    msg.buffer = buffer_to_free;
    msg.cmd_reply = BC_REPLY;

    /* A reply carries no explicit target; the driver routes it via the
     * calling thread's transaction stack. */
    msg.txn.target = 0;
    msg.txn.cookie = 0;
    msg.txn.code = 0;

    if (!status) {
        /* Success: ship whatever the handler wrote into 'reply'. */
        msg.txn.flags = 0;
        msg.txn.data = reply->data0;
        msg.txn.offs = reply->offs0;
        msg.txn.data_size = reply->data - reply->data0;
        msg.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
    } else {
        /* Failure: the payload is the status code itself. */
        msg.txn.flags = TF_STATUS_CODE;
        msg.txn.data = &status;
        msg.txn.offs = 0;
        msg.txn.data_size = sizeof(int);
        msg.txn.offs_size = 0;
    }

    binder_write(bs, &msg, sizeof(msg));
}

这里填充data 数据结构如下:

cmd_freeBC_FREE_BUFFER
bufferbuffer_to_free
cmd_replyBC_REPLY
binder_txntarget0
cookie0
code0
flags0
sender_pid0
sender_euid0
data_size4
offs_size0
data0
offs0


我们可以看到上面有两个cmd:一个是BC_FREE_BUFFER,一个是BC_REPLY。BC_FREE_BUFFER用于释放前面通过 t->buffer = binder_alloc_buf() 申请的内存;BC_REPLY用于返回执行ADD_SERVICE的结果。binder_write前面我们分析过,它通过ioctl将上面的data数据发送给binder驱动,下面我们直接到binder驱动中去分析binder_thread_write方法:

  1. int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  
  2.                         void __user *buffer, int size, signed long *consumed)  
  3. {  
  4.         uint32_t cmd;  
  5.         void __user *ptr = buffer + *consumed;  
  6.         void __user *end = buffer + size;  
  7.   
  8.         while (ptr < end && thread->return_error == BR_OK) {  
  9.                 if (get_user(cmd, (uint32_t __user *)ptr))  
  10.                         return -EFAULT;  
  11.                 ptr += sizeof(uint32_t);  
  12.                 trace_binder_command(cmd);  
  13.                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {  
  14.                         binder_stats.bc[_IOC_NR(cmd)]++;  
  15.                         proc->stats.bc[_IOC_NR(cmd)]++;  
  16.                         thread->stats.bc[_IOC_NR(cmd)]++;  
  17.                 }  
  18.                 switch (cmd) {  
  19.                 case BC_FREE_BUFFER: {  
  20.                         void __user *data_ptr;  
  21.                         struct binder_buffer *buffer;  
  22.   
  23.                         if (get_user(data_ptr, (void * __user *)ptr))  
  24.                                 return -EFAULT;  
  25.                         ptr += sizeof(void *);  
  26.   
  27.                         buffer = binder_buffer_lookup(proc, data_ptr);  
  28.                         if (buffer == NULL) {  
  29.   
  30.                         }  
  31.                         if (!buffer->allow_user_free) {  
  32.   
  33.                         }  
  34.   
  35.                         if (buffer->transaction) {  
  36.                                 buffer->transaction->buffer = NULL;  
  37.                                 buffer->transaction = NULL;  
  38.                         }  
  39.                         if (buffer->async_transaction && buffer->target_node) {  
  40.                                 BUG_ON(!buffer->target_node->has_async_transaction);  
  41.                                 if (list_empty(&buffer->target_node->async_todo))  
  42.                                         buffer->target_node->has_async_transaction = 0;  
  43.                                 else  
  44.                                         list_move_tail(buffer->target_node->async_todo.next, &thread->todo);  
  45.                         }  
  46.                         binder_transaction_buffer_release(proc, buffer, NULL);  
  47.                         binder_free_buf(proc, buffer);  
  48.                         break;  
  49.                 }  
  50.   
  51.                 case BC_TRANSACTION:  
  52.                 case BC_REPLY: {  
  53.                         struct binder_transaction_data tr;  
  54.   
  55.                         if (copy_from_user(&tr, ptr, sizeof(tr)))  
  56.                                 return -EFAULT;  
  57.                         ptr += sizeof(tr);  
  58.                         binder_transaction(proc, thread, &tr, cmd == BC_REPLY);  
  59.                         break;  
  60.                 }  
  61.                 }  
  62.                 *consumed = ptr - buffer;  
  63.         }  
  64.         return 0;  
  65. }  
/*
 * Excerpt (abridged) from the binder kernel driver: consumes the
 * command stream that userspace wrote into a BINDER_WRITE_READ buffer,
 * one command at a time from buffer[*consumed .. size).  Only the
 * BC_FREE_BUFFER and BC_TRANSACTION/BC_REPLY cases are kept in this
 * excerpt; the other commands and the error branches are elided.
 */
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        void __user *buffer, int size, signed long *consumed)
{
        uint32_t cmd;
        void __user *ptr = buffer + *consumed;  /* next unread position */
        void __user *end = buffer + size;

        while (ptr < end && thread->return_error == BR_OK) {
                /* Every entry starts with a 32-bit command word. */
                if (get_user(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
                trace_binder_command(cmd);
                /* Per-driver / per-process / per-thread command counters. */
                if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
                        binder_stats.bc[_IOC_NR(cmd)]++;
                        proc->stats.bc[_IOC_NR(cmd)]++;
                        thread->stats.bc[_IOC_NR(cmd)]++;
                }
                switch (cmd) {
                case BC_FREE_BUFFER: {
                        /* Userspace is done with a buffer the driver handed
                         * to it earlier; look it up in this proc and free it. */
                        void __user *data_ptr;
                        struct binder_buffer *buffer;

                        if (get_user(data_ptr, (void * __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(void *);

                        buffer = binder_buffer_lookup(proc, data_ptr);
                        if (buffer == NULL) {
                                /* error handling elided in this excerpt */
                        }
                        if (!buffer->allow_user_free) {
                                /* error handling elided in this excerpt */
                        }

                        /* Detach the buffer from its transaction, if any. */
                        if (buffer->transaction) {
                                buffer->transaction->buffer = NULL;
                                buffer->transaction = NULL;
                        }
                        /* For async work, let the next queued async item on
                         * the target node proceed now that this one is done. */
                        if (buffer->async_transaction && buffer->target_node) {
                                BUG_ON(!buffer->target_node->has_async_transaction);
                                if (list_empty(&buffer->target_node->async_todo))
                                        buffer->target_node->has_async_transaction = 0;
                                else
                                        list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
                        }
                        binder_transaction_buffer_release(proc, buffer, NULL);
                        binder_free_buf(proc, buffer);
                        break;
                }

                case BC_TRANSACTION:
                case BC_REPLY: {
                        /* Both commands carry a binder_transaction_data
                         * payload; the last argument tells
                         * binder_transaction() whether this is a reply. */
                        struct binder_transaction_data tr;

                        if (copy_from_user(&tr, ptr, sizeof(tr)))
                                return -EFAULT;
                        ptr += sizeof(tr);
                        binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
                        break;
                }
                }
                /* Report back how much of the write buffer was consumed. */
                *consumed = ptr - buffer;
        }
        return 0;
}

首先处理BC_FREE_BUFFER命令,从BC_FREE_BUFFER指令后取出buffer的地址,并在binder_proc查找相应的的binder_buffer,最后调用binder_transaction_buffer_release和binder_free_buf来释放这块binder_buffer。接着处理BC_REPLY命令,通过copy_from_user将ptr中的数据拷贝到tr数据结构中,这时tr中的数据如下:

binder_transaction_datatarget0
cookie0
code0
flags0
sender_pid0
sender_euid0
data_size4
offset_size0
data0
offsets0


接着来看binder_transaction方法,这个函数在前面处理BC_TRANSACTION命令时分析过。不同的是这里的最后一个参数cmd == BC_REPLY是true。

  1. static void binder_transaction(struct binder_proc *proc,  
  2.                                struct binder_thread *thread,  
  3.                                struct binder_transaction_data *tr, int reply)  
  4. {  
  5.         struct binder_transaction *t;  
  6.         struct binder_work *tcomplete;  
  7.         size_t *offp, *off_end;  
  8.         struct binder_proc *target_proc;  
  9.         struct binder_thread *target_thread = NULL;  
  10.         struct binder_node *target_node = NULL;  
  11.         struct list_head *target_list;  
  12.         wait_queue_head_t *target_wait;  
  13.         struct binder_transaction *in_reply_to = NULL;  
  14.         struct binder_transaction_log_entry *e;  
  15.         uint32_t return_error;  
  16.   
  17.         e = binder_transaction_log_add(&binder_transaction_log);  
  18.         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);  
  19.         e->from_proc = proc->pid;  
  20.         e->from_thread = thread->pid;  
  21.         e->target_handle = tr->target.handle;  
  22.         e->data_size = tr->data_size;  
  23.         e->offsets_size = tr->offsets_size;  
  24.   
  25.         if (reply) {  
  26.                 in_reply_to = thread->transaction_stack;  
  27.                 if (in_reply_to == NULL) {  
  28.   
  29.                 }  
  30.                 binder_set_nice(in_reply_to->saved_priority);  
  31.                 if (in_reply_to->to_thread != thread) {  
  32.   
  33.                 }  
  34.                 thread->transaction_stack = in_reply_to->to_parent;  
  35.                 target_thread = in_reply_to->from;  
  36.                 if (target_thread == NULL) {  
  37.   
  38.                 }  
  39.                 if (target_thread->transaction_stack != in_reply_to) {  
  40.   
  41.                 }  
  42.                 target_proc = target_thread->proc;  
  43.         } else {  
  44.                   
  45.                 }  
  46.         }  
  47.         if (target_thread) {  
  48.                 e->to_thread = target_thread->pid;  
  49.                 target_list = &target_thread->todo;  
  50.                 target_wait = &target_thread->wait;  
  51.         } else {  
  52.                 target_list = &target_proc->todo;  
  53.                 target_wait = &target_proc->wait;  
  54.         }  
  55.         e->to_proc = target_proc->pid;  
  56.   
  57.         /* TODO: reuse incoming transaction for reply */  
  58.         t = kzalloc(sizeof(*t), GFP_KERNEL);  
  59.         if (t == NULL) {  
  60.                 return_error = BR_FAILED_REPLY;  
  61.                 goto err_alloc_t_failed;  
  62.         }  
  63.         binder_stats_created(BINDER_STAT_TRANSACTION);  
  64.   
  65.         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);  
  66.         if (tcomplete == NULL) {  
  67.                 return_error = BR_FAILED_REPLY;  
  68.                 goto err_alloc_tcomplete_failed;  
  69.         }  
  70.         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);  
  71.   
  72.         t->debug_id = ++binder_last_id;  
  73.         e->debug_id = t->debug_id;  
  74.   
  75.         if (reply)  
  76.                 binder_debug(BINDER_DEBUG_TRANSACTION,  
  77.                              "binder: %d:%d BC_REPLY %d -> %d:%d, "  
  78.                              "data %p-%p size %zd-%zd\n",  
  79.                              proc->pid, thread->pid, t->debug_id,  
  80.                              target_proc->pid, target_thread->pid,  
  81.                              tr->data.ptr.buffer, tr->data.ptr.offsets,  
  82.                              tr->data_size, tr->offsets_size);  
  83.         else  
  84.   
  85.   
  86.         if (!reply && !(tr->flags & TF_ONE_WAY))  
  87.                 t->from = thread;  
  88.         else  
  89.                 t->from = NULL;  
  90.         t->sender_euid = proc->tsk->cred->euid;  
  91.         t->to_proc = target_proc;  
  92.         t->to_thread = target_thread;  
  93.         t->code = tr->code;  
  94.         t->flags = tr->flags;  
  95.         t->priority = task_nice(current);  
  96.   
  97.         trace_binder_transaction(reply, t, target_node);  
  98.   
  99.         t->buffer = binder_alloc_buf(target_proc, tr->data_size,  
  100.                 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));  
  101.         if (t->buffer == NULL) {  
  102.   
  103.         }  
  104.         t->buffer->allow_user_free = 0;  
  105.         t->buffer->debug_id = t->debug_id;  
  106.         t->buffer->transaction = t;  
  107.         t->buffer->target_node = target_node;  
  108.         trace_binder_transaction_alloc_buf(t->buffer);  
  109.         if (target_node)  
  110.                 binder_inc_node(target_node, 1, 0, NULL);  
  111.   
  112.         offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));  
  113.   
  114.         if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {  
  115.                 binder_user_error("binder: %d:%d got transaction with invalid "  
  116.                         "data ptr\n", proc->pid, thread->pid);  
  117.                 return_error = BR_FAILED_REPLY;  
  118.                 goto err_copy_data_failed;  
  119.         }  
  120.         if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {  
  121.                 binder_user_error("binder: %d:%d got transaction with invalid "  
  122.                         "offsets ptr\n", proc->pid, thread->pid);  
  123.                 return_error = BR_FAILED_REPLY;  
  124.                 goto err_copy_data_failed;  
  125.         }  
  126.         if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {  
  127.                 binder_user_error("binder: %d:%d got transaction with "  
  128.                         "invalid offsets size, %zd\n",  
  129.                         proc->pid, thread->pid, tr->offsets_size);  
  130.                 return_error = BR_FAILED_REPLY;  
  131.                 goto err_bad_offset;  
  132.         }  
  133.         off_end = (void *)offp + tr->offsets_size;  
  134.         for (; offp < off_end; offp++) {  
  135.                   
  136.         }  
  137.         if (reply) {  
  138.                 BUG_ON(t->buffer->async_transaction != 0);  
  139.                 binder_pop_transaction(target_thread, in_reply_to);  
  140.         } else if (!(t->flags & TF_ONE_WAY)) {  
  141.   
  142.         } else {  
  143.   
  144.         }  
  145.         t->work.type = BINDER_WORK_TRANSACTION;  
  146.         list_add_tail(&t->work.entry, target_list);  
  147.         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;  
  148.         list_add_tail(&tcomplete->entry, &thread->todo);  
  149.         if (target_wait)  
  150.                 wake_up_interruptible(target_wait);  
  151.         return;  
/*
 * Excerpt (abridged AND truncated) from the binder kernel driver: the
 * core routine that moves a transaction or a reply from the sending
 * process into the target process.  For a reply (reply != 0, i.e. the
 * userspace command was BC_REPLY) the target is recovered from the
 * sender's transaction stack rather than looked up by handle.
 * NOTE(review): the non-reply target lookup, the flat_binder_object
 * translation loop, all error branches and the trailing error labels
 * (plus the closing brace) were elided when this excerpt was made, so
 * the brace structure below is not compilable as-is.
 */
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply)
{
        struct binder_transaction *t;
        struct binder_work *tcomplete;
        size_t *offp, *off_end;
        struct binder_proc *target_proc;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
        struct list_head *target_list;
        wait_queue_head_t *target_wait;
        struct binder_transaction *in_reply_to = NULL;
        struct binder_transaction_log_entry *e;
        uint32_t return_error;

        /* Record this call in the global transaction debug log. */
        e = binder_transaction_log_add(&binder_transaction_log);
        e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
        e->from_proc = proc->pid;
        e->from_thread = thread->pid;
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;

        if (reply) {
                /* The transaction being answered sits on top of this
                 * thread's transaction stack; its 'from' thread is the
                 * one waiting for this reply. */
                in_reply_to = thread->transaction_stack;
                if (in_reply_to == NULL) {
                        /* error handling elided in this excerpt */
                }
                /* Restore the priority saved when the call came in. */
                binder_set_nice(in_reply_to->saved_priority);
                if (in_reply_to->to_thread != thread) {
                        /* error handling elided in this excerpt */
                }
                /* Pop the answered transaction off this thread's stack. */
                thread->transaction_stack = in_reply_to->to_parent;
                target_thread = in_reply_to->from;
                if (target_thread == NULL) {
                        /* error handling elided in this excerpt */
                }
                if (target_thread->transaction_stack != in_reply_to) {
                        /* error handling elided in this excerpt */
                }
                target_proc = target_thread->proc;
        } else {
                /* non-reply target lookup elided in this excerpt */
                }
        }
        /* Deliver to a specific thread when known, otherwise to the
         * target process's shared todo list. */
        if (target_thread) {
                e->to_thread = target_thread->pid;
                target_list = &target_thread->todo;
                target_wait = &target_thread->wait;
        } else {
                target_list = &target_proc->todo;
                target_wait = &target_proc->wait;
        }
        e->to_proc = target_proc->pid;

        /* TODO: reuse incoming transaction for reply */
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (t == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_alloc_t_failed;
        }
        binder_stats_created(BINDER_STAT_TRANSACTION);

        /* 'tcomplete' tells the SENDER that the send was accepted. */
        tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
        if (tcomplete == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_alloc_tcomplete_failed;
        }
        binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

        t->debug_id = ++binder_last_id;
        e->debug_id = t->debug_id;

        if (reply)
                binder_debug(BINDER_DEBUG_TRANSACTION,
                             "binder: %d:%d BC_REPLY %d -> %d:%d, "
                             "data %p-%p size %zd-%zd\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_thread->pid,
                             tr->data.ptr.buffer, tr->data.ptr.offsets,
                             tr->data_size, tr->offsets_size);
        else
        /* NOTE(review): the original 'else' body (a binder_debug call)
         * was elided here; in this excerpt the 'else' therefore binds
         * to the following 'if' statement, which differs from the real
         * kernel source. */

        /* A reply never records a sender to answer to: from == NULL
         * means nobody is waiting on this transaction. */
        if (!reply && !(tr->flags & TF_ONE_WAY))
                t->from = thread;
        else
                t->from = NULL;
        t->sender_euid = proc->tsk->cred->euid;
        t->to_proc = target_proc;
        t->to_thread = target_thread;
        t->code = tr->code;
        t->flags = tr->flags;
        t->priority = task_nice(current);

        trace_binder_transaction(reply, t, target_node);

        /* Allocate space inside the TARGET process's binder mmap area
         * and copy the payload across address spaces. */
        t->buffer = binder_alloc_buf(target_proc, tr->data_size,
                tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
        if (t->buffer == NULL) {
                /* error handling elided in this excerpt */
        }
        t->buffer->allow_user_free = 0;
        t->buffer->debug_id = t->debug_id;
        t->buffer->transaction = t;
        t->buffer->target_node = target_node;  /* NULL for a reply */
        trace_binder_transaction_alloc_buf(t->buffer);
        if (target_node)
                binder_inc_node(target_node, 1, 0, NULL);

        /* Offsets array follows the data, pointer-aligned. */
        offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

        if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
                binder_user_error("binder: %d:%d got transaction with invalid "
                        "data ptr\n", proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_copy_data_failed;
        }
        if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
                binder_user_error("binder: %d:%d got transaction with invalid "
                        "offsets ptr\n", proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_copy_data_failed;
        }
        if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
                binder_user_error("binder: %d:%d got transaction with "
                        "invalid offsets size, %zd\n",
                        proc->pid, thread->pid, tr->offsets_size);
                return_error = BR_FAILED_REPLY;
                goto err_bad_offset;
        }
        off_end = (void *)offp + tr->offsets_size;
        for (; offp < off_end; offp++) {
                /* flat_binder_object translation elided in this excerpt;
                 * a plain status reply carries no objects anyway. */
        }
        if (reply) {
                BUG_ON(t->buffer->async_transaction != 0);
                /* Release the original transaction now that it is answered. */
                binder_pop_transaction(target_thread, in_reply_to);
        } else if (!(t->flags & TF_ONE_WAY)) {
                /* synchronous-call bookkeeping elided in this excerpt */
        } else {
                /* one-way (async) bookkeeping elided in this excerpt */
        }
        /* Queue the work on the target and a completion on the sender,
         * then wake the target up. */
        t->work.type = BINDER_WORK_TRANSACTION;
        list_add_tail(&t->work.entry, target_list);
        tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
        list_add_tail(&tcomplete->entry, &thread->todo);
        if (target_wait)
                wake_up_interruptible(target_wait);
        return;
        /* error labels and closing brace truncated in this excerpt */

首先从thread->transaction_stack中取出开始处理ADD_SERVICE时创建的binder_transaction对象,这时的thread是ServiceManager所在的thread,而target_thread和target_proc分别是注册AudioFlinger那一端的thread和进程。然后再创建一个binder_transaction对象t,这时t对象的buffer数据里面将没有binder数据,并且t->buffer->target_node为NULL,其from设置为NULL,表示不需要等待回复。再调用binder_pop_transaction(target_thread, in_reply_to)去释放in_reply_to所占的内存。然后将t发送给注册AudioFlinger所在的thread;并往ServiceManager所在thread的todo队列中加入一个tcomplete对象表示发送完成。先到注册AudioFlinger所在的thread看如何处理:

  1. static int binder_thread_read(struct binder_proc *proc,  
  2.                               struct binder_thread *thread,  
  3.                               void  __user *buffer, int size,  
  4.                               signed long *consumed, int non_block)  
  5. {  
  6.         void __user *ptr = buffer + *consumed;  
  7.         void __user *end = buffer + size;  
  8.   
  9.         int ret = 0;  
  10.         int wait_for_proc_work;  
  11.   
  12.         if (*consumed == 0) {  
  13.                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))  
  14.                         return -EFAULT;  
  15.                 ptr += sizeof(uint32_t);  
  16.         }  
  17.   
  18. retry:  
  19.         wait_for_proc_work = thread->transaction_stack == NULL &&  
  20.                                 list_empty(&thread->todo);  
  21.   
  22.         if (thread->return_error != BR_OK && ptr < end) {  
  23.   
  24.         }  
  25.   
  26.   
  27.         thread->looper |= BINDER_LOOPER_STATE_WAITING;  
  28.         if (wait_for_proc_work)  
  29.                 proc->ready_threads++;  
  30.   
  31.         binder_unlock(__func__);  
  32.   
  33.         trace_binder_wait_for_work(wait_for_proc_work,  
  34.                                    !!thread->transaction_stack,  
  35.                                    !list_empty(&thread->todo));  
  36.         if (wait_for_proc_work) {  
  37.                  
  38.         } else {  
  39.                 if (non_block) {  
  40.   
  41.                 } else  
  42.                         ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));  
  43.         }  
  44.   
  45.         binder_lock(__func__);  
  46.   
  47.         if (wait_for_proc_work)  
  48.                 proc->ready_threads--;  
  49.         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;  
  50.   
  51.         while (1) {  
  52.                 uint32_t cmd;  
  53.                 struct binder_transaction_data tr;  
  54.                 struct binder_work *w;  
  55.                 struct binder_transaction *t = NULL;  
  56.   
  57.                 if (!list_empty(&thread->todo))  
  58.                         w = list_first_entry(&thread->todo, struct binder_work, entry);  
  59.                 else if (!list_empty(&proc->todo) && wait_for_proc_work)  
  60.                         w = list_first_entry(&proc->todo, struct binder_work, entry);  
  61.                 else {  
  62.                         if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */  
  63.                                 goto retry;  
  64.                         break;  
  65.                 }  
  66.   
  67.                 if (end - ptr < sizeof(tr) + 4)  
  68.                         break;  
  69.   
  70.                 switch (w->type) {  
  71.                 case BINDER_WORK_TRANSACTION: {  
  72.                         t = container_of(w, struct binder_transaction, work);  
  73.                 } break;  
  74.                 }  
  75.   
  76.                 if (!t)  
  77.                         continue;  
  78.   
  79.                 BUG_ON(t->buffer == NULL);  
  80.                 if (t->buffer->target_node) {  
  81.   
  82.                 } else {  
  83.                         tr.target.ptr = NULL;  
  84.                         tr.cookie = NULL;  
  85.                         cmd = BR_REPLY;  
  86.                 }  
  87.                 tr.code = t->code;  
  88.                 tr.flags = t->flags;  
  89.                 tr.sender_euid = t->sender_euid;  
  90.   
  91.                 if (t->from) {  
  92.   
  93.                 } else {  
  94.                         tr.sender_pid = 0;  
  95.                 }  
  96.   
  97.                 tr.data_size = t->buffer->data_size;  
  98.                 tr.offsets_size = t->buffer->offsets_size;  
  99.                 tr.data.ptr.buffer = (void *)t->buffer->data +  
  100.                                         proc->user_buffer_offset;  
  101.                 tr.data.ptr.offsets = tr.data.ptr.buffer +  
  102.                                         ALIGN(t->buffer->data_size,  
  103.                                             sizeof(void *));  
  104.   
  105.                 if (put_user(cmd, (uint32_t __user *)ptr))  
  106.                         return -EFAULT;  
  107.                 ptr += sizeof(uint32_t);  
  108.                 if (copy_to_user(ptr, &tr, sizeof(tr)))  
  109.                         return -EFAULT;  
  110.                 ptr += sizeof(tr);  
  111.   
  112.   
  113.                 list_del(&t->work.entry);  
  114.                 t->buffer->allow_user_free = 1;  
  115.                 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {  
  116.   
  117.                 } else {  
  118.                        t->buffer->transaction = NULL;  
  119.                         kfree(t);  
  120.                         binder_stats_deleted(BINDER_STAT_TRANSACTION);  
  121.                 }  
  122.                 break;  
  123.         }  
  124.   
  125. done:  
  126.   
  127.         *consumed = ptr - buffer;  
  128.         return 0;  
  129. }  
/*
 * Excerpt (abridged) from the binder kernel driver: fills the read
 * half of a BINDER_WRITE_READ buffer with work queued for this thread
 * (or, when the thread is idle, for the process as a whole).  Most
 * command cases and all error branches are elided; the path kept here
 * is the one that delivers a BR_REPLY whose buffer carries no binder
 * objects (target_node == NULL).
 */
static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              void  __user *buffer, int size,
                              signed long *consumed, int non_block)
{
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;

        int ret = 0;
        int wait_for_proc_work;

        /* A fresh read always starts with a BR_NOOP marker. */
        if (*consumed == 0) {
                if (put_user(BR_NOOP, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
        }

retry:
        /* Only a thread with no private work may pick up process-wide
         * work from proc->todo. */
        wait_for_proc_work = thread->transaction_stack == NULL &&
                                list_empty(&thread->todo);

        if (thread->return_error != BR_OK && ptr < end) {
                /* pending-error delivery elided in this excerpt */
        }


        thread->looper |= BINDER_LOOPER_STATE_WAITING;
        if (wait_for_proc_work)
                proc->ready_threads++;

        /* Drop the global binder lock while sleeping. */
        binder_unlock(__func__);

        trace_binder_wait_for_work(wait_for_proc_work,
                                   !!thread->transaction_stack,
                                   !list_empty(&thread->todo));
        if (wait_for_proc_work) {
                /* wait on proc->wait -- elided in this excerpt */
        } else {
                if (non_block) {
                        /* non-blocking path elided in this excerpt */
                } else
                        /* Block until this thread has work of its own. */
                        ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
        }

        binder_lock(__func__);

        if (wait_for_proc_work)
                proc->ready_threads--;
        thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

        while (1) {
                uint32_t cmd;
                struct binder_transaction_data tr;
                struct binder_work *w;
                struct binder_transaction *t = NULL;

                /* Thread-local work first, then process-wide work. */
                if (!list_empty(&thread->todo))
                        w = list_first_entry(&thread->todo, struct binder_work, entry);
                else if (!list_empty(&proc->todo) && wait_for_proc_work)
                        w = list_first_entry(&proc->todo, struct binder_work, entry);
                else {
                        if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                                goto retry;
                        break;
                }

                /* Stop if the user buffer cannot hold cmd + tr. */
                if (end - ptr < sizeof(tr) + 4)
                        break;

                switch (w->type) {
                case BINDER_WORK_TRANSACTION: {
                        t = container_of(w, struct binder_transaction, work);
                } break;
                }

                if (!t)
                        continue;

                BUG_ON(t->buffer == NULL);
                if (t->buffer->target_node) {
                        /* BR_TRANSACTION delivery elided in this excerpt */
                } else {
                        /* No target node: this is a reply to our own call. */
                        tr.target.ptr = NULL;
                        tr.cookie = NULL;
                        cmd = BR_REPLY;
                }
                tr.code = t->code;
                tr.flags = t->flags;
                tr.sender_euid = t->sender_euid;

                if (t->from) {
                        /* sender-pid lookup elided in this excerpt */
                } else {
                        tr.sender_pid = 0;
                }

                /* Hand out USERSPACE addresses into the receiver's mmap'ed
                 * binder area: kernel address + per-proc offset. */
                tr.data_size = t->buffer->data_size;
                tr.offsets_size = t->buffer->offsets_size;
                tr.data.ptr.buffer = (void *)t->buffer->data +
                                        proc->user_buffer_offset;
                tr.data.ptr.offsets = tr.data.ptr.buffer +
                                        ALIGN(t->buffer->data_size,
                                            sizeof(void *));

                if (put_user(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
                if (copy_to_user(ptr, &tr, sizeof(tr)))
                        return -EFAULT;
                ptr += sizeof(tr);


                list_del(&t->work.entry);
                /* Userspace may now return this buffer via BC_FREE_BUFFER. */
                t->buffer->allow_user_free = 1;
                if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
                        /* keep transaction on the stack awaiting a reply --
                         * elided in this excerpt */
                } else {
                        /* Replies and one-way calls are finished here. */
                       t->buffer->transaction = NULL;
                        kfree(t);
                        binder_stats_deleted(BINDER_STAT_TRANSACTION);
                }
                break;
        }

done:

        *consumed = ptr - buffer;
        return 0;
}

因为t->buffer->target_node为NULL,所以这里的cmd = BR_REPLY,回到waitForResponse来看如何处理BR_REPLY:

  1. status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)  
  2. {  
  3.     int32_t cmd;  
  4.     int32_t err;  
  5.   
  6.     while (1) {  
  7.         if ((err=talkWithDriver()) < NO_ERROR) break;  
  8.         err = mIn.errorCheck();  
  9.         if (err < NO_ERROR) break;  
  10.         if (mIn.dataAvail() == 0) continue;  
  11.           
  12.         cmd = mIn.readInt32();  
  13.   
  14.         switch (cmd) {  
  15.         case BR_REPLY:  
  16.             {  
  17.                 binder_transaction_data tr;  
  18.                 err = mIn.read(&tr, sizeof(tr));  
  19.   
  20.                 if (reply) {  
  21.                     if ((tr.flags & TF_STATUS_CODE) == 0) {  
  22.                         reply->ipcSetDataReference(  
  23.                             reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),  
  24.                             tr.data_size,  
  25.                             reinterpret_cast<const size_t*>(tr.data.ptr.offsets),  
  26.                             tr.offsets_size/sizeof(size_t),  
  27.                             freeBuffer, this);  
  28.                     } else {  
  29.   
  30.                     }  
  31.                 }  
  32.             }  
  33.             goto finish;  
  34.   
  35.         default:  
  36.             err = executeCommand(cmd);  
  37.             if (err != NO_ERROR) goto finish;  
  38.             break;  
  39.         }  
  40.     }  
  41.   
  42. finish:  
  43.     if (err != NO_ERROR) {  
  44.         if (acquireResult) *acquireResult = err;  
  45.         if (reply) reply->setError(err);  
  46.         mLastError = err;  
  47.     }  
  48.       
  49.     return err;  
  50. }  
// Excerpt (abridged) from libbinder's IPCThreadState: loop talking to
// the binder driver until the reply to an outstanding transact()
// arrives (BR_REPLY) or an error occurs.  Other BR_* return codes and
// the TF_STATUS_CODE branch are elided in this excerpt.
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        // Flush mOut to the driver and pull any pending data into mIn.
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        
        cmd = mIn.readInt32();

        switch (cmd) {
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        // Normal reply: make 'reply' reference the driver's
                        // buffer directly, with freeBuffer registered as the
                        // callback that later returns it via BC_FREE_BUFFER.
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t),
                            freeBuffer, this);
                    } else {
                        // status-code reply handling elided in this excerpt
                    }
                }
            }
            goto finish;

        default:
            // All other BR_* codes (BR_NOOP, BR_TRANSACTION_COMPLETE, ...)
            // are dispatched to executeCommand().
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }

finish:
    // On failure, propagate the error to the caller and to 'reply'.
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }
    
    return err;
}

首先从mIn中读出binder_transaction_data数据,然后调用Parcel的ipcSetDataReference让reply直接引用内核返回的数据缓冲区,并登记日后用于释放这块缓冲区的回调(freeBuffer):

  1. void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,  
  2.     const size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)  
  3. {  
  4.     freeDataNoInit();  
  5.     mError = NO_ERROR;  
  6.     mData = const_cast<uint8_t*>(data);  
  7.     mDataSize = mDataCapacity = dataSize;  
  8.     //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)\n", this, mDataSize, getpid());   
  9.     mDataPos = 0;  
  10.     ALOGV("setDataReference Setting data pos of %p to %d\n"this, mDataPos);  
  11.     mObjects = const_cast<size_t*>(objects);  
  12.     mObjectsSize = mObjectsCapacity = objectsCount;  
  13.     mNextObjectHint = 0;  
  14.     mOwner = relFunc;  
  15.     mOwnerCookie = relCookie;  
  16.     scanForFds();  
  17. }  
// Point this Parcel at an externally-owned binder buffer instead of heap
// storage. 'data'/'dataSize' reference the transaction payload and
// 'objects'/'objectsCount' its offsets array; 'relFunc' + 'relCookie'
// (here IPCThreadState::freeBuffer) are recorded so the buffer can be
// handed back to the binder driver when the Parcel releases its data.
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    freeDataNoInit();   // release whatever buffer we currently reference
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);   // borrowed pointer, not malloc-owned
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)\n", this, mDataSize, getpid());
    mDataPos = 0;   // rewind the read cursor to the start of the payload
    ALOGV("setDataReference Setting data pos of %p to %d\n", this, mDataPos);
    mObjects = const_cast<size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mOwner = relFunc;         // release callback invoked by freeDataNoInit()
    mOwnerCookie = relCookie;
    scanForFds();   // note any file descriptors carried in the payload
}

这里设置mData是前面申请的binder_buffer的地址,mOwner为freeBuffer函数指针,在Parcel的析构函数中调用freeDataNoInit去释放binder_buffer数据结构:

  1. Parcel::~Parcel()  
  2. {  
  3.     freeDataNoInit();  
  4. }  
  5.   
  6. void Parcel::freeDataNoInit()  
  7. {  
  8.     if (mOwner) {  
  9.         mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);  
  10.     } else {  
  11.   
  12.     }  
  13. }  
  14.   
  15. void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data, size_t dataSize,  
  16.                                 const size_t* objects, size_t objectsSize,  
  17.                                 void* cookie)  
  18. {  
  19.     if (parcel != NULL) parcel->closeFileDescriptors();  
  20.     IPCThreadState* state = self();  
  21.     state->mOut.writeInt32(BC_FREE_BUFFER);  
  22.     state->mOut.writeInt32((int32_t)data);  
  23. }  
// Destructor: releases the backing buffer via freeDataNoInit(); when an
// owner callback was registered (ipcSetDataReference), that callback
// returns the buffer to the binder driver.
Parcel::~Parcel()
{
    freeDataNoInit();
}

// Release the backing storage without re-initializing member state.
// If an owner callback is registered, delegate the release to it;
// the non-owner (heap-allocated) branch is elided in this excerpt.
void Parcel::freeDataNoInit()
{
    if (mOwner) {
        // mOwner is e.g. IPCThreadState::freeBuffer for driver-owned buffers.
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
    } else {

    }
}

// Release callback installed by ipcSetDataReference: closes any file
// descriptors the parcel carries, then queues a BC_FREE_BUFFER command so
// the binder driver reclaims the kernel buffer at 'data' on the next
// talkWithDriver() exchange.
void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data, size_t dataSize,
                                const size_t* objects, size_t objectsSize,
                                void* cookie)
{
    if (parcel != NULL) parcel->closeFileDescriptors();
    IPCThreadState* state = self();
    state->mOut.writeInt32(BC_FREE_BUFFER);
    // NOTE(review): casting a pointer to int32_t truncates on 64-bit — this
    // matches the older (32-bit era) AOSP code the article quotes.
    state->mOut.writeInt32((int32_t)data);
}

前面我们分析过处理BC_FREE_BUFFER的流程,这里就不再介绍了。到这里AudioFlinger::instantiate方法就执行完了,接着执行MediaPlayerService::instantiate的方法,与前面类似,主要过程大致如下:

1.通过defaultServiceManager()方法构造一个BpServiceManager的对象,其中的mRemote为BpBinder(0)

2.调用BpServiceManager的addService方法,它其实是调用BpBinder的transact方法,code是ADD_SERVICE_TRANSACTION

3.BpBinder通过IPCThreadState的transact发送BC_TRANSACTION的cmd给binder驱动,并等待binder驱动的BR_REPLY回复

4.binder驱动收到BC_TRANSACTION指令后,为MediaPlayerService构造一个binder_node,并加入到binder_proc的nodes红黑树上(这时nodes上面就有两个节点了,一个AudioFlinger,一个MediaPlayerService),然后通过binder_node构造一个binder_ref结构,并改写传入的type和handle值。并构造一个binder_transaction加入到ServiceManager的todo队列中。

5.ServiceManager取出binder_transaction对象,并根据里面的name和refs(handle id),构造一个svcinfo对象并加入到全局的svclist链表中

6.做reply和释放前面申请的内存


启动事务处理线程

当在main_mediaservice.cpp注册完所有的Service后,会调用后面两个函数:        ProcessState::self()->startThreadPool(),IPCThreadState::self()->joinThreadPool()。首先来看ProcessState::self()->startThreadPool()方法:

  1. void ProcessState::startThreadPool()  
  2. {  
  3.     AutoMutex _l(mLock);  
  4.     if (!mThreadPoolStarted) {  
  5.         mThreadPoolStarted = true;  
  6.         spawnPooledThread(true);  
  7.     }  
  8. }  
  9.   
  10. void ProcessState::spawnPooledThread(bool isMain)  
  11. {  
  12.     if (mThreadPoolStarted) {  
  13.         String8 name = makeBinderThreadName();  
  14.         ALOGV("Spawning new pooled thread, name=%s\n", name.string());  
  15.         sp<Thread> t = new PoolThread(isMain);  
  16.         t->run(name.string());  
  17.     }  
  18. }  
  19.   
  20.     virtual bool threadLoop()  
  21.     {  
  22.         IPCThreadState::self()->joinThreadPool(mIsMain);  
  23.         return false;  
  24.     }  
// Start the process-wide binder thread pool exactly once; the first pooled
// thread is spawned as the "main" looper (isMain == true).
void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);   // guards mThreadPoolStarted
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}

// Create one pooled binder thread named "Binder_%X". isMain selects whether
// the thread registers as a permanent looper (BC_ENTER_LOOPER) or a
// driver-requested one (BC_REGISTER_LOOPER) inside joinThreadPool().
// Does nothing until startThreadPool() has marked the pool started.
void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());   // run() eventually invokes threadLoop()
    }
}

    // PoolThread entry point: parks this thread in the binder command loop.
    // Returning false tells the Thread framework not to call threadLoop() again.
    virtual bool threadLoop()
    {
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;
    }

startThreadPool直接调用spawnPooledThread来启动线程池,makeBinderThreadName顺序地构造一个"Binder_%X"字串作为thread的名字。然后新建一个PoolThread线程并调用其run方法。thread的run方法最后会调用threadLoop方法,也就是调用IPCThreadState::self()->joinThreadPool(true)。所以这里会启动两个线程来不断地处理事务,先来看joinThreadPool的实现:

  1. void IPCThreadState::joinThreadPool(bool isMain)  
  2. {  
  3.     LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());  
  4.   
  5.     mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);  
  6.       
  7.     set_sched_policy(mMyThreadId, SP_FOREGROUND);  
  8.           
  9.     status_t result;  
  10.     do {  
  11.         processPendingDerefs();  
  12.         result = getAndExecuteCommand();  
  13.   
  14.         if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {  
  15.             ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",  
  16.                   mProcess->mDriverFD, result);  
  17.             abort();  
  18.         }  
  19.           
  20.         if(result == TIMED_OUT && !isMain) {  
  21.             break;  
  22.         }  
  23.     } while (result != -ECONNREFUSED && result != -EBADF);  
  24.   
  25.     LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p\n",  
  26.         (void*)pthread_self(), getpid(), (void*)result);  
  27.       
  28.     mOut.writeInt32(BC_EXIT_LOOPER);  
  29.     talkWithDriver(false);  
  30. }  
  31.   
  32. status_t IPCThreadState::getAndExecuteCommand()  
  33. {  
  34.     status_t result;  
  35.     int32_t cmd;  
  36.   
  37.     result = talkWithDriver();  
  38.     if (result >= NO_ERROR) {  
  39.         size_t IN = mIn.dataAvail();  
  40.         if (IN < sizeof(int32_t)) return result;  
  41.         cmd = mIn.readInt32();  
  42.         IF_LOG_COMMANDS() {  
  43.             alog << "Processing top-level Command: "  
  44.                  << getReturnString(cmd) << endl;  
  45.         }  
  46.   
  47.         result = executeCommand(cmd);  
  48.         set_sched_policy(mMyThreadId, SP_FOREGROUND);  
  49.     }  
  50.   
  51.     return result;  
  52. }  
// Turn the calling thread into a binder worker. isMain == true registers a
// permanent looper (BC_ENTER_LOOPER) that never times out; false registers
// a driver-requested looper (BC_REGISTER_LOOPER) that may retire on TIMED_OUT.
void IPCThreadState::joinThreadPool(bool isMain)
{
    LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());

    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    
    set_sched_policy(mMyThreadId, SP_FOREGROUND);
        
    status_t result;
    do {
        processPendingDerefs();          // flush deferred reference releases
        result = getAndExecuteCommand(); // block in the driver, run one BR_* command

        // Any error other than a timeout or driver shutdown is fatal here.
        if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
            ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
                  mProcess->mDriverFD, result);
            abort();
        }
        
        // Non-main loopers are allowed to retire when the pool is idle.
        if(result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p\n",
        (void*)pthread_self(), getpid(), (void*)result);
    
    mOut.writeInt32(BC_EXIT_LOOPER);   // tell the driver this looper is gone
    talkWithDriver(false);             // flush mOut without blocking for input
}

// Block in talkWithDriver() until the binder driver delivers data, then read
// and execute a single top-level BR_* command. Returns the command's status,
// or the driver error from talkWithDriver().
status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    result = talkWithDriver();   // blocking read (also flushes pending mOut)
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;   // no complete command available
        cmd = mIn.readInt32();
        IF_LOG_COMMANDS() {
            alog << "Processing top-level Command: "
                 << getReturnString(cmd) << endl;
        }

        result = executeCommand(cmd);
        // executeCommand may have switched the scheduling group; restore it.
        set_sched_policy(mMyThreadId, SP_FOREGROUND);
    }

    return result;
}


传入到joinThreadPool中的默认参数为true,所以会发送BC_ENTER_LOOPER命令到binder驱动中,我们在分析ServiceManager的时候,已经看过处理的代码了,只是设置thread->looper属性为BINDER_LOOPER_STATE_ENTERED。所以上面启动两个线程会一直循环的调用getAndExecuteCommand去等待事务,永不退出;而调用joinThreadPool(false)创建的thread在没有事务处理(TIMED_OUT)时则会退出。我们再来看一下什么时候会调用joinThreadPool(false) 去创建thread呢。在binder驱动中的binder_thread_read方法中曾说过下面这段code:

  1. *consumed = ptr - buffer;  
  2. if (proc->requested_threads + proc->ready_threads == 0 &&  
  3.     proc->requested_threads_started < proc->max_threads &&  
  4.     (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |  
  5.      BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */  
  6.      /*spawn a new thread if we leave this out */) {  
  7.         proc->requested_threads++;  
  8.         binder_debug(BINDER_DEBUG_THREADS,  
  9.                      "binder: %d:%d BR_SPAWN_LOOPER\n",  
  10.                      proc->pid, thread->pid);  
  11.         if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))  
  12.                 return -EFAULT;  
  13.         binder_stat_br(proc, thread, BR_SPAWN_LOOPER);  
  14. }  
        *consumed = ptr - buffer;
        if (proc->requested_threads + proc->ready_threads == 0 &&
            proc->requested_threads_started < proc->max_threads &&
            (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
             BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
             /*spawn a new thread if we leave this out */) {
                proc->requested_threads++;
                binder_debug(BINDER_DEBUG_THREADS,
                             "binder: %d:%d BR_SPAWN_LOOPER\n",
                             proc->pid, thread->pid);
                if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
                        return -EFAULT;
                binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
        }

当requested_threads(binder驱动请求增加的thread数)+ready_threads(处于等待客户端请求的空闲线程数)等于0,并且requested_threads_started(binder驱动请求成功增加的thread数)小于max_threads数目,就会向用户层发送一个BR_SPAWN_LOOPER命令,并将requested_threads加一。来看Service端收到BR_SPAWN_LOOPER的处理,代码在IPCThreadState::executeCommand方法中:

  1. case BR_SPAWN_LOOPER:  
  2.     mProcess->spawnPooledThread(false);  
  3.     break;  
    case BR_SPAWN_LOOPER:
        mProcess->spawnPooledThread(false);
        break;

这里调用mProcess->spawnPooledThread(false)来启动一个thread,并发送BC_REGISTER_LOOPER命令到binder驱动,我们来看处理的代码:

  1. case BC_REGISTER_LOOPER:  
  2.         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {  
  3.                 thread->looper |= BINDER_LOOPER_STATE_INVALID;  
  4.                 binder_user_error("binder: %d:%d ERROR:"  
  5.                         " BC_REGISTER_LOOPER called "  
  6.                         "after BC_ENTER_LOOPER\n",  
  7.                         proc->pid, thread->pid);  
  8.         } else if (proc->requested_threads == 0) {  
  9.                 thread->looper |= BINDER_LOOPER_STATE_INVALID;  
  10.                 binder_user_error("binder: %d:%d ERROR:"  
  11.                         " BC_REGISTER_LOOPER called "  
  12.                         "without request\n",  
  13.                         proc->pid, thread->pid);  
  14.         } else {  
  15.                 proc->requested_threads--;  
  16.                 proc->requested_threads_started++;  
  17.         }  
  18.         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;  
  19.         break;  
                case BC_REGISTER_LOOPER:
                        if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
                                thread->looper |= BINDER_LOOPER_STATE_INVALID;
                                binder_user_error("binder: %d:%d ERROR:"
                                        " BC_REGISTER_LOOPER called "
                                        "after BC_ENTER_LOOPER\n",
                                        proc->pid, thread->pid);
                        } else if (proc->requested_threads == 0) {
                                thread->looper |= BINDER_LOOPER_STATE_INVALID;
                                binder_user_error("binder: %d:%d ERROR:"
                                        " BC_REGISTER_LOOPER called "
                                        "without request\n",
                                        proc->pid, thread->pid);
                        } else {
                                proc->requested_threads--;
                                proc->requested_threads_started++;
                        }
                        thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
                        break;

因为不同的线程调用binder_ioctrl时通过binder_get_thread会返回不同的binder_thread对象,所以这里的thread->looper的属性是BINDER_LOOPER_STATE_NEED_RETURN。后面将requested_threads减一,并将成功增加的thread数目requested_threads_started增加一。这样就增加了一个Service服务处理线程,当Service比较繁忙时,线程池中的线程数目会增加,当然不会超过max_threads+2的数目;当Service比较空闲的时候,线程池中的线程会自动退出,也不会少于2。
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值