Obtaining and Using an Android Binder Service

{
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder = sm->getService(String16("IHello"));
    sp<com::hello::IHello> hello = interface_cast<com::hello::IHello>(binder);
    hello->hello();
    int ret = 0;
    hello->sum(1, 2, &ret);
}

interface IHello{
void hello();
int sum(int x, int y);
void registerCallback(ICallback cb);
}

namespace com {

namespace hello {

class BpHello : public ::android::BpInterface<::com::hello::IHello> {
public:
explicit BpHello(const ::android::sp<::android::IBinder>& _aidl_impl);
virtual ~BpHello() = default;
::android::binder::Status hello() override;
::android::binder::Status sum(int32_t x, int32_t y, int32_t* _aidl_return) override;
::android::binder::Status registerCallback(const ::android::sp<::com::hello::ICallback>& cb) override;
};

}

}


The code above is how a client obtains a binder service and invokes the methods that service exposes. The call flow is:

1.  Call defaultServiceManager to initialize binder and obtain a BpServiceManager.
2.  Call getService to look up the binder service that was registered with the ServiceManager.
3.  Invoke methods on the returned binder to make remote calls (a sketch of the proxy side of such a call follows below).
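To connect this flow to the transact path analyzed below, here is a rough sketch of what the AIDL-generated proxy method for sum might look like; the transaction code constant and the error handling are simplified assumptions, not the exact generated code.

// Hedged sketch of an AIDL-generated proxy method (illustrative only).
// The proxy packs its arguments into a Parcel and hands them to
// remote()->transact(), which is where the analysis below picks up.
::android::binder::Status BpHello::sum(int32_t x, int32_t y, int32_t* _aidl_return) {
    ::android::Parcel data, reply;
    data.writeInterfaceToken(getInterfaceDescriptor());      // "com.hello.IHello"
    data.writeInt32(x);
    data.writeInt32(y);
    // each method gets a code starting at FIRST_CALL_TRANSACTION; "+ 1" is an assumption here
    ::android::status_t err = remote()->transact(
            ::android::IBinder::FIRST_CALL_TRANSACTION + 1 /* sum */, data, &reply);
    if (err != ::android::OK) return ::android::binder::Status::fromStatusT(err);
    ::android::binder::Status status;
    status.readFromParcel(reply);                             // service-side Status comes first
    if (status.isOk()) reply.readInt32(_aidl_return);         // then the out value
    return status;
}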

sm->getService
--------------

virtual sp<IBinder> getService(const String16& name) const
{
    sp<IBinder> svc = checkService(name);
    if (svc != nullptr) 
        return svc;
    ......
}



virtual sp<IBinder> checkService(const String16& name) const
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}

getService calls checkService; checkService simply calls remote()->transact, using the BpBinder inside BpServiceManager to make a remote call that looks up the service registered with the ServiceManager. The binder that was found is read back with reply.readStrongBinder(), and the returned IBinder is then converted into a BpHello with interface\_cast. How the service side registers itself is covered in [# Android binder 服务端注册](https://juejin.cn/post/7352681668550869042 "https://juejin.cn/post/7352681668550869042").
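For reference, interface\_cast is only a thin template; the following is a simplified sketch of how it and the asInterface function generated by IMPLEMENT\_META\_INTERFACE behave, so treat the details as an approximation rather than the exact framework code.

// Sketch of what interface_cast<IHello>(binder) boils down to (simplified).
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj) {
    return INTERFACE::asInterface(obj);
}

// asInterface is generated by IMPLEMENT_META_INTERFACE(Hello, "com.hello.IHello"), roughly:
sp<IHello> IHello::asInterface(const sp<IBinder>& obj) {
    sp<IHello> intr;
    if (obj != nullptr) {
        // a BnHello living in the same process is returned directly;
        // descriptor is the static String16 declared by DECLARE_META_INTERFACE
        intr = static_cast<IHello*>(obj->queryLocalInterface(descriptor).get());
        if (intr == nullptr) {
            // a remote binder (BpBinder) gets wrapped in a new proxy
            intr = new BpHello(obj);
        }
    }
    return intr;
}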

transact
--------

remote()->transact is really BpBinder::transact; the concrete implementation is shown below. Here handle and flags both keep their default value 0, and handle 0 is the handle reserved for the ServiceManager (a short sketch after the code shows where that 0 comes from).

status_t BpBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}

return DEAD_OBJECT;

}
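Where does mHandle = 0 come from? defaultServiceManager ultimately asks ProcessState for the context object, which, in simplified form (details elided), is just the strong proxy for handle 0:

// Simplified sketch: the ServiceManager proxy is the BpBinder for handle 0.
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/) {
    return getStrongProxyForHandle(0);   // handle 0 is reserved for the ServiceManager
}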


Inside transact, IPCThreadState::self()->transact is called.

status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err;

flags |= TF_ACCEPT_FDS;

IF_LOG_TRANSACTIONS() {
    TextOutput::Bundle _b(alog);
    alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
        << handle << " / code " << TypeCode(code) << ": "
        << indent << data << dedent << endl;
}

LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
    (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
    
//writeTransactionData packages the data that needs to be sent to the driver
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, nullptr);

if (err != NO_ERROR) {
    if (reply) reply->setError(err);
    return (mLastError = err);
}

if ((flags & TF_ONE_WAY) == 0) {
    if (UNLIKELY(mCallRestriction != ProcessState::CallRestriction::NONE)) {
        if (mCallRestriction == ProcessState::CallRestriction::ERROR_IF_NOT_ONEWAY) {
            ALOGE("Process making non-oneway call but is restricted.");
            CallStack::logStack("non-oneway call", CallStack::getCurrent(10).get(),
                ANDROID_LOG_ERROR);
        } else /* FATAL_IF_NOT_ONEWAY */ {
            LOG_ALWAYS_FATAL("Process may not make oneway calls.");
        }
    }

    #if 0
    if (code == 4) {
        ALOGI(">>>>>> CALLING transaction 4");
    } else {
        ALOGI(">>>>>> CALLING transaction %d", code);
    }
    #endif
    if (reply) {
        err = waitForResponse(reply);
    } else {
        Parcel fakeReply;
        err = waitForResponse(&fakeReply);
    }
    #if 0
    if (code == 4) { 
        ALOGI("<<<<<< RETURNING transaction 4");
    } else {
        ALOGI("<<<<<< RETURNING transaction %d", code);
    }
    #endif

    IF_LOG_TRANSACTIONS() {
        TextOutput::Bundle _b(alog);
        alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
            << handle << ": ";
        if (reply) alog << indent << *reply << dedent << endl;
        else alog << "(none requested)" << endl;
    }
} else {
    err = waitForResponse(nullptr, nullptr);
}

return err;

}

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;

const status_t err = data.errorCheck();
if (err == NO_ERROR) { // this is the path taken here
    tr.data_size = data.ipcDataSize();
    tr.data.ptr.buffer = data.ipcData();
    tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
    tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
    tr.flags |= TF_STATUS_CODE;
    *statusBuffer = err;
    tr.data_size = sizeof(status_t);
    tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
    tr.offsets_size = 0;
    tr.data.ptr.offsets = 0;
} else {
    return (mLastError = err);
}

mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));

return NO_ERROR;

}

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;

while (1) {
    //talkWithDriver sends the data to the binder driver
    if ((err=talkWithDriver()) < NO_ERROR) break;
    ......
}

......
return err;

}


1.  IPCThreadState::transact calls writeTransactionData, which wraps the data passed to transact into a binder\_transaction\_data and writes it into the member variable mOut (one per IPCThreadState, i.e. per thread); mOut holds the commands that will be written to the binder driver on the next trip into the kernel
2.  Once the data is packaged, waitForResponse is called to send it to the binder driver and to parse the data that comes back

talkWithDriver
--------------

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}

binder_write_read bwr;

const bool needRead = mIn.dataPosition() >= mIn.dataSize();

const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();

if (doReceive && needRead) {//this branch is taken (doReceive defaults to true)
    bwr.read_size = mIn.dataCapacity();
    bwr.read_buffer = (uintptr_t)mIn.data();
} else {
    bwr.read_size = 0;
    bwr.read_buffer = 0;
}

IF_LOG_COMMANDS() {
    TextOutput::Bundle _b(alog);
    if (outAvail != 0) {
        alog << "Sending commands to driver: " << indent;
        const void* cmds = (const void*)bwr.write_buffer;
        const void* end = ((const uint8_t*)cmds)+bwr.write_size;
        alog << HexDump(cmds, bwr.write_size) << endl;
        while (cmds < end) cmds = printCommand(alog, cmds);
        alog << dedent;
    }
    alog << "Size of receive buffer: " << bwr.read_size
        << ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
}

if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
    IF_LOG_COMMANDS() {
        alog << "About to read/write, write size = " << mOut.dataSize() << endl;
    }

#if defined(ANDROID)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);

IF_LOG_COMMANDS() {
    alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
        << bwr.write_consumed << " (of " << mOut.dataSize()
                    << "), read consumed: " << bwr.read_consumed << endl;
}

if (err >= NO_ERROR) {
    if (bwr.write_consumed > 0) {
        if (bwr.write_consumed < mOut.dataSize())
            mOut.remove(0, bwr.write_consumed);
        else {
            mOut.setDataSize(0);
            processPostWriteDerefs();
        }
    }
    if (bwr.read_consumed > 0) {
        mIn.setDataSize(bwr.read_consumed);
        mIn.setDataPosition(0);
    }
    IF_LOG_COMMANDS() {
        TextOutput::Bundle _b(alog);
        alog << "Remaining data size: " << mOut.dataSize() << endl;
        alog << "Received commands from driver: " << indent;
        const void* cmds = mIn.data();
        const void* end = mIn.data() + mIn.dataSize();
        alog << HexDump(cmds, mIn.dataSize()) << endl;
        while (cmds < end) cmds = printReturnCommand(alog, cmds);
        alog << dedent;
    }
    return NO_ERROR;
}

return err;

}


talkWithDriver sets up a local binder\_write\_read struct: its write buffer points at mOut, which writeTransactionData filled in, and its read buffer points at mIn; it then calls ioctl to hand the data to the binder driver in the kernel. A standalone sketch of that round trip is shown below.
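Purely as an illustration, the following hypothetical snippet shows the heart of that exchange in isolation; it is not the framework code, and it assumes fd was obtained by opening /dev/binder.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

// One BINDER_WRITE_READ ioctl both sends pending commands (BC_*) and receives
// pending returns (BR_*); this mirrors what talkWithDriver does with mOut and mIn.
static int binder_round_trip(int fd,
                             const void *out, size_t out_len,
                             void *in, size_t in_cap, size_t *in_len) {
    struct binder_write_read bwr;
    memset(&bwr, 0, sizeof(bwr));
    bwr.write_size   = out_len;
    bwr.write_buffer = (uintptr_t)out;   // e.g. BC_TRANSACTION + binder_transaction_data
    bwr.read_size    = in_cap;
    bwr.read_buffer  = (uintptr_t)in;    // BR_NOOP / BR_REPLY etc. land here
    if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
        return -errno;
    *in_len = bwr.read_consumed;         // number of bytes the driver wrote back
    return 0;
}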

binder\_ioctl
-------------

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;

/*pr_info("binder_ioctl: %d:%d %x %lx\n",
        proc->pid, current->pid, cmd, arg);*/

binder_selftest_alloc(&proc->alloc);

trace_binder_ioctl(cmd, arg);

ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
    goto err_unlocked;

thread = binder_get_thread(proc);
if (thread == NULL) {
    ret = -ENOMEM;
    goto err;
}

switch (cmd) {
//this is the branch taken
case BINDER_WRITE_READ:
    ret = binder_ioctl_write_read(filp, cmd, arg, thread);
    if (ret)
        goto err;
    break;
    //remaining case branches omitted
    ......
}
ret = 0;

err:
if (thread)
thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
trace_binder_ioctl_done(ret);
return ret;
}


A user-space ioctl on the binder fd eventually ends up in binder\_ioctl in the Linux kernel:

1.  The binder\_proc is obtained from filp; it represents the process that issued the ioctl, and the calling thread's binder\_thread is then looked up from that proc.
2.  Execution enters the BINDER\_WRITE\_READ branch of the switch and calls binder\_ioctl\_write\_read.

static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;

if (size != sizeof(struct binder_write_read)) {
    ret = -EINVAL;
    goto out;
}
//copy the user-space binder_write_read struct into the kernel
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
    ret = -EFAULT;
    goto out;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
         "%d:%d write %lld at %016llx, read %lld at %016llx\n",
         proc->pid, thread->pid,
         (u64)bwr.write_size, (u64)bwr.write_buffer,
         (u64)bwr.read_size, (u64)bwr.read_buffer);

//the write payload is non-empty, so this if branch is taken
if (bwr.write_size > 0) {
    ret = binder_thread_write(proc, thread,
                  bwr.write_buffer,
                  bwr.write_size,
                  &bwr.write_consumed);
    trace_binder_write_done(ret);
    if (ret < 0) {
        bwr.read_consumed = 0;
        if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
            ret = -EFAULT;
        goto out;
    }
}

//unrelated code omitted
......

}


Once inside this function, the written data is handled first by calling binder\_thread\_write.

static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;

while (ptr < end && thread->return_error.cmd == BR_OK) {
    int ret;

    if (get_user(cmd, (uint32_t __user *)ptr))
        return -EFAULT;
    ptr += sizeof(uint32_t);
    trace_binder_command(cmd);
    if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
        atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
        atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
        atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
    }

    switch (cmd) {
    
    //this is the case taken here
    case BC_TRANSACTION:
    case BC_REPLY: {
        struct binder_transaction_data tr;
        //copy the user-space data into a binder_transaction_data
        if (copy_from_user(&tr, ptr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        binder_transaction(proc, thread, &tr,
                   cmd == BC_REPLY, 0);
        break;
    }
    
    //remaining case branches omitted
    ......
    }
    *consumed = ptr - buffer;
}
return 0;

}


The cmd command code is parsed out of the data and execution enters the matching case; for BC\_TRANSACTION that means calling binder\_transaction.

static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
int ret;
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t buffer_offset = 0;
binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
binder_size_t sg_buf_offset, sg_buf_end_offset;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error = 0;
uint32_t return_error_param = 0;
uint32_t return_error_line = 0;
binder_size_t last_fixup_obj_off = 0;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;

e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
e->context_name = proc->context->name;

if (reply) {
    ......
} else {//this branch is taken
    //handle is 0, so the inner else branch below is taken
    if (tr->target.handle) {
        ......
    } else {
        mutex_lock(&context->context_mgr_node_lock);
        //get the binder_node that represents the ServiceManager
        target_node = context->binder_context_mgr_node;
        if (target_node)
            //from the binder_node, take references and obtain the corresponding binder_proc
            target_node = binder_get_node_refs_for_txn(
                    target_node, &target_proc,
                    &return_error);
        else
            return_error = BR_DEAD_REPLY;
        mutex_unlock(&context->context_mgr_node_lock);
        if (target_node && target_proc == proc) {
            binder_user_error("%d:%d got transaction to context manager from process owning it\n",
                      proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            return_error_param = -EINVAL;
            return_error_line = __LINE__;
            goto err_invalid_target_handle;
        }
    }
    if (!target_node) {
        return_error_param = -EINVAL;
        return_error_line = __LINE__;
        goto err_dead_binder;
    }
    e->to_node = target_node->debug_id;
    if (security_binder_transaction(proc->tsk,
                    target_proc->tsk) < 0) {
        return_error = BR_FAILED_REPLY;
        return_error_param = -EPERM;
        return_error_line = __LINE__;
        goto err_invalid_target_handle;
    }
    binder_inner_proc_lock(proc);
    if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
       ......
    }
    binder_inner_proc_unlock(proc);
}

//the remainder of binder_transaction is continued further below
......
}

static struct binder_node *binder_get_node_refs_for_txn(
struct binder_node *node,
struct binder_proc **procp,
uint32_t *error)
{
struct binder_node *target_node = NULL;

binder_node_inner_lock(node);
if (node->proc) {
    target_node = node;
    binder_inc_node_nilocked(node, 1, 0, NULL);
    binder_inc_node_tmpref_ilocked(node);
    node->proc->tmp_ref++;
    *procp = node->proc;
} else
    *error = BR_DEAD_REPLY;
binder_node_inner_unlock(node);

return target_node;

}


binder\_get\_node\_refs\_for\_txn mainly obtains the binder\_proc from the binder\_node (adding temporary references) and returns the node.

Next, let's continue with binder\_transaction:

static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{

if (target_thread)
    e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;

/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
    return_error = BR_FAILED_REPLY;
    return_error_param = -ENOMEM;
    return_error_line = __LINE__;
    goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
spin_lock_init(&t->lock);

tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
    return_error = BR_FAILED_REPLY;
    return_error_param = -ENOMEM;
    return_error_line = __LINE__;
    goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

t->debug_id = t_debug_id;

if (reply)
    binder_debug(BINDER_DEBUG_TRANSACTION,
             "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
             proc->pid, thread->pid, t->debug_id,
             target_proc->pid, target_thread->pid,
             (u64)tr->data.ptr.buffer,
             (u64)tr->data.ptr.offsets,
             (u64)tr->data_size, (u64)tr->offsets_size,
             (u64)extra_buffers_size);
else
    binder_debug(BINDER_DEBUG_TRANSACTION,
             "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
             proc->pid, thread->pid, t->debug_id,
             target_proc->pid, target_node->debug_id,
             (u64)tr->data.ptr.buffer,
             (u64)tr->data.ptr.offsets,
             (u64)tr->data_size, (u64)tr->offsets_size,
             (u64)extra_buffers_size);

if (!reply && !(tr->flags & TF_ONE_WAY))
    t->from = thread;
else
    t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
if (!(t->flags & TF_ONE_WAY) &&
    binder_supported_policy(current->policy)) {
    /* Inherit supported policies for synchronous transactions */
    t->priority.sched_policy = current->policy;
    t->priority.prio = current->normal_prio;
} else {
    /* Otherwise, fall back to the default priority */
    t->priority = target_proc->default_priority;
}

if (target_node && target_node->txn_security_ctx) {
    u32 secid;

    security_task_getsecid(proc->tsk, &secid);
    ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
    if (ret) {
        return_error = BR_FAILED_REPLY;
        return_error_param = ret;
        return_error_line = __LINE__;
        goto err_get_secctx_failed;
    }
    extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
}

trace_binder_transaction(reply, t, target_node);

t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
    tr->offsets_size, extra_buffers_size,
    !reply && (t->flags & TF_ONE_WAY));
if (IS_ERR(t->buffer)) {
    /*
     * -ESRCH indicates VMA cleared. The target is dying.
     */
    return_error_param = PTR_ERR(t->buffer);
    return_error = return_error_param == -ESRCH ?
        BR_DEAD_REPLY : BR_FAILED_REPLY;
    return_error_line = __LINE__;
    t->buffer = NULL;
    goto err_binder_alloc_buf_failed;
}
if (secctx) {
    size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
                ALIGN(tr->offsets_size, sizeof(void *)) +
                ALIGN(extra_buffers_size, sizeof(void *)) -
                ALIGN(secctx_sz, sizeof(u64));

    t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
    binder_alloc_copy_to_buffer(&target_proc->alloc,
                    t->buffer, buf_offset,
                    secctx, secctx_sz);
    security_release_secctx(secctx, secctx_sz);
    secctx = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);

}


The code above fills in a binder\_transaction struct and allocates space for its buffer; the buffer allocation code is next.

struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
{
struct binder_buffer *buffer;

mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                     extra_buffers_size, is_async);
mutex_unlock(&alloc->mutex);
return buffer;

}

static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
size_t buffer_size;
struct rb_node *best_fit = NULL;
void __user *has_page_addr;
void __user *end_page_addr;
size_t size, data_offsets_size;
int ret;

if (!binder_alloc_get_vma(alloc)) {
    pr_err("%d: binder_alloc_buf, no vma\n",
           alloc->pid);
    return ERR_PTR(-ESRCH);
}

data_offsets_size = ALIGN(data_size, sizeof(void *)) +
    ALIGN(offsets_size, sizeof(void *));

if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
            "%d: got transaction with invalid size %zd-%zd\n",
            alloc->pid, data_size, offsets_size);
    return ERR_PTR(-EINVAL);
}
size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
if (size < data_offsets_size || size < extra_buffers_size) {
    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
            "%d: got transaction with invalid extra_buffers_size %zd\n",
            alloc->pid, extra_buffers_size);
    return ERR_PTR(-EINVAL);
}
if (is_async &&
    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: binder_alloc_buf size %zd failed, no async space left\n",
              alloc->pid, size);
    return ERR_PTR(-ENOSPC);
}

/* Pad 0-size buffers so they get assigned unique addresses */
size = max(size, sizeof(void *));

//search the free_buffers red-black tree (in the target proc's binder_alloc) for the best-fitting node
while (n) {
    buffer = rb_entry(n, struct binder_buffer, rb_node);
    BUG_ON(!buffer->free);
    buffer_size = binder_alloc_buffer_size(alloc, buffer);

    if (size < buffer_size) {
        best_fit = n;
        n = n->rb_left;
    } else if (size > buffer_size)
        n = n->rb_right;
    else {
        best_fit = n;
        break;
    }
}

if (best_fit == NULL) {
    size_t allocated_buffers = 0;
    size_t largest_alloc_size = 0;
    size_t total_alloc_size = 0;
    size_t free_buffers = 0;
    size_t largest_free_size = 0;
    size_t total_free_size = 0;

    for (n = rb_first(&alloc->allocated_buffers); n != NULL;
         n = rb_next(n)) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        buffer_size = binder_alloc_buffer_size(alloc, buffer);
        allocated_buffers++;
        total_alloc_size += buffer_size;
        if (buffer_size > largest_alloc_size)
            largest_alloc_size = buffer_size;
    }
    for (n = rb_first(&alloc->free_buffers); n != NULL;
         n = rb_next(n)) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        buffer_size = binder_alloc_buffer_size(alloc, buffer);
        free_buffers++;
        total_free_size += buffer_size;
        if (buffer_size > largest_free_size)
            largest_free_size = buffer_size;
    }
    pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
        alloc->pid, size);
    pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
           total_alloc_size, allocated_buffers, largest_alloc_size,
           total_free_size, free_buffers, largest_free_size);
    return ERR_PTR(-ENOSPC);
}
if (n == NULL) {
    //get the binder_buffer that corresponds to the chosen rb_node
    buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
    buffer_size = binder_alloc_buffer_size(alloc, buffer);
}

binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
         "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
          alloc->pid, size, buffer, buffer_size);

has_page_addr = (void __user *)
    (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
WARN_ON(n && buffer_size != size);
end_page_addr =
    (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
if (end_page_addr > has_page_addr)
    end_page_addr = has_page_addr;
    
//allocate backing pages for the chosen node
ret = binder_update_page_range(alloc, 1, (void __user *)
    PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
if (ret)
    return ERR_PTR(ret);

//if the chosen buffer is bigger than needed, split it and put the remainder back into binder_alloc->free_buffers
if (buffer_size != size) {
    struct binder_buffer *new_buffer;

    new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
    if (!new_buffer) {
        pr_err("%s: %d failed to alloc new buffer struct\n",
               __func__, alloc->pid);
        goto err_alloc_buf_struct_failed;
    }
    new_buffer->user_data = (u8 __user *)buffer->user_data + size;
    list_add(&new_buffer->entry, &buffer->entry);
    new_buffer->free = 1;
    binder_insert_free_buffer(alloc, new_buffer);
}

rb_erase(best_fit, &alloc->free_buffers);
//move the chosen buffer into allocated_buffers and mark it as in use
buffer->free = 0;
buffer->allow_user_free = 0;
binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
         "%d: binder_alloc_buf size %zd got %pK\n",
          alloc->pid, size, buffer);
//the buffer may have been split above, so record the sizes actually requested
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
if (is_async) {
    alloc->free_async_space -= size + sizeof(struct binder_buffer);
    binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
             "%d: binder_alloc_buf size %zd async free %zd\n",
              alloc->pid, size, alloc->free_async_space);
}
return buffer;

err_alloc_buf_struct_failed:
binder_update_page_range(alloc, 0, (void __user *)
PAGE_ALIGN((uintptr_t)buffer->user_data),
end_page_addr);
return ERR_PTR(-ENOMEM);
}


The overall flow above searches the free\_buffers red-black tree in binder\_alloc for the node whose size best matches what is needed, allocates memory for the chosen node, and, if the chosen node is larger than required, splits it. The snippet below gives a toy picture of the best-fit step.
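This is only an illustration of the best-fit idea over a size-ordered std::multimap instead of the kernel's rb-tree; the remainder-splitting step is omitted.

#include <cstddef>
#include <map>

struct ToyBuffer { size_t size; };

// free_by_size stands in for binder_alloc->free_buffers, which is kept ordered by size.
ToyBuffer* best_fit_take(std::multimap<size_t, ToyBuffer*>& free_by_size, size_t need) {
    // smallest free buffer whose size is >= need: the node the rb-tree walk
    // in binder_alloc_new_buf_locked ends on
    auto it = free_by_size.lower_bound(need);
    if (it == free_by_size.end())
        return nullptr;                 // corresponds to the "no address space" -ENOSPC path
    ToyBuffer* chosen = it->second;
    free_by_size.erase(it);             // rb_erase(best_fit, &alloc->free_buffers)
    // if chosen->size > need the kernel splits off the remainder and
    // re-inserts it into free_buffers; omitted in this toy model
    return chosen;
}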

The actual memory allocation is done by binder\_update\_page\_range:

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
void __user *start, void __user *end)
{
void __user *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = NULL;
bool need_mm = false;

binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
	     "%d: %s pages %pK-%pK\n", alloc->pid,
	     allocate ? "allocate" : "free", start, end);

if (end <= start)
	return 0;

trace_binder_update_page_range(alloc, allocate, start, end);

if (allocate == 0)
	goto free_range;

for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
	page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
	if (!page->page_ptr) {
		need_mm = true;
		break;
	}
}

if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
	mm = alloc->vma_vm_mm;

if (mm) {
	down_read(&mm->mmap_sem);
	vma = alloc->vma;
}

if (!vma && need_mm) {
	pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		alloc->pid);
	goto err_no_vma;
}

for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
	int ret;
	bool on_lru;
	size_t index;

	index = (page_addr - alloc->buffer) / PAGE_SIZE;
	page = &alloc->pages[index];
            //if page_ptr is non-NULL, this page has already been allocated
	if (page->page_ptr) {
		trace_binder_alloc_lru_start(alloc, index);

		on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
		WARN_ON(!on_lru);

		trace_binder_alloc_lru_end(alloc, index);
		continue;
	}

	if (WARN_ON(!vma))
		goto err_page_ptr_cleared;

	trace_binder_alloc_page_start(alloc, index);
            //allocate one page of physical memory
	page->page_ptr = alloc_page(GFP_KERNEL |
				    __GFP_HIGHMEM |
				    __GFP_ZERO);
	if (!page->page_ptr) {
		pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			alloc->pid, page_addr);
		goto err_alloc_page_failed;
	}
	page->alloc = alloc;
	INIT_LIST_HEAD(&page->lru);

	user_page_addr = (uintptr_t)page_addr;
            //map the physical page into the target process's user-space virtual memory
	ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
	if (ret) {
		pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
		       alloc->pid, user_page_addr);
		goto err_vm_insert_page_failed;
	}

	if (index + 1 > alloc->pages_high)
		alloc->pages_high = index + 1;

	trace_binder_alloc_page_end(alloc, index);
	/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
	up_read(&mm->mmap_sem);
	mmput(mm);
}
return 0;

free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
bool ret;
size_t index;

	index = (page_addr - alloc->buffer) / PAGE_SIZE;
	page = &alloc->pages[index];

	trace_binder_free_lru_start(alloc, index);

	ret = list_lru_add(&binder_alloc_lru, &page->lru);
	WARN_ON(!ret);

	trace_binder_free_lru_end(alloc, index);
	continue;

err_vm_insert_page_failed:
__free_page(page->page_ptr);
page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
;
}
err_no_vma:
if (mm) {
up_read(&mm->mmap_sem);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}


The code above first allocates physical pages and then maps them into the target process's user-space virtual memory via vm\_insert\_page; the kernel writes into those same pages when it copies the sender's data, so kernel space and the target process end up sharing the mapping and the payload only needs to be copied once.

Now let's return to the binder\_transaction code:

static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{

//the two binder_alloc_copy_user_to_buffer() calls below copy the user-space data into the kernel buffer

//copy tr->data.ptr.buffer from user space into the kernel: this is the payload of the Parcel the caller built
if (binder_alloc_copy_user_to_buffer(
            &target_proc->alloc,
            t->buffer, 0,
            (const void __user *)
                (uintptr_t)tr->data.ptr.buffer,
            tr->data_size)) {
    binder_user_error("%d:%d got transaction with invalid data ptr\n",
            proc->pid, thread->pid);
    return_error = BR_FAILED_REPLY;
    return_error_param = -EFAULT;
    return_error_line = __LINE__;
    goto err_copy_data_failed;
}

//tr->data.ptr.offsets holds the offsets of the binder objects inside the caller's Parcel
if (binder_alloc_copy_user_to_buffer(
            &target_proc->alloc,
            t->buffer,
            ALIGN(tr->data_size, sizeof(void *)),
            (const void __user *)
                (uintptr_t)tr->data.ptr.offsets,
            tr->offsets_size)) {
    binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
            proc->pid, thread->pid);
    return_error = BR_FAILED_REPLY;
    return_error_param = -EFAULT;
    return_error_line = __LINE__;
    goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
    binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
            proc->pid, thread->pid, (u64)tr->offsets_size);
    return_error = BR_FAILED_REPLY;
    return_error_param = -EINVAL;
    return_error_line = __LINE__;
    goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
    binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
              proc->pid, thread->pid,
              (u64)extra_buffers_size);
    return_error = BR_FAILED_REPLY;
    return_error_param = -EINVAL;
    return_error_line = __LINE__;
    goto err_bad_offset;
}
off_start_offset = ALIGN(tr->data_size, sizeof(void *));
buffer_offset = off_start_offset;
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
off_min = 0;

//no binder object is transferred in the flow covered here, so the body of the for loop is not entered
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
     buffer_offset += sizeof(binder_size_t)) {
     //partially omitted
     ......
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;

if (reply) {
//partially omitted
    ......
} else if (!(t->flags & TF_ONE_WAY)) {//this branch is taken
    BUG_ON(t->buffer->async_transaction != 0);
    binder_inner_proc_lock(proc);
    /*
     * Defer the TRANSACTION_COMPLETE, so we don't return to
     * userspace immediately; this allows the target process to
     * immediately start processing this transaction, reducing
     * latency. We will then return the TRANSACTION_COMPLETE when
     * the target replies (or there is an error).
     */
    binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
    t->need_reply = 1;
    t->from_parent = thread->transaction_stack;
    thread->transaction_stack = t;
    binder_inner_proc_unlock(proc);
    //queue the transaction on the target process's todo list
    if (!binder_proc_transaction(t, target_proc, target_thread)) {
        binder_inner_proc_lock(proc);
        binder_pop_transaction_ilocked(thread, t);
        binder_inner_proc_unlock(proc);
        goto err_dead_proc_or_thread;
    }
} else {
    //partially omitted
    ......
}

if (target_thread)
    binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
if (target_node)
    binder_dec_node_tmpref(target_node);
/*
 * write barrier to synchronize with initialization
 * of log entry
 */
smp_wmb();
WRITE_ONCE(e->debug_id_done, t_debug_id);
return;

}


The code above copies the user-space data into the kernel buffer and then calls binder\_proc\_transaction to queue the work for the target process.

static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
struct binder_node *node = t->buffer->target_node;
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;

BUG_ON(!node);
binder_node_lock(node);
node_prio.prio = node->min_priority;
node_prio.sched_policy = node->sched_policy;

if (oneway) {
    BUG_ON(thread);
    if (node->has_async_transaction) {
        pending_async = true;
    } else {
        node->has_async_transaction = true;
    }
}

binder_inner_proc_lock(proc);

if (proc->is_dead || (thread && thread->is_dead)) {
    binder_inner_proc_unlock(proc);
    binder_node_unlock(node);
    return false;
}

//pick a thread from the target process's waiting_threads list
if (!thread && !pending_async)
    thread = binder_select_thread_ilocked(proc);

if (thread) {
    binder_transaction_priority(thread->task, t, node_prio,
                    node->inherit_rt);
    //add the kernel binder_work to that thread's todo list
    binder_enqueue_thread_work_ilocked(thread, &t->work);
} else if (!pending_async) {
    binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
    binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
//wake up the target process (the ServiceManager)
if (!pending_async)
    binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

binder_inner_proc_unlock(proc);
binder_node_unlock(node);

return true;

}


binder\_proc\_transaction does three main things:

1.  Calls binder\_select\_thread\_ilocked to take a thread from the target process's waiting-thread list
2.  Calls binder\_enqueue\_thread\_work\_ilocked to add the kernel binder\_work to that thread's todo list
3.  Calls binder\_wakeup\_thread\_ilocked to wake up the target process

Here is the source of those three functions:

//take a thread from the target process's waiting_threads list
static struct binder_thread *binder_select_thread_ilocked(struct binder_proc *proc)
{
struct binder_thread *thread;

assert_spin_locked(&proc->inner_lock);
thread = list_first_entry_or_null(&proc->waiting_threads,
				  struct binder_thread,
				  waiting_thread_node);

if (thread)
	list_del_init(&thread->waiting_thread_node);

return thread;

}

//add the kernel binder_work to the thread's todo list
static void binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
struct binder_work *work)
{
//enqueue it
binder_enqueue_work_ilocked(work, &thread->todo);
thread->process_todo = true;
}

static void binder_enqueue_work_ilocked(struct binder_work *work,
struct list_head *target_list)
{
BUG_ON(target_list == NULL);
BUG_ON(work->entry.next && !list_empty(&work->entry));
list_add_tail(&work->entry, target_list);
}

//wake up the target process
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
struct binder_thread *thread,
bool sync)
{
assert_spin_locked(&proc->inner_lock);

if (thread) {//this is the path taken here
	if (sync)
		wake_up_interruptible_sync(&thread->wait);
	else
		wake_up_interruptible(&thread->wait);
	return;
}
binder_wakeup_poll_threads_ilocked(proc, sync);

}


Once the code above has run, control returns to binder\_ioctl\_write\_read in the kernel, and the ServiceManager, which has been waiting, is woken up and runs its handler inside binder\_loop.

static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{

    //partially omitted
    ......

//after binder_transaction has finished, execution continues here:
    //the read size is checked and binder_thread_read is called
if (bwr.read_size > 0) {
	ret = binder_thread_read(proc, thread, bwr.read_buffer,
				 bwr.read_size,
				 &bwr.read_consumed,
				 filp->f_flags & O_NONBLOCK);
	trace_binder_read_done(ret);
	binder_inner_proc_lock(proc);
	if (!binder_worklist_empty_ilocked(&proc->todo))
		binder_wakeup_proc_ilocked(proc);
	binder_inner_proc_unlock(proc);
	if (ret < 0) {
		if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
			ret = -EFAULT;
		goto out;
	}
}
    
......

}

static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;

int ret = 0;
int wait_for_proc_work;

if (*consumed == 0) {
	if (put_user(BR_NOOP, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);
}

retry:
binder_inner_proc_lock(proc);
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
binder_inner_proc_unlock(proc);

thread->looper |= BINDER_LOOPER_STATE_WAITING;

trace_binder_wait_for_work(wait_for_proc_work,
			   !!thread->transaction_stack,
			   !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
	if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
				BINDER_LOOPER_STATE_ENTERED))) {
		binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
			proc->pid, thread->pid, thread->looper);
		wait_event_interruptible(binder_user_error_wait,
					 binder_stop_on_user_error < 2);
	}
	binder_restore_priority(current, proc->default_priority);
}

if (non_block) {
	if (!binder_has_work(thread, wait_for_proc_work))
		ret = -EAGAIN;
} else {
            //the calling thread blocks here
	ret = binder_wait_for_work(thread, wait_for_proc_work);
}

}


binder\_loop
------------

In the kernel the calling thread blocks inside binder\_thread\_read; meanwhile the ServiceManager, sitting in binder\_loop, is woken up.

void binder_loop(struct binder_state *bs, binder_handler func)

{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];

bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));

for (;;) {
    bwr.read_size = sizeof(readbuf);
    bwr.read_consumed = 0;
    bwr.read_buffer = (uintptr_t) readbuf;

    //the ServiceManager wakes up from this ioctl
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

    if (res < 0) {
        ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
        break;
    }
    
    //parse the commands received from the driver
    res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
    if (res == 0) {
        ALOGE("binder_loop: unexpected reply?!\n");
        break;
    }
    if (res < 0) {
        ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
        break;
    }
}

}

int binder_parse(struct binder_state *bs, struct binder_io *bio, uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;

while (ptr < end) {
    uint32_t cmd = *(uint32_t *) ptr;
    ptr += sizeof(uint32_t);

#if TRACE
    fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
    switch(cmd) {
    //execution goes here
    case BR_TRANSACTION_SEC_CTX:
    case BR_TRANSACTION: {
        struct binder_transaction_data_secctx txn;
        if (cmd == BR_TRANSACTION_SEC_CTX) {
            if ((end - ptr) < sizeof(struct binder_transaction_data_secctx)) {
                ALOGE("parse: txn too small (binder_transaction_data_secctx)!\n");
                return -1;
            }
            memcpy(&txn, (void*) ptr, sizeof(struct binder_transaction_data_secctx));
            ptr += sizeof(struct binder_transaction_data_secctx);
        } else /* BR_TRANSACTION */ {
            if ((end - ptr) < sizeof(struct binder_transaction_data)) {
                ALOGE("parse: txn too small (binder_transaction_data)!\n");
                return -1;
            }
            memcpy(&txn.transaction_data, (void*) ptr, sizeof(struct binder_transaction_data));
            ptr += sizeof(struct binder_transaction_data);

            txn.secctx = 0;
        }

        binder_dump_txn(&txn.transaction_data);
        if (func) {
            unsigned rdata[256/4];
            struct binder_io msg;
            struct binder_io reply;
            int res;

            bio_init(&reply, rdata, sizeof(rdata), 4);
            bio_init_from_txn(&msg, &txn.transaction_data);
            //call the concrete handler (func, i.e. svcmgr_handler)
            res = func(bs, &txn, &msg, &reply);
            if (txn.transaction_data.flags & TF_ONE_WAY) {
                binder_free_buffer(bs, txn.transaction_data.data.ptr.buffer);
            } else {
                binder_send_reply(bs, &reply, txn.transaction_data.data.ptr.buffer, res);
            }
        }
        break;
    }
    //remaining case branches omitted
    ......
}

return r;

}


The ServiceManager enters the binder\_loop loop during its initialization and waits for messages (a simplified sketch of that startup path is shown below); when the get-service request arrives it is woken up and goes on to call binder\_parse to parse and handle the data.
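For context, this is roughly how the ServiceManager gets into that loop; it is a simplified sketch of service\_manager.c's main, with error handling and the SELinux setup omitted.

//simplified sketch of the ServiceManager's startup (abridged)
int main(int argc, char **argv)
{
    struct binder_state *bs;

    bs = binder_open("/dev/binder", 128 * 1024);   //open and mmap the binder driver
    if (!bs)
        return -1;

    if (binder_become_context_manager(bs)) {       //claim handle 0 for this process
        //error handling omitted
        return -1;
    }

    binder_loop(bs, svcmgr_handler);               //block in ioctl and dispatch to svcmgr_handler
    return 0;
}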

### binder\_parse

From the code above, binder\_parse is called with five arguments:

1.  bs: represents the opened binder driver
2.  0: the binder\_io argument bio, which is not used on this path
3.  readbuf: the data received from the kernel
4.  bwr.read\_consumed: how many bytes of that data are valid
5.  func: the svcmgr\_handler function

binder\_parse first reads the cmd the kernel handed back, which here is BR\_TRANSACTION; in that branch it unpacks the transaction data and calls func to do the actual work.

svcmgr\_handler
---------------

Execution now reaches the ServiceManager's svcmgr\_handler, which performs the actual service lookup (parts of the code are omitted):

int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data_secctx *txn_secctx,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
uint32_t dumpsys_priority;

struct binder_transaction_data *txn = &txn_secctx->transaction_data;

if (txn->target.ptr != BINDER_SERVICE_MANAGER)
    return -1;

if (txn->code == PING_TRANSACTION)
    return 0;

strict_policy = bio_get_uint32(msg);
bio_get_uint32(msg);  // Ignore worksource header.
s = bio_get_string16(msg, &len);
if (s == NULL) {
    return -1;
}

if ((len != (sizeof(svcmgr_id) / 2)) ||
    memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
    fprintf(stderr,"invalid id %s\n", str8(s, len));
    return -1;
}

if (sehandle && selinux_status_updated() > 0) {

#ifdef VENDORSERVICEMANAGER
struct selabel_handle *tmp_sehandle = selinux_android_vendor_service_context_handle();
#else
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
#endif
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}

switch(txn->code) {
//execution enters this branch
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
    //read the requested service name
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }
   
    handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid,
                             (const char*) txn_secctx->secctx);
    if (!handle)
        break;
    bio_put_ref(reply, handle);
    return 0;
}
bio_put_uint32(reply, 0);
return 0;

}

uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid, const char *sid)
{
struct svcinfo *si = find_svc(s, len);

if (!si || !si->handle) {
    return 0;
}

if (!si->allow_isolated) {
    uid_t appid = uid % AID_USER;
    if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
        return 0;
    }
}

if (!svc_can_find(s, len, spid, sid, uid)) {
    return 0;
}

return si->handle;

}

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
struct svcinfo *si;

for (si = svclist; si; si = si->next) {
    if ((len == si->len) &&
        !memcmp(s16, si->name, len * sizeof(uint16_t))) {
        return si;
    }
}
return NULL;

}


svcmgr\_handler is how the ServiceManager handles the messages received in binder\_loop; when a client asks for a service binder, the switch enters the SVC\_MGR\_GET\_SERVICE / SVC\_MGR\_CHECK\_SERVICE branch.

do\_find\_service looks up the service by name; its return value is the handle associated with that service's binder. Internally it calls find\_svc to locate the matching svcinfo.

find\_svc walks the svclist; entries are added to svclist by do\_add\_service.

Once the svcinfo is found, its handle is written into the reply (see the bio\_put\_ref sketch below); execution then returns to binder\_parse, which goes on to call binder\_send\_reply to send the result back to the kernel.
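The handle travels back as a flat\_binder\_object. The following is a simplified sketch of bio\_put\_ref, slightly abridged from the servicemanager's binder.c, showing how the handle is packed; the kernel later translates this BINDER\_TYPE\_HANDLE object into a handle that is valid in the client process.

//simplified sketch of bio_put_ref (abridged)
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;

    if (handle)
        obj = bio_alloc_obj(bio);          //also records this object's offset in bio->offs
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->hdr.type = BINDER_TYPE_HANDLE;    //a reference, not a local binder
    obj->handle = handle;
    obj->cookie = 0;
}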

binder\_send\_reply
-------------------

void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;

data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
    data.txn.flags = TF_STATUS_CODE;
    data.txn.data_size = sizeof(int);
    data.txn.offsets_size = 0;
    data.txn.data.ptr.buffer = (uintptr_t)&status;
    data.txn.data.ptr.offsets = 0;
} else {
    data.txn.flags = 0;
    data.txn.data_size = reply->data - reply->data0;
    data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
    data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
    data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data));

}

int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;

bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //send the data to the kernel
if (res < 0) {
    fprintf(stderr,"binder_write: ioctl failed (%s)\n",
            strerror(errno));
}
return res;

}


binder\_send\_reply is straightforward: it defines a packed struct data, packs the BC\_FREE\_BUFFER and BC\_REPLY commands plus the reply payload into it, and then calls binder\_write, which wraps everything in a binder\_write\_read and issues the ioctl that sends the data to the kernel.

Execution then goes through binder\_ioctl and binder\_ioctl\_write\_read again and finally reaches binder\_transaction; the first two behave exactly as before, while binder\_transaction takes a slightly different path.

//This differs from the earlier call in the reply parameter: previously reply was 0, here it is non-zero, so all the if (reply) branches are taken
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
//the local-variable declarations are the same as in the flow described earlier

if (reply) { //this time this branch is taken
    binder_inner_proc_lock(proc);
    in_reply_to = thread->transaction_stack;
    if (in_reply_to == NULL) {
        binder_inner_proc_unlock(proc);
        binder_user_error("%d:%d got reply transaction with no transaction stack\n",
                  proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        return_error_param = -EPROTO;
        return_error_line = __LINE__;
        goto err_empty_call_stack;
    }
    if (in_reply_to->to_thread != thread) {
        spin_lock(&in_reply_to->lock);
        binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
            proc->pid, thread->pid, in_reply_to->debug_id,
            in_reply_to->to_proc ?
            in_reply_to->to_proc->pid : 0,
            in_reply_to->to_thread ?
            in_reply_to->to_thread->pid : 0);
        spin_unlock(&in_reply_to->lock);
        binder_inner_proc_unlock(proc);
        return_error = BR_FAILED_REPLY;
        return_error_param = -EPROTO;
        return_error_line = __LINE__;
        in_reply_to = NULL;
        goto err_bad_call_stack;
    }
    thread->transaction_stack = in_reply_to->to_parent;
    binder_inner_proc_unlock(proc);
    
    //get the target thread for the reply (the original caller)
    target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
    if (target_thread == NULL) {
        return_error = BR_DEAD_REPLY;
        return_error_line = __LINE__;
        goto err_dead_binder;
    }
    if (target_thread->transaction_stack != in_reply_to) {
        binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
            proc->pid, thread->pid,
            target_thread->transaction_stack ?
            target_thread->transaction_stack->debug_id : 0,
            in_reply_to->debug_id);
        binder_inner_proc_unlock(target_thread->proc);
        return_error = BR_FAILED_REPLY;
        return_error_param = -EPROTO;
        return_error_line = __LINE__;
        in_reply_to = NULL;
        target_thread = NULL;
        goto err_dead_binder;
    }
    //the target process for the reply
    target_proc = target_thread->proc;
    target_proc->tmp_ref++;
    binder_inner_proc_unlock(target_thread->proc);
} else {
    ......
}


//the code below matches the earlier flow: a kernel-side transaction is set up so the ServiceManager's data can be copied into it
if (target_thread)
    e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;

/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
    return_error = BR_FAILED_REPLY;
    return_error_param = -ENOMEM;
    return_error_line = __LINE__;
    goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
spin_lock_init(&t->lock);

tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
    return_error = BR_FAILED_REPLY;
    return_error_param = -ENOMEM;
    return_error_line = __LINE__;
    goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

t->debug_id = t_debug_id;

if (reply)
    binder_debug(BINDER_DEBUG_TRANSACTION,
             "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
             proc->pid, thread->pid, t->debug_id,
             target_proc->pid, target_thread->pid,
             (u64)tr->data.ptr.buffer,
             (u64)tr->data.ptr.offsets,
             (u64)tr->data_size, (u64)tr->offsets_size,
             (u64)extra_buffers_size);
else
    .......
    

if (!reply && !(tr->flags & TF_ONE_WAY))
    t->from = thread;
else
    t->from = NULL;

//fill in the kernel transaction data
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
if (!(t->flags & TF_ONE_WAY) &&
    binder_supported_policy(current->policy)) {
    /* Inherit supported policies for synchronous transactions */
    t->priority.sched_policy = current->policy;
    t->priority.prio = current->normal_prio;
} else {
    /* Otherwise, fall back to the default priority */
    t->priority = target_proc->default_priority;
}


if (target_node && target_node->txn_security_ctx) {
    u32 secid;
    
    security_task_getsecid(proc->tsk, &secid);
    ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
    if (ret) {
        return_error = BR_FAILED_REPLY;
        return_error_param = ret;
        return_error_line = __LINE__;
        goto err_get_secctx_failed;
    }
    extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
}

trace_binder_transaction(reply, t, target_node);

//allocate space for the kernel buffer and map it (same flow as before)
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
    tr->offsets_size, extra_buffers_size,
    !reply && (t->flags & TF_ONE_WAY));
if (IS_ERR(t->buffer)) {
    /*
     * -ESRCH indicates VMA cleared. The target is dying.
     */
    return_error_param = PTR_ERR(t->buffer);
    return_error = return_error_param == -ESRCH ?
        BR_DEAD_REPLY : BR_FAILED_REPLY;
    return_error_line = __LINE__;
    t->buffer = NULL;
    goto err_binder_alloc_buf_failed;
}
if (secctx) {
    size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
                ALIGN(tr->offsets_size, sizeof(void *)) +
                ALIGN(extra_buffers_size, sizeof(void *)) -
                ALIGN(secctx_sz, sizeof(u64));

    t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
    binder_alloc_copy_to_buffer(&target_proc->alloc,
                    t->buffer, buf_offset,
                    secctx, secctx_sz);
    security_release_secctx(secctx, secctx_sz);
    secctx = NULL;
}

t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);

//copy the ServiceManager's reply data into the kernel buffer
if (binder_alloc_copy_user_to_buffer(
            &target_proc->alloc,
            t->buffer, 0,
            (const void __user *)
                (uintptr_t)tr->data.ptr.buffer,
            tr->data_size)) {
    binder_user_error("%d:%d got transaction with invalid data ptr\n",
            proc->pid, thread->pid);
    return_error = BR_FAILED_REPLY;
    return_error_param = -EFAULT;
    return_error_line = __LINE__;
    goto err_copy_data_failed;
}
//copy the ServiceManager's offsets into the kernel buffer
if (binder_alloc_copy_user_to_buffer(
            &target_proc->alloc,
            t->buffer,
            ALIGN(tr->data_size, sizeof(void *)),
            (const void __user *)
                (uintptr_t)tr->data.ptr.offsets,
            tr->offsets_size)) {
    binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
            proc->pid, thread->pid);
    return_error = BR_FAILED_REPLY;
    return_error_param = -EFAULT;
    return_error_line = __LINE__;
    goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
    binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
            proc->pid, thread->pid, (u64)tr->offsets_size);
    return_error = BR_FAILED_REPLY;
    return_error_param = -EINVAL;
    return_error_line = __LINE__;
    goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
    binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
              proc->pid, thread->pid,
              (u64)extra_buffers_size);
    return_error = BR_FAILED_REPLY;
    return_error_param = -EINVAL;
    return_error_line = __LINE__;
    goto err_bad_offset;
}

off_start_offset = ALIGN(tr->data_size, sizeof(void *));
buffer_offset = off_start_offset;
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
off_min = 0;

//the reply carries the service handle as a flat_binder_object, so in this flow the loop does process that one entry and translates the handle for the client (body omitted)
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
     buffer_offset += sizeof(binder_size_t)) {
}

tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;

if (reply) {//this branch is taken
    //as before, the goal is to queue the binder_work on the target thread's todo list and then wake the target up
    binder_enqueue_thread_work(thread, tcomplete);
    binder_inner_proc_lock(target_proc);
    if (target_thread->is_dead) {
        binder_inner_proc_unlock(target_proc);
        goto err_dead_proc_or_thread;
    }
    BUG_ON(t->buffer->async_transaction != 0);
    binder_pop_transaction_ilocked(target_thread, in_reply_to);
    binder_enqueue_thread_work_ilocked(target_thread, &t->work);
    binder_inner_proc_unlock(target_proc);
    wake_up_interruptible_sync(&target_thread->wait);
    binder_restore_priority(current, in_reply_to->saved_priority);
    binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
    ......
} else {
    ......
}
if (target_thread)
    binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
if (target_node)
    binder_dec_node_tmpref(target_node);
/*
 * write barrier to synchronize with initialization
 * of log entry
 */
smp_wmb();
WRITE_ONCE(e->debug_id_done, t_debug_id);
return;

}


After the code above has run, the ServiceManager's thread goes back into binder\_ioctl\_write\_read -> binder\_thread\_read and blocks there again.

Meanwhile the client thread, which has been blocked in binder\_ioctl -> binder\_ioctl\_write\_read -> binder\_thread\_read ever since it requested the service, is now woken up:

static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;

int ret = 0;
int wait_for_proc_work;

if (*consumed == 0) {
    if (put_user(BR_NOOP, (uint32_t __user *)ptr))
        return -EFAULT;
    ptr += sizeof(uint32_t);
}

retry:
binder_inner_proc_lock(proc);
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
binder_inner_proc_unlock(proc);

thread->looper |= BINDER_LOOPER_STATE_WAITING;
trace_binder_wait_for_work(wait_for_proc_work,
               !!thread->transaction_stack,
               !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
    if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                BINDER_LOOPER_STATE_ENTERED))) {
        binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
            proc->pid, thread->pid, thread->looper);
        wait_event_interruptible(binder_user_error_wait,
                     binder_stop_on_user_error < 2);
    }
    binder_restore_priority(current, proc->default_priority);
}

if (non_block) {
    if (!binder_has_work(thread, wait_for_proc_work))
        ret = -EAGAIN;
} else {
    //the client thread is woken up here
    ret = binder_wait_for_work(thread, wait_for_proc_work);
}

thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

if (ret)
    return ret;

while (1) {
    uint32_t cmd;
    struct binder_transaction_data_secctx tr;
    struct binder_transaction_data *trd = &tr.transaction_data;
    struct binder_work *w = NULL;
    struct list_head *list = NULL;
    struct binder_transaction *t = NULL;
    struct binder_thread *t_from;
    size_t trsize = sizeof(*trd);

    binder_inner_proc_lock(proc);
    //pick the work list to drain
    if (!binder_worklist_empty_ilocked(&thread->todo))
        list = &thread->todo;
    else if (!binder_worklist_empty_ilocked(&proc->todo) &&
           wait_for_proc_work)
        list = &proc->todo;
    else {
        binder_inner_proc_unlock(proc);

        /* no data added */
        if (ptr - buffer == 4 && !thread->looper_need_return)
            goto retry;
        break;
    }

    if (end - ptr < sizeof(tr) + 4) {
        binder_inner_proc_unlock(proc);
        break;
    }
    
    //dequeue a binder_work from the chosen list; it carries the data passed over by the sender
    w = binder_dequeue_work_head_ilocked(list);
    if (binder_worklist_empty_ilocked(&thread->todo))
        thread->process_todo = false;

    switch (w->type) {
        case BINDER_WORK_TRANSACTION: {

            binder_inner_proc_unlock(proc);
            //recover the enclosing binder_transaction (and its data) from the binder_work
            t = container_of(w, struct binder_transaction, work);

        } break;
    }

    if (!t)
        continue;

    BUG_ON(t->buffer == NULL);
    if (t->buffer->target_node) {
        ......
    } else {
        trd->target.ptr = 0;
        trd->cookie = 0;
        cmd = BR_REPLY;
    }
    trd->code = t->code;
    trd->flags = t->flags;
    trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

    t_from = binder_get_txn_from(t);
    if (t_from) {
        struct task_struct *sender = t_from->proc->tsk;

        trd->sender_pid =
            task_tgid_nr_ns(sender,
                    task_active_pid_ns(current));
    } else {
        trd->sender_pid = 0;
    }

    //fill in the transaction data that will be handed back to user space
    trd->data_size = t->buffer->data_size;
    trd->offsets_size = t->buffer->offsets_size;
    trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
    trd->data.ptr.offsets = trd->data.ptr.buffer +
                ALIGN(t->buffer->data_size,
                    sizeof(void *));

    tr.secctx = t->security_ctx;
    if (t->security_ctx) {
        cmd = BR_TRANSACTION_SEC_CTX;
        trsize = sizeof(tr);
    }
    //write the return command code into the user-space read buffer
    if (put_user(cmd, (uint32_t __user *)ptr)) {
        if (t_from)
            binder_thread_dec_tmpref(t_from);

        binder_cleanup_transaction(t, "put_user failed",
                       BR_FAILED_REPLY);

        return -EFAULT;
    }
    ptr += sizeof(uint32_t);
    //copy the assembled transaction data into the user-space read buffer
    if (copy_to_user(ptr, &tr, trsize)) {
        if (t_from)
            binder_thread_dec_tmpref(t_from);

        binder_cleanup_transaction(t, "copy_to_user failed",
                       BR_FAILED_REPLY);

        return -EFAULT;
    }
    ptr += trsize;

    trace_binder_transaction_received(t);
    binder_stat_br(proc, thread, cmd);
    binder_debug(BINDER_DEBUG_TRANSACTION,
             "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
             proc->pid, thread->pid,
             (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
            (cmd == BR_TRANSACTION_SEC_CTX) ?
                 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
             t->debug_id, t_from ? t_from->proc->pid : 0,
             t_from ? t_from->pid : 0, cmd,
             t->buffer->data_size, t->buffer->offsets_size,
             (u64)trd->data.ptr.buffer,
             (u64)trd->data.ptr.offsets);

    if (t_from)
        binder_thread_dec_tmpref(t_from);
    t->buffer->allow_user_free = 1;
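    //for an incoming synchronous request the transaction is pushed onto this thread's
    //transaction_stack so the eventual reply can be matched to it; a BR_REPLY (our case)
    //needs no such bookkeeping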
    if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
        binder_inner_proc_lock(thread->proc);
        t->to_parent = thread->transaction_stack;
        t->to_thread = thread;
        thread->transaction_stack = t;
        binder_inner_proc_unlock(thread->proc);
    } else {
        //for a BR_REPLY (our case) the transaction can be freed right away
        binder_free_transaction(t);
    }
    break;
}

done:

*consumed = ptr - buffer;
binder_inner_proc_lock(proc);
if (proc->requested_threads == 0 &&
    list_empty(&thread->proc->waiting_threads) &&
    proc->requested_threads_started < proc->max_threads &&
    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
     /*spawn a new thread if we leave this out */) {
    proc->requested_threads++;
    binder_inner_proc_unlock(proc);
    binder_debug(BINDER_DEBUG_THREADS,
             "%d:%d BR_SPAWN_LOOPER\n",
             proc->pid, thread->pid);
    if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
        return -EFAULT;
    binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
} else
    binder_inner_proc_unlock(proc);
return 0;

}


The code above writes the fetched reply into the user-space read buffer; when it returns, control goes back to the user-space side of the getService call.
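Before following execution back into user space, here is a minimal, self-contained sketch of the layout binder_thread_read has just produced in that read buffer (a BR_NOOP, then a BR_REPLY command followed by the transaction data). The enum values and struct below are simplified stand-ins for the real definitions in the binder UAPI header, purely to make the layout concrete:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Simplified stand-ins for the real BR_* codes and binder_transaction_data;
// the values and field set here are illustrative only.
enum DemoReturnCmd : uint32_t { DEMO_BR_NOOP = 0, DEMO_BR_REPLY = 1 };

struct DemoTransactionData {
    uint64_t data_buffer;   // address of the payload (t->buffer->user_data)
    uint64_t data_offsets;  // address of the flattened-object offsets
    uint64_t data_size;     // t->buffer->data_size
    uint64_t offsets_size;  // t->buffer->offsets_size
};

// Walk a read buffer shaped the way binder_thread_read fills it:
// [cmd][optional payload][cmd][optional payload]... with 4-byte commands.
void dumpReadBuffer(const std::vector<uint8_t>& buf, size_t consumed) {
    size_t pos = 0;
    while (pos + sizeof(uint32_t) <= consumed) {
        uint32_t cmd;
        std::memcpy(&cmd, buf.data() + pos, sizeof(cmd));
        pos += sizeof(cmd);
        if (cmd == DEMO_BR_NOOP)
            continue;                       // BR_NOOP carries no payload
        if (cmd == DEMO_BR_REPLY) {
            DemoTransactionData tr{};
            std::memcpy(&tr, buf.data() + pos, sizeof(tr));
            pos += sizeof(tr);
            std::printf("reply: %llu payload bytes at 0x%llx\n",
                        (unsigned long long)tr.data_size,
                        (unsigned long long)tr.data_buffer);
        }
    }
}

With that layout in mind, execution resumes in IPCThreadState::talkWithDriver: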

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}

binder_write_read bwr;

// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();

// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();

// This is what we'll read.
if (doReceive && needRead) {
    bwr.read_size = mIn.dataCapacity();
    bwr.read_buffer = (uintptr_t)mIn.data();
} else {
    bwr.read_size = 0;
    bwr.read_buffer = 0;
}

IF_LOG_COMMANDS() {
    TextOutput::Bundle _b(alog);
    if (outAvail != 0) {
        alog << "Sending commands to driver: " << indent;
        const void* cmds = (const void*)bwr.write_buffer;
        const void* end = ((const uint8_t*)cmds)+bwr.write_size;
        alog << HexDump(cmds, bwr.write_size) << endl;
        while (cmds < end) cmds = printCommand(alog, cmds);
        alog << dedent;
    }
    alog << "Size of receive buffer: " << bwr.read_size
        << ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
}

if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
    IF_LOG_COMMANDS() {
        alog << "About to read/write, write size = " << mOut.dataSize() << endl;
    }

#if defined(ANDROID)
        //this ioctl is where the blocked client thread wakes up once the kernel has queued the reply
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
} while (err == -EINTR);

IF_LOG_COMMANDS() {
    alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
        << bwr.write_consumed << " (of " << mOut.dataSize()
                    << "), read consumed: " << bwr.read_consumed << endl;
}

if (err >= NO_ERROR) {
    if (bwr.write_consumed > 0) {
        if (bwr.write_consumed < mOut.dataSize())
            mOut.remove(0, bwr.write_consumed);
        else {
            mOut.setDataSize(0);
            processPostWriteDerefs();
        }
    }
    //parse what was read back from the driver; the reply data ends up in mIn here
    if (bwr.read_consumed > 0) {
        mIn.setDataSize(bwr.read_consumed);
        mIn.setDataPosition(0);
    }
    IF_LOG_COMMANDS() {
        TextOutput::Bundle _b(alog);
        alog << "Remaining data size: " << mOut.dataSize() << endl;
        alog << "Received commands from driver: " << indent;
        const void* cmds = mIn.data();
        const void* end = mIn.data() + mIn.dataSize();
        alog << HexDump(cmds, mIn.dataSize()) << endl;
        while (cmds < end) cmds = printReturnCommand(alog, cmds);
        alog << dedent;
    }
    return NO_ERROR;
}

return err;

}


Back in talkWithDriver, the main thing that happens is that mIn (the read Parcel) gets its size and position updated from bwr.read_consumed; execution then continues in waitForResponse():

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;

while (1) {
    if ((err=talkWithDriver()) < NO_ERROR) break;
    err = mIn.errorCheck();
    if (err < NO_ERROR) break;
    if (mIn.dataAvail() == 0) continue;

    cmd = (uint32_t)mIn.readInt32();

    IF_LOG_COMMANDS() {
        alog << "Processing waitForResponse Command: "
            << getReturnString(cmd) << endl;
    }

    switch (cmd) {
   
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
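                        //point the reply Parcel straight at the buffer the driver mapped into
                        //this process (no extra copy); freeBuffer releases it back to the
                        //driver (BC_FREE_BUFFER) once the Parcel is done with it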
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        ......
                    }
                } else {
                    ......
                }

            }

            goto finish;
}

finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}

return err;

}


So waitForResponse unpacks the data returned by the driver into the reply Parcel and returns;

once reply is filled in, execution goes back to the BpServiceManager::checkService method shown at the start (the code is above, so it is not repeated here), where readStrongBinder() is called on reply:

sp<IBinder> Parcel::readStrongBinder() const
{
sp<IBinder> val;

readNullableStrongBinder(&val);
return val;

}

status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
{
return unflatten_binder(ProcessState::self(), *this, val);
}

status_t unflatten_binder(const sp<ProcessState>& proc,
const Parcel& in, sp<IBinder>* out)
{
const flat_binder_object* flat = in.readObject(false);

if (flat) {
    switch (flat->hdr.type) {
        case BINDER_TYPE_BINDER:
            *out = reinterpret_cast<IBinder*>(flat->cookie);
            return finish_unflatten_binder(nullptr, *flat, in);
        case BINDER_TYPE_HANDLE://this is the branch taken here
            *out = proc->getStrongProxyForHandle(flat->handle);
            return finish_unflatten_binder(
                static_cast<BpBinder*>(out->get()), *flat, in);
    }
}
return BAD_TYPE;

}

As the code shows, this eventually calls ProcessState::getStrongProxyForHandle, passing in the handle of the Server we want to obtain.

getStrongProxyForHandle uses that handle to create (or look up) a BpBinder and ultimately returns that BpBinder to the client.
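The real implementation lives in ProcessState.cpp and also deals with weak references, the special handle 0 and locking; the sketch below (with invented Demo* names, not Android types) only captures the core idea of caching one proxy object per handle and creating it lazily:

#include <cstdint>
#include <map>
#include <memory>

// Invented Demo* types for illustration only; the real code works with
// sp<IBinder>/BpBinder and a vector of handle entries guarded by a mutex.
struct DemoBpBinder {
    explicit DemoBpBinder(int32_t h) : handle(h) {}
    int32_t handle;   // the driver handle this proxy forwards transactions to
};

class DemoProcessState {
public:
    // Core idea of getStrongProxyForHandle: one cached proxy per handle,
    // created lazily the first time the handle is seen.
    std::shared_ptr<DemoBpBinder> getStrongProxyForHandle(int32_t handle) {
        auto& slot = mHandleToObject[handle];   // lookup, inserting an empty slot
        if (!slot) {
            // First lookup for this handle: wrap it in a new proxy.
            // (The real code additionally pings handle 0 with a dummy
            // transaction to check that the context manager is alive.)
            slot = std::make_shared<DemoBpBinder>(handle);
        }
        return slot;
    }

private:
    std::map<int32_t, std::shared_ptr<DemoBpBinder>> mHandleToObject;
};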

To summarize the whole flow of a client obtaining a Server:

  1. The client calls getService(), which internally calls checkService();
  2. checkService() packs the request into a Parcel and calls remote()->transact, which goes through BpBinder::transact and then IPCThreadState::transact (a hedged sketch of the equivalent proxy-side call for IHello follows after this list);
  3. IPCThreadState::transact wraps the data and issues an ioctl, sending it to the kernel's binder_ioctl;
  4. The kernel allocates a buffer from the memory region mapped with the target process and copies the user-space data into it; it then queues the work on the target process's (ServiceManager's) todo list, wakes that process, and the calling thread waits in the kernel;
  5. The ServiceManager process wakes up, looks up the handle of the requested Server and returns that handle to the kernel; exactly as in step 4, the kernel copies the reply into a buffer mapped with the target process (the client), queues the work on the client's todo list, wakes the client, and the ServiceManager thread goes back to waiting in the kernel;
  6. The client process wakes up, parses the data to obtain the Server's handle, and uses it to construct a BpBinder, which is returned; this BpBinder is the Server proxy the client ends up using.
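To connect this back to the IHello interface used at the very beginning, here is a hedged sketch of what the generated proxy side of sum() roughly does. It is not the actual aidl-generated BpHello.cpp: the real code returns android::binder::Status and uses generated transaction constants, while the transaction code below (FIRST_CALL_TRANSACTION + 1, assuming sum is the second method) and the "com.hello.IHello" descriptor are assumptions for illustration. It only compiles inside an Android tree that provides the libbinder headers:

#include <binder/IBinder.h>
#include <binder/Parcel.h>
#include <utils/String16.h>

using namespace android;

// Simplified proxy-side call, modelled on what aidl generates for
// "int sum(int x, int y)". Transaction code and descriptor are assumed.
status_t callSum(const sp<IBinder>& binder, int32_t x, int32_t y, int32_t* outSum) {
    Parcel data, reply;
    // Same pattern as checkService(): interface token first, then arguments.
    data.writeInterfaceToken(String16("com.hello.IHello"));
    data.writeInt32(x);
    data.writeInt32(y);

    // This is the same remote()->transact -> BpBinder::transact ->
    // IPCThreadState::transact path walked through above.
    status_t err = binder->transact(IBinder::FIRST_CALL_TRANSACTION + 1, data, &reply);
    if (err != NO_ERROR) return err;

    // reply is the Parcel that waitForResponse() filled via ipcSetDataReference():
    // read the exception header first, then the returned value.
    int32_t exception = reply.readExceptionCode();
    if (exception != 0) return UNKNOWN_ERROR;
    return reply.readInt32(outSum);
}

Every remote call the client makes follows exactly the path traced above: pack a Parcel, transact through the BpBinder's handle, block in the driver, and unpack the reply the driver wrote back.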