The implementation of this function is straightforward: it records the reference to the MediaPlayerService Binder entity, chiefly its name and handle value, in a struct svcinfo, and inserts that structure at the head of the svclist linked list. Later, when a Client asks the Service Manager for a service interface, the Service Manager only needs the service name to return the corresponding handle value.
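For reference, this is roughly how the service list is declared and searched in frameworks/base/cmds/servicemanager/service_manager.c (reproduced from memory; field details may vary slightly between Android versions), a singly linked list keyed by the UTF-16 service name:

struct svcinfo
{
    struct svcinfo *next;
    void *ptr;              /* the handle value registered for the service */
    struct binder_death death;
    unsigned len;           /* length of the name, in uint16_t units */
    uint16_t name[0];       /* UTF-16 service name */
};

struct svcinfo *svclist = 0;

/* look a service up by name when a client queries the Service Manager */
struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return 0;
}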
After this function returns, control goes back to svcmgr_handler, which at the very end writes a status code of 0 into the reply variable to indicate that everything went well:
bio_put_uint32(reply, 0);
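For completeness, bio_put_uint32 is a tiny helper in frameworks/base/cmds/servicemanager/binder.c; roughly, it reserves four bytes in the reply's binder_io buffer and stores the value there (sketch from memory, details may differ between versions):

void bio_put_uint32(struct binder_io *bio, uint32_t n)
{
    uint32_t *ptr = bio_alloc(bio, sizeof(n));   /* reserve 4 bytes in the reply buffer */
    if (ptr)
        *ptr = n;                                /* store the status code */
}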
Once svcmgr_handler has finished, we return to binder_parse, which executes the following statement:
binder_send_reply(bs, &reply, txn->data, res);
Let us look at the implementation of binder_send_reply. As the name suggests, its job is to tell the Binder driver that the task the driver handed over has been completed.
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
void *buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
void *buffer;
uint32_t cmd_reply;
struct binder_txn txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;
data.txn.target = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offs_size = 0;
data.txn.data = &status;
data.txn.offs = 0;
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data = reply->data0;
data.txn.offs = reply->offs0;
}
binder_write(bs, &data, sizeof(data));
}
As we can see, binder_send_reply asks the Binder driver to execute two commands, BC_FREE_BUFFER and BC_REPLY. The first releases the buffer previously allocated in binder_transaction, whose address is buffer_to_free; this address is the user-space address that the Binder driver produced by translating its own kernel-space address before handing the data to the Service Manager, so when the driver gets it back it knows exactly which buffer to release. The second tells MediaPlayerService that its addService operation has completed with status code 0, which is stored in data.txn.data.
Next, let us look at the binder_write function:
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (unsigned) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
Notice that this is a pure write with no read: read_size is 0.
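For contrast, the read side appears in binder_loop in the same file, where the Service Manager issues a read-only BINDER_WRITE_READ to block until a request arrives — roughly as follows (a sketch with non-essential details elided):

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;          /* nothing to write */
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        bwr.read_size = sizeof(readbuf);   /* only read: block until a request arrives */
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        ......
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        ......
    }
}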
This binder_write call is yet another BINDER_WRITE_READ ioctl. It enters the driver's binder_ioctl function and executes the BINDER_WRITE_READ command; we have covered that path before, so we will not repeat it here.
Eventually binder_ioctl reaches the binder_thread_write function. Let us look at the first command, BC_FREE_BUFFER:
int
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
......
case BC_FREE_BUFFER: {
void __user *data_ptr;
struct binder_buffer *buffer;
if (get_user(data_ptr, (void * __user *)ptr))
return -EFAULT;
ptr += sizeof(void *);
buffer = binder_buffer_lookup(proc, data_ptr);
if (buffer == NULL) {
binder_user_error("binder: %d:%d "
"BC_FREE_BUFFER u%p no match\n",
proc->pid, thread->pid, data_ptr);
break;
}
if (!buffer->allow_user_free) {
binder_user_error("binder: %d:%d "
"BC_FREE_BUFFER u%p matched "
"unreturned buffer\n",
proc->pid, thread->pid, data_ptr);
break;
}
if (binder_debug_mask & BINDER_DEBUG_FREE_BUFFER)
printk(KERN_INFO "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
proc->pid, thread->pid, data_ptr, buffer->debug_id,
buffer->transaction ? "active" : "finished");
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
BUG_ON(!buffer->target_node->has_async_transaction);
if (list_empty(&buffer->target_node->async_todo))
buffer->target_node->has_async_transaction = 0;
else
list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
}
binder_transaction_buffer_release(proc, buffer, NULL);
binder_free_buf(proc, buffer);
break;
}
......
*consumed = ptr - buffer;
}
return 0;
}
First, consider this statement:
get_user(data_ptr, (void * __user *)ptr)
This retrieves the user-space address of the buffer to be freed. Next, the following statement finds the struct binder_buffer that corresponds to that address:
buffer = binder_buffer_lookup(proc, data_ptr);
Since this buffer was allocated earlier in binder_transaction, the lookup is guaranteed to succeed here.
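For reference, binder_buffer_lookup works by converting the user-space address back into the driver's own address and then searching the process's red-black tree of allocated buffers — roughly like this (a sketch from memory of the 2.3-era driver; details may differ between kernel versions):

static struct binder_buffer *binder_buffer_lookup(
    struct binder_proc *proc, void __user *user_ptr)
{
    struct rb_node *n = proc->allocated_buffers.rb_node;
    struct binder_buffer *buffer;
    struct binder_buffer *kern_ptr;

    /* undo the kernel-to-user address translation, then step back from the
       data area to the enclosing struct binder_buffer header */
    kern_ptr = user_ptr - proc->user_buffer_offset
        - offsetof(struct binder_buffer, data);

    while (n) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        if (kern_ptr < buffer)
            n = n->rb_left;
        else if (kern_ptr > buffer)
            n = n->rb_right;
        else
            return buffer;      /* exact match: this is the buffer to free */
    }
    return NULL;
}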
Finally, the buffer can be released:
binder_transaction_buffer_release(proc, buffer, NULL);
binder_free_buf(proc, buffer);
Now let us look at the other command, BC_REPLY:
int
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
......
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
......
*consumed = ptr - buffer;
}
return 0;
}
This brings us back into the binder_transaction function once again:
static void
binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
......
if (reply) {
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
......
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
.......
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
......
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
} else {
......
}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
......
}
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"data ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"offsets ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
......
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
......
} else {
......
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
......
}
Note that reply is 1 here; we ignore the code paths that are irrelevant to this scenario.
Recall that earlier, after the Service Manager was woken up inside binder_thread_read by the request from MediaPlayerService, it pushed the transaction it was about to handle onto thread->transaction_stack at the end of that function:
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
}
So here, the first step is to fetch that binder_transaction back and store it in the local variable in_reply_to:
in_reply_to = thread->transaction_stack;
From in_reply_to we can then recover the thread and process that originally issued this transaction:
target_thread = in_reply_to->from;
target_proc = target_thread->proc;
And from those we obtain target_list and target_wait:
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
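To make the field accesses above easier to follow, here is roughly how struct binder_transaction is declared in the driver (2.3-era binder.c, reproduced from memory, so minor details may differ):

struct binder_transaction {
    int debug_id;
    struct binder_work work;
    struct binder_thread *from;          /* thread that issued the request */
    struct binder_transaction *from_parent;
    struct binder_proc *to_proc;
    struct binder_thread *to_thread;     /* thread handling the request */
    struct binder_transaction *to_parent;
    unsigned need_reply:1;
    struct binder_buffer *buffer;        /* payload allocated in the target process */
    unsigned int code;
    unsigned int flags;
    long priority;
    long saved_priority;
    uid_t sender_euid;
};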
The following block of code:
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"data ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"offsets ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
We analyzed this code earlier, so we will not repeat it. One point worth noting, however, is that target_node is NULL here, so t->buffer->target_node is NULL as well.
The function also contains a for loop that processes any Binder objects embedded in the data; since there are no Binder objects in this reply, that loop is skipped. We then reach the following statement:
binder_pop_transaction(target_thread, in_reply_to);
Let us see what it does:
static void
binder_pop_transaction(
struct binder_thread *target_thread, struct binder_transaction *t)
{
if (target_thread) {
BUG_ON(target_thread->transaction_stack != t);
BUG_ON(target_thread->transaction_stack->from != target_thread);
target_thread->transaction_stack =
target_thread->transaction_stack->from_parent;
t->from = NULL;
}
t->need_reply = 0;
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
}
By this point the in_reply_to transaction is no longer needed, so it is popped off the stack and freed.
Back in binder_transaction:
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
As before, t and tcomplete are placed on target_list and on thread->todo respectively. Here target_list is the thread->todo queue of the MediaPlayerService server's main thread, the one that originally called IServiceManager::addService, while thread->todo belongs to the Service Manager thread that is replying to that addService request.
Finally, the thread waiting on the target_wait queue is woken up. That is the MediaPlayerService server's main thread that originally called IServiceManager::addService; it went to sleep on thread->wait inside binder_thread_read, and that wait queue is exactly the target_wait here:
if (target_wait)
wake_up_interruptible(target_wait);
At this point the Service Manager has finished replying to the IServiceManager::addService request, and it returns to the binder_loop function in frameworks/base/cmds/servicemanager/binder.c to wait for the next Client request. In fact, when the Service Manager goes back to binder_loop and issues the ioctl again, it re-enters binder_thread_read and finds that thread->todo is not empty. That is because we just executed:
list_add_tail(&tcomplete->entry, &thread->todo);
which placed the work item tcomplete on thread->todo. Since this tcomplete has type BINDER_WORK_TRANSACTION_COMPLETE, the Binder driver performs the following:
switch (w->type) {
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
list_del(&w->entry);
kfree(w);
} break;
......
}
Only after binder_loop has finished processing this ioctl call will its next ioctl take it back into the Binder driver, where it goes to sleep waiting for the next Client request.
We mentioned above that the MediaPlayerService server's main thread, the one that called IServiceManager::addService, has been woken up, so it resumes execution in binder_thread_read:
static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed, int non_block)
{
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
......
if (wait_for_proc_work) {
......
} else {
if (non_block) {
if (!binder_has_thread_work(thread))
ret = -EAGAIN;
} else
ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
}
......
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo))
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work)
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {
if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
goto retry;
break;
}
......
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
......
}
if (!t)
continue;
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
......
} else {
tr.target.ptr = NULL;
tr.cookie = NULL;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;
if (t->from) {
......
} else {
tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
......
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
......
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
}
break;
}
done:
......
return 0;
}
In the while loop, w is taken from thread->todo; its type is BINDER_WORK_TRANSACTION, from which we obtain t. As we saw above, the Service Manager returned a status code of 0, which is stored in t->buffer->data. Adding proc->user_buffer_offset to t->buffer->data yields the corresponding user-space address, which is saved in tr.data.ptr.buffer so that user space can read the return code. Since cmd is BR_REPLY rather than BR_TRANSACTION, t can be freed right away, because it will not be needed again.
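The reason this simple addition works is that binder_mmap maps the same pages into the kernel and into the process's mmap'ed region, and records the constant offset between the two views when the mapping is created — roughly as follows (a sketch; field names taken from the 2.3-era driver and may differ in other versions):

/* in binder_mmap(): the mmap'ed region and the kernel buffer share pages,
   so converting between the two views is plain pointer arithmetic */
proc->buffer = area->addr;                                   /* kernel-side start */
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

/* later, in binder_thread_read(), as quoted above: */
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;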
When this function returns, control goes back to binder_ioctl, which executes the following statement to copy the data back to user space:
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
Execution then returns to user space, first to IPCThreadState::talkWithDriver and then to IPCThreadState::waitForResponse, where the following code is eventually executed:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
......
cmd = mIn.readInt32();
......
switch (cmd) {
......
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
LOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(size_t),
freeBuffer, this);
} else {
......
}
} else {
......
}
}
goto finish;
......
}
}
finish:
......
return err;
}
Note that tr.flags is 0 here; it was set in the binder_send_reply function we saw above. The result is ultimately stored in reply:
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(size_t),
freeBuffer, this);
We will not walk through this function here; interested readers can study it on their own.
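Very roughly, Parcel::ipcSetDataReference makes the Parcel point directly at the kernel-provided buffer instead of copying it, and remembers a release function (freeBuffer) to call when the Parcel is done with it. A simplified sketch based on the Parcel implementation of that era (details may differ between versions):

void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const size_t* objects, size_t objectsCount,
    release_func relFunc, void* relCookie)
{
    freeDataNoInit();                        // drop whatever the Parcel held before
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);      // point straight at the reply buffer
    mDataSize = mDataCapacity = dataSize;
    mDataPos = 0;
    mObjects = const_cast<size_t*>(objects); // offsets of flattened binder objects
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mOwner = relFunc;                        // freeBuffer, called to release the buffer
    mOwnerCookie = relCookie;
    scanForFds();
}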
From here the call stack unwinds, level by level, until we are back in MediaPlayerService::instantiate.
With that, IServiceManager::addService has finally completed. The process is quite involved, but understanding it thoroughly goes a long way toward understanding the design and implementation of the Binder mechanism. To briefly recap the interaction between MediaPlayerService, the Service Manager and the Binder driver during IServiceManager::addService: MediaPlayerService writes the request to the driver with BC_TRANSACTION, the driver wakes up the Service Manager with BR_TRANSACTION, the Service Manager registers the service and replies with BC_REPLY, and the driver finally wakes MediaPlayerService up again with BR_REPLY.
Returning to the main function in frameworks/base/media/mediaserver/main_mediaserver.cpp, two more calls remain to be executed:
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
First, the implementation of ProcessState::startThreadPool:
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
spawnPooledThread(true);
}
}
This calls spawnPooledThread:
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
int32_t s = android_atomic_add(1, &mThreadPoolSeq);
char buf[32];
sprintf(buf, "Binder Thread #%d", s);
LOGV("Spawning new pooled thread, name=%s\n", buf);
sp<Thread> t = new PoolThread(isMain);
t->run(buf);
}
}
This essentially creates a new thread. PoolThread inherits from the Thread class, which is defined in frameworks/base/libs/utils/Threads.cpp; Thread::run eventually calls the subclass's threadLoop function, which here is PoolThread::threadLoop:
virtual bool threadLoop()
{
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
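For completeness, PoolThread itself is a small class declared inside ProcessState.cpp; roughly (a sketch reproduced from memory):

class PoolThread : public Thread
{
public:
    PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }

protected:
    virtual bool threadLoop()
    {
        // each pooled thread simply joins the Binder thread pool
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;
    }

    const bool mIsMain;
};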
Just like the main function in frameworks/base/media/mediaserver/main_mediaserver.cpp, this ends up calling IPCThreadState::joinThreadPool; the difference is that one call passes true explicitly while the other relies on the parameter's default value. Let us look at the implementation of this function:
void IPCThreadState::joinThreadPool(bool isMain)
{
LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
......
status_t result;
do {
int32_t cmd;
.......
// now get the next command to be processed, waiting if necessary
result = talkWithDriver();
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) continue;
cmd = mIn.readInt32();
......
result = executeCommand(cmd);
}
......
} while (result != -ECONNREFUSED && result != -EBADF);
.......
mOut.writeInt32(BC_EXIT_LOOPER);
talkWithDriver(false);
}
This function ends up in an endless loop, interacting with the Binder driver through talkWithDriver. In effect, it calls talkWithDriver to wait for Client requests and then executeCommand to handle them, and inside executeCommand it is ultimately BBinder::transact that does the real work of handling a Client request:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch (cmd) {
......
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
......
Parcel reply;
......
if (tr.target.ptr) {
sp<BBinder> b((BBinder*)tr.cookie);
const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
if (error < NO_ERROR) reply.setError(error);
} else {
const status_t error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
if (error < NO_ERROR) reply.setError(error);
}
......
}
break;
.......
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
Next, let us look at the implementation of BBinder::transact:
status_t BBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
data.setDataPosition(0);
status_t err = NO_ERROR;
switch (code) {
case PING_TRANSACTION:
reply->writeInt32(pingBinder());
break;
default:
err = onTransact(code, data, reply, flags);
break;
}
if (reply != NULL) {
reply->setDataPosition(0);
}
return err;
}
The request is ultimately handled by the onTransact function. In our scenario BnMediaPlayerService inherits from BBinder and overrides onTransact, so the call actually lands in BnMediaPlayerService::onTransact, which is defined in frameworks/base/libs/media/libmedia/IMediaPlayerService.cpp:
status_t BnMediaPlayerService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case CREATE_URL: {
......
} break;
case CREATE_FD: {
......
} break;
case DECODE_URL: {
......
} break;
case DECODE_FD: {
......
} break;
case CREATE_MEDIA_RECORDER: {
......
} break;
case CREATE_METADATA_RETRIEVER: {
......
} break;
case GET_OMX: {
......
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
With that, using MediaPlayerService as an example, we have walked through the complete Server startup process in the Binder inter-process communication mechanism of Android. Once the Server is up, it waits for Client requests in an endless loop. In the next article we will look at how a Client obtains a remote interface to the Server through the Service Manager's remote interface, and then calls that interface to use the services the Server provides. Stay tuned.