Implementation of the Binder Mechanism in Android (Part 2)

A user-space pointer is set up to point at the data to be written, and get_user(cmd, (uint32_t __user *)ptr) copies the command from user space into the kernel. The pointer is then advanced past the command and the per-command statistics counter is updated. The kernel branches on the command value; in our case the command is BC_ENTER_LOOPER, and its handler does only one thing: it adds the BINDER_LOOPER_STATE_ENTERED flag to the thread's looper state. Finally the amount of write data consumed is recorded and the call returns. With this initialization done, binder_loop enters its main loop and waits for messages from other processes. Each iteration resets the read size, the read buffer and read_consumed, and then issues ioctl(bs->fd, BINDER_WRITE_READ, &bwr); because the read size is now non-zero, the driver ends up in binder_thread_read:

static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
                   void __user *buffer, int size, signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;
    int ret = 0;
    int wait_for_proc_work;

    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);

    if (thread->return_error != BR_OK && ptr < end) {
        if (thread->return_error2 != BR_OK) {
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (ptr == end)
                goto done;
            thread->return_error2 = BR_OK;
        }
        if (put_user(thread->return_error, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        thread->return_error = BR_OK;
        goto done;
    }

    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++;
    mutex_unlock(&binder_lock);
    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                                BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("binder: %d:%d ERROR: Thread waiting "
                "for process work before calling BC_REGISTER_"
                "LOOPER or BC_ENTER_LOOPER (state %x)\n",
                proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
        }
        binder_set_nice(proc->default_priority);
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
    }
    mutex_lock(&binder_lock);
    if (wait_for_proc_work)
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    if (ret)
        return ret;

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4)
            break;

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
        } break;
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            cmd = BR_TRANSACTION_COMPLETE;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);

            binder_stat_br(proc, thread, cmd);
            if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
                printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
                       proc->pid, thread->pid);

            list_del(&w->entry);
            kfree(w);
            binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
        } break;
        case BINDER_WORK_NODE: {
            struct binder_node *node = container_of(w, struct binder_node, work);
            uint32_t cmd = BR_NOOP;
            const char *cmd_name;
            int strong = node->internal_strong_refs || node->local_strong_refs;
            int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
            if (weak && !node->has_weak_ref) {
                cmd = BR_INCREFS;
                cmd_name = "BR_INCREFS";
                node->has_weak_ref = 1;
                node->pending_weak_ref = 1;
                node->local_weak_refs++;
            } else if (strong && !node->has_strong_ref) {
                cmd = BR_ACQUIRE;
                cmd_name = "BR_ACQUIRE";
                node->has_strong_ref = 1;
                node->pending_strong_ref = 1;
                node->local_strong_refs++;
            } else if (!strong && node->has_strong_ref) {
                cmd = BR_RELEASE;
                cmd_name = "BR_RELEASE";
                node->has_strong_ref = 0;
            } else if (!weak && node->has_weak_ref) {
                cmd = BR_DECREFS;
                cmd_name = "BR_DECREFS";
                node->has_weak_ref = 0;
            }
            if (cmd != BR_NOOP) {
                if (put_user(cmd, (uint32_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(uint32_t);
                if (put_user(node->ptr, (void * __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(void *);
                if (put_user(node->cookie, (void * __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(void *);

                binder_stat_br(proc, thread, cmd);
                if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
                    printk(KERN_INFO "binder: %d:%d %s %d u%p c%p\n",
                           proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
            } else {
                list_del_init(&w->entry);
                if (!weak && !strong) {
                    if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
                        printk(KERN_INFO "binder: %d:%d node %d u%p c%p deleted\n",
                               proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie);
                    rb_erase(&node->rb_node, &proc->nodes);
                    kfree(node);
                    binder_stats.obj_deleted[BINDER_STAT_NODE]++;
                } else {
                    if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
                        printk(KERN_INFO "binder: %d:%d node %d u%p c%p state unchanged\n",
                               proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie);
                }
            }
        } break;
        case BINDER_WORK_DEAD_BINDER:
        case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
        case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
            struct binder_ref_death *death = container_of(w, struct binder_ref_death, work);
            uint32_t cmd;
            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
                cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
            else
                cmd = BR_DEAD_BINDER;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (put_user(death->cookie, (void * __user *)ptr))
                return -EFAULT;
            ptr += sizeof(void *);
            if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION)
                printk(KERN_INFO "binder: %d:%d %s %p\n",
                       proc->pid, thread->pid,
                       cmd == BR_DEAD_BINDER ?
                       "BR_DEAD_BINDER" :
                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
                       death->cookie);

            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
                list_del(&w->entry);
                kfree(death);
                binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
            } else
                list_move(&w->entry, &proc->delivered_death);
            if (cmd == BR_DEAD_BINDER)
                goto done; /* DEAD_BINDER notifications can cause transactions */
        } break;
        }

        if (!t)
            continue;

        BUG_ON(t->buffer == NULL);
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            tr.target.ptr = target_node->ptr;
            tr.cookie = target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                     t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = NULL;
            tr.cookie = NULL;
            cmd = BR_REPLY;
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = t->sender_euid;

        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;
            tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset);
        tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));

        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);

        binder_stat_br(proc, thread, cmd);
        if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
            printk(KERN_INFO "binder: %d:%d %s %d %d:%d, cmd %d size %d-%d ptr %p-%p\n",
                   proc->pid, thread->pid,
                   (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
                   t->debug_id, t->from ? t->from->proc->pid : 0,
                   t->from ? t->from->pid : 0, cmd,
                   t->buffer->data_size, t->buffer->offsets_size,
                   tr.data.ptr.buffer, tr.data.ptr.offsets);

        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
        }
        break;
    }

done:
    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                           BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
        /* spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        if (binder_debug_mask & BINDER_DEBUG_THREADS)
            printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n",
                   proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
    }
    return 0;
}

Incoming requests are picked up here, and binder_parse is then called to handle them. These paths will be analyzed with concrete examples in the client/service communication sections below.
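For reference, the user-space side that drives this read path is binder_loop in frameworks/base/cmds/servicemanager/binder.c. A condensed sketch follows (error handling and logging trimmed, so treat it as illustrative rather than a verbatim copy):

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    /* tell the driver this thread has entered the loop */
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        /* only the read half is filled in, so the driver ends up in binder_thread_read */
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;

        /* hand the returned commands to binder_parse, which dispatches to func */
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res <= 0)
            break;
    }
}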

II. Registering a Service

Once the Service Manager is up, services can register with it. This section looks at how a service registers itself with the Service Manager. Android ships a Camera Service; we will use it to walk through the service side of Binder, the client side, and how the two communicate.

Android provides a common utility library, utils, which builds into libutils.so. The Binder-related files are included as part of utils; the relevant sources live under frameworks/base/include/utils and frameworks/base/libs/utils.

RefBase.h: reference counting; defines the class RefBase
Parcel.h: a container for data carried across IPC; defines the class Parcel
IBinder.h: the abstract interface of a Binder object; defines the class IBinder
Binder.h: the basic functionality of Binder objects; defines the classes BBinder and BpRefBase
BpBinder.h: the proxy-side functionality; defines the class BpBinder
IInterface.h: common base types for interfaces carried over Binder; defines the class IInterface and the class templates BnInterface and BpInterface
ProcessState.h: per-process Binder state; defines the class ProcessState
IPCThreadState.h: per-thread IPC state; defines the class IPCThreadState

The relationships among these classes are shown in the class diagram below:

In the diagram, XXX stands for the service you define yourself; every class whose name contains XXX is your own. Note that both the client and the service share this same class hierarchy; even the ServiceManager can be viewed as just another service (the service of services). Returning to our example, CameraService, its class hierarchy follows the same pattern.
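As an illustration of that pattern (the interface name IXXX, the method doSomething and the transaction code are placeholders, not code from the tree; the utils headers listed above are assumed to be included):

// IXXX.h - illustrative only
class IXXX : public IInterface {
public:
    DECLARE_META_INTERFACE(XXX);              // declares asInterface(), descriptor, ...
    virtual status_t doSomething() = 0;       // the business method carried over Binder
};

// Server side: BnXXX unpacks the Parcel and calls the real implementation.
class BnXXX : public BnInterface<IXXX> {
public:
    virtual status_t onTransact(uint32_t code, const Parcel& data,
                                Parcel* reply, uint32_t flags = 0);
};

// Client side: BpXXX packs a Parcel and forwards it through its BpBinder (mRemote).
class BpXXX : public BpInterface<IXXX> {
public:
    BpXXX(const sp<IBinder>& impl) : BpInterface<IXXX>(impl) {}
    virtual status_t doSomething() {
        Parcel data, reply;
        data.writeInterfaceToken(IXXX::getInterfaceDescriptor());
        remote()->transact(0 /* illustrative transaction code */, data, &reply);
        return reply.readInt32();
    }
};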

The first thing CameraService does is register itself with the ServiceManager. The services started at Android boot are set up in Main_mediaserver.cpp (frameworks/base/media/mediaserver):

int main(int argc, char** argv)
{
    sp<ProcessState> proc(ProcessState::self());
    sp<IServiceManager> sm = defaultServiceManager();
    LOGI("ServiceManager: %p", sm.get());
    AudioFlinger::instantiate();                // audio service
    MediaPlayerService::instantiate();          // media player service
    CameraService::instantiate();               // camera service
    ProcessState::self()->startThreadPool();    // start this process's binder thread pool
    IPCThreadState::self()->joinThreadPool();   // add the main thread to the pool
}

CameraService::instantiate is defined as follows (frameworks/base/camera/libcameraservice):

void CameraService::instantiate() {
    defaultServiceManager()->addService(
            String16("media.camera"), new CameraService());
}

defaultServiceManager is a global function defined in frameworks/base/libs/utils/IServiceManager.cpp; calling it returns the global object sp<IServiceManager> gDefaultServiceManager:

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        if (gDefaultServiceManager == NULL) {
            gDefaultServiceManager = interface_cast<IServiceManager>(
                    ProcessState::self()->getContextObject(NULL));
        }
    }

    return gDefaultServiceManager;
}

sp is a smart-pointer template built on RefBase; it adds reference counting to the object it wraps, so for our purposes something of type sp<IServiceManager> can simply be read as "an IServiceManager object". The call above therefore returns an IServiceManager object.
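A minimal sketch of how sp<> behaves (Foo is an invented class; the point is only that the object is destroyed when the last sp referencing it goes out of scope):

#include <utils/RefBase.h>

using namespace android;

class Foo : public RefBase {          // RefBase supplies incStrong()/decStrong()
public:
    void hello() { /* ... */ }
};

void example()
{
    sp<Foo> a = new Foo();            // strong count becomes 1
    {
        sp<Foo> b = a;                // copying an sp bumps the count to 2
        b->hello();                   // sp overloads operator-> like a raw pointer
    }                                 // b destroyed, count drops back to 1
}                                     // a destroyed, count hits 0, Foo is deleted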

On the first call gDefaultServiceManager is NULL, so interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL)) is evaluated. ProcessState::self() is defined as:

sp<ProcessState> ProcessState::self()
{
    if (gProcess != NULL) return gProcess;

    AutoMutex _l(gProcessMutex);
    if (gProcess == NULL) gProcess = new ProcessState;
    return gProcess;
}

gProcess is likewise a global variable, sp<ProcessState> gProcess; it is NULL on the first call. AutoMutex _l can be read as a mutual-exclusion guard that keeps concurrent callers from racing on this shared data.

In the ProcessState constructor the binder driver is opened and a region of memory is mapped for incoming transaction data; a rough sketch of that step is given below. The code then calls getContextObject, shown after the sketch:
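A condensed sketch of what the constructor does, based on ProcessState.cpp (the helper open_driver(), the thread-count value and the size constant BINDER_VM_SIZE are recalled from the source and should be treated as approximate):

static int open_driver()
{
    int fd = open("/dev/binder", O_RDWR);      // every binder-using process opens the driver once
    if (fd >= 0) {
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        size_t maxThreads = 15;
        ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);   // cap how many looper threads the kernel may request
    }
    return fd;
}

ProcessState::ProcessState()
    : mDriverFD(open_driver())
    , mVMStart(MAP_FAILED)
{
    if (mDriverFD >= 0) {
        // Map a chunk of address space that the driver fills with incoming transactions;
        // binder_thread_read's tr.data.ptr.buffer points into this region.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ,
                        MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
    }
}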

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
    if (supportsProcesses()) {
        return getStrongProxyForHandle(0);
    } else {
        return getContextObject(String16("default"), caller);
    }
}

Android does support the Binder driver, so getStrongProxyForHandle(0) is taken; handle 0 is, as before, the ServiceManager's handle.

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we
        // are unable to acquire a weak reference on this current one. See comment
        // in getWeakProxyForHandle() for more info about this.
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            // This little bit of nastyness is to allow us to add a primary
            // reference to the remote proxy when this team doesn't have one
            // but another team is sending the handle to us.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }

    return result;
}

On the first call b is NULL, so a BpBinder object is created for this handle:

BpBinder::BpBinder(int32_t handle)
    : mHandle(handle)
    , mAlive(1)
    , mObitsSent(0)
    , mObituaries(NULL)
{
    LOGV("Creating BpBinder %p handle %d\n", this, mHandle);

    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
    IPCThreadState::self()->incWeakHandle(handle);
}

void IPCThreadState::incWeakHandle(int32_t handle)
{
    LOG_REMOTEREFS("IPCThreadState::incWeakHandle(%d)\n", handle);
    mOut.writeInt32(BC_INCREFS);
    mOut.writeInt32(handle);
}

So ProcessState::self()->getContextObject(NULL) ultimately returns a BpBinder object. interface_cast is a small template helper:

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}

The INTERFACE::asInterface() method is itself generated by a macro, so for IServiceManager the actual expansion is:

sp<IServiceManager> IServiceManager::asInterface(const sp<IBinder>& obj)
{
    sp<IServiceManager> intr;
    if (obj != NULL) {
        intr = static_cast<IServiceManager*>(
            obj->queryLocalInterface(IServiceManager::descriptor).get());
        if (intr == NULL) {
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}
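That asInterface() body is not written by hand; it comes from the DECLARE_META_INTERFACE / IMPLEMENT_META_INTERFACE macro pair in IInterface.h. For IServiceManager the two invocations look roughly like this (paraphrased from the header and the .cpp; the descriptor string should be checked against the source):

// IServiceManager.h, inside the class declaration:
DECLARE_META_INTERFACE(ServiceManager);

// IServiceManager.cpp, at file scope - this expands into the descriptor,
// getInterfaceDescriptor() and the asInterface() shown above:
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");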

So what is finally returned is a BpServiceManager object. The obj parameter is the BpBinder created earlier; it is passed all the way down to BpRefBase, where it is stored as mRemote. BpServiceManager's addService method is then called:

virtual status_t addService(const String16& name, const sp<IBinder>& service)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readInt32() : err;
}

Here name is "media.camera" and service is the new CameraService instance. The Parcel data is filled in first; remote() returns the BpBinder created above, whose transact method is:

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)  // flags defaults to 0, i.e. a synchronous call
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}

mHandle is the handle 0 set when the proxy was created. Now look at the implementation of IPCThreadState::transact:

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();

    flags |= TF_ACCEPT_FDS;

    IF_LOG_TRANSACTIONS() {
        TextOutput::Bundle _b(alog);
        alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
            << handle << " / code " << TypeCode(code) << ": "
            << indent << data << dedent << endl;
    }

    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }

    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }

        IF_LOG_TRANSACTIONS() {
            TextOutput::Bundle _b(alog);
            alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
                << handle << ": ";
            if (reply) alog << indent << *reply << dedent << endl;
            else alog << "(none requested)" << endl;
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }

    return err;
}

writeTransactionData packages the request first; waitForResponse then calls talkWithDriver, which actually talks to the driver. That wakes the ServiceManager process, which handles the request in do_add_service and adds the service to its service list.
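To make the first half of that concrete, here is a condensed sketch of IPCThreadState::writeTransactionData (paraphrased from IPCThreadState.cpp; the error and status-code paths are omitted, so treat it as illustrative):

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.handle = handle;     // 0 here, i.e. the ServiceManager
    tr.code = code;                // ADD_SERVICE_TRANSACTION
    tr.flags = binderFlags;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        // Point the kernel at the flattened Parcel and at the offsets of any
        // binder objects embedded in it (here, our new CameraService).
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount() * sizeof(size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    }
    // (error and status-code handling omitted in this sketch)

    mOut.writeInt32(cmd);          // BC_TRANSACTION
    mOut.write(&tr, sizeof(tr));   // queued; talkWithDriver() later flushes mOut with ioctl(BINDER_WRITE_READ)

    return NO_ERROR;
}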

III. A Client Requests a Service

A client obtains, via getService, the IBinder of the Service it wants to connect to. That IBinder is a reference, inside the binder kernel, to the Service's BBinder, so for a given service no two duplicate IBinder objects exist in the binder kernel. Every client process likewise needs to open the Binder driver. From the user program's point of view, once we hold this object we can invoke the service object's methods through the binder kernel. Client and Service live in different processes, yet this mechanism gives a communication style much like thread migration: once the client holds the IBinder interface returned for the Service, calling the Service's methods feels just like calling its own functions.

First the connection is established:

sp<Camera> Camera::connect()
{
    sp<Camera> c = new Camera();
    const sp<ICameraService>& cs = getCameraService();
    if (cs != 0) {
        c->mCamera = cs->connect(c);
    }
    if (c->mCamera != 0) {
        c->mCamera->asBinder()->linkToDeath(c);
        c->mStatus = NO_ERROR;
    }
    return c;
}

const sp<ICameraService>& Camera::getCameraService()
{
    Mutex::Autolock _l(mLock);
    if (mCameraService.get() == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(String16("media.camera"));
            if (binder != 0)
                break;
            LOGW("CameraService not published, waiting...");
            usleep(500000); // 0.5 s
        } while(true);
        if (mDeathNotifier == NULL) {
            mDeathNotifier = new DeathNotifier();
        }
        binder->linkToDeath(mDeathNotifier);
        mCameraService = interface_cast<ICameraService>(binder);
    }
    LOGE_IF(mCameraService==0, "no CameraService!?");
    return mCameraService;
}

sm is the BpServiceManager object, and its getService method is called:

virtual sp<IBinder> getService(const String16& name) const
{
    unsigned n;
    for (n = 0; n < 5; n++){
        sp<IBinder> svc = checkService(name);
        if (svc != NULL) return svc;
        LOGI("Waiting for service %s...\n", String8(name).string());
        sleep(1);
    }
    return NULL;
}

virtual sp<IBinder> checkService( const String16& name) const
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}

This matches the earlier analysis and finally returns the service's IBinder object. The connection between Client and Service is now in place; all that remains is how the two actually communicate.

The statement c->mCamera = cs->connect(c) lands in BpCameraService::connect:

virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient)
{
    Parcel data, reply;
    data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
    data.writeStrongBinder(cameraClient->asBinder());
    remote()->transact(BnCameraService::CONNECT, data, &reply);
    return interface_cast<ICamera>(reply.readStrongBinder());
}

This again follows the earlier analysis and finally returns an ICamera object from CameraService; the caller's request crosses over into the CameraService process. Every Binder-using process in Android creates a thread pool to handle requests from other processes. When there is no work these threads sleep; at this point the binder kernel wakes one of them up:

void IPCThreadState::joinThreadPool(bool isMain)
{
    LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());

    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    status_t result;
    do {
        int32_t cmd;
        result = talkWithDriver();
        if (result >= NO_ERROR) {
            size_t IN = mIn.dataAvail();
            if (IN < sizeof(int32_t)) continue;
            cmd = mIn.readInt32();
            IF_LOG_COMMANDS() {
                alog << "Processing top-level Command: "
                    << getReturnString(cmd) << endl;
            }
            result = executeCommand(cmd);
        }

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if (result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p\n",
        (void*)pthread_self(), getpid(), (void*)result);

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;

    switch (cmd) {
    case BR_ERROR:
        result = mIn.readInt32();
        break;

    case BR_OK:
        break;

    case BR_ACQUIRE:
        refs = (RefBase::weakref_type*)mIn.readInt32();
        obj = (BBinder*)mIn.readInt32();
        LOG_ASSERT(refs->refBase() == obj,
                   "BR_ACQUIRE: object %p does not match cookie %p (expected %p)",
                   refs, obj, refs->refBase());
        obj->incStrong(mProcess.get());
        IF_LOG_REMOTEREFS() {
            LOG_REMOTEREFS("BR_ACQUIRE from driver on %p", obj);
            obj->printRefs();
        }
        mOut.writeInt32(BC_ACQUIRE_DONE);
        mOut.writeInt32((int32_t)refs);
        mOut.writeInt32((int32_t)obj);
        break;

    case BR_RELEASE:
        refs = (RefBase::weakref_type*)mIn.readInt32();
        obj = (BBinder*)mIn.readInt32();
        LOG_ASSERT(refs->refBase() == obj,
                   "BR_RELEASE: object %p does not match cookie %p (expected %p)",
                   refs, obj, refs->refBase());
        IF_LOG_REMOTEREFS() {
            LOG_REMOTEREFS("BR_RELEASE from driver on %p", obj);
            obj->printRefs();
        }
        obj->decStrong(mProcess.get());
        break;

    case BR_INCREFS:
        refs = (RefBase::weakref_type*)mIn.readInt32();
        obj = (BBinder*)mIn.readInt32();
        refs->incWeak(mProcess.get());
        mOut.writeInt32(BC_INCREFS_DONE);
        mOut.writeInt32((int32_t)refs);
        mOut.writeInt32((int32_t)obj);
        break;

    case BR_DECREFS:
        refs = (RefBase::weakref_type*)mIn.readInt32();
        obj = (BBinder*)mIn.readInt32();
        // NOTE: This assertion is not valid, because the object may no
        // longer exist (thus the (BBinder*)cast above resulting in a different
        // memory address).
        //LOG_ASSERT(refs->refBase() == obj,
        //           "BR_DECREFS: object %p does not match cookie %p (expected %p)",
        //           refs, obj, refs->refBase());
        refs->decWeak(mProcess.get());
        break;

    case BR_ATTEMPT_ACQUIRE:
        refs = (RefBase::weakref_type*)mIn.readInt32();
        obj = (BBinder*)mIn.readInt32();

        {
            const bool success = refs->attemptIncStrong(mProcess.get());
            LOG_ASSERT(success && refs->refBase() == obj,
                       "BR_ATTEMPT_ACQUIRE: object %p does not match cookie %p (expected %p)",
                       refs, obj, refs->refBase());

            mOut.writeInt32(BC_ACQUIRE_RESULT);
            mOut.writeInt32((int32_t)success);
        }
        break;

    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            LOG_ASSERT(result == NO_ERROR,
                       "Not enough command data for brTRANSACTION");
            if (result != NO_ERROR) break;

            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(size_t), freeBuffer, this);

            const pid_t origPid = mCallingPid;
            const uid_t origUid = mCallingUid;

            mCallingPid = tr.sender_pid;
            mCallingUid = tr.sender_euid;

            //LOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);

            Parcel reply;

            IF_LOG_TRANSACTIONS() {
                TextOutput::Bundle _b(alog);
                alog << "BR_TRANSACTION thr " << (void*)pthread_self()
                    << " / obj " << tr.target.ptr << " / code "
                    << TypeCode(tr.code) << ": " << indent << buffer
                    << dedent << endl
                    << "Data addr = "
                    << reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
                    << ", offsets addr="
                    << reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
            }

            if (tr.target.ptr) {
                sp<BBinder> b((BBinder*)tr.cookie);
                const status_t error = b->transact(tr.code, buffer, &reply, 0);
                if (error < NO_ERROR) reply.setError(error);
            } else {
                const status_t error = the_context_object->transact(tr.code, buffer, &reply, 0);
                if (error < NO_ERROR) reply.setError(error);
            }

            //LOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
            //     mCallingPid, origPid, origUid);

            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }

            mCallingPid = origPid;
            mCallingUid = origUid;

            IF_LOG_TRANSACTIONS() {
                TextOutput::Bundle _b(alog);
                alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
                    << tr.target.ptr << ": " << indent << reply << dedent << endl;
            }
        }
        break;

    case BR_DEAD_BINDER:
        {
            BpBinder *proxy = (BpBinder*)mIn.readInt32();
            proxy->sendObituary();
            mOut.writeInt32(BC_DEAD_BINDER_DONE);
            mOut.writeInt32((int32_t)proxy);
        } break;

    case BR_CLEAR_DEATH_NOTIFICATION_DONE:
        {
            BpBinder *proxy = (BpBinder*)mIn.readInt32();
            proxy->getWeakRefs()->decWeak(proxy);
        } break;

    case BR_FINISHED:
        result = TIMED_OUT;
        break;

    case BR_NOOP:
        break;

    case BR_SPAWN_LOOPER:
        mProcess->spawnPooledThread(false);
        break;

    default:
        printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
        result = UNKNOWN_ERROR;
        break;
    }

    if (result != NO_ERROR) {
        mLastError = result;
    }

    return result;
}

This calls BBinder::transact on the CameraService object:

status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default:
            err = onTransact(code, data, reply, flags);
            break;
    }

    if (reply != NULL) {
        reply->setDataPosition(0);
    }

    return err;
}

which in turn calls BnCameraService::onTransact:

status_t BnCameraService::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case CONNECT: {
            CHECK_INTERFACE(ICameraService, data, reply);
            sp<ICameraClient> cameraClient = interface_cast<ICameraClient>(data.readStrongBinder());
            sp<ICamera> camera = connect(cameraClient);
            reply->writeStrongBinder(camera->asBinder());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

sp<ICamera> CameraService::connect(const sp<ICameraClient>& cameraClient)
{
    LOGD("Connect E from ICameraClient %p", cameraClient->asBinder().get());

    Mutex::Autolock lock(mLock);
    if (mClient != 0) {
        sp<Client> currentClient = mClient.promote();
        if (currentClient != 0) {
            sp<ICameraClient> currentCameraClient(currentClient->getCameraClient());
            if (cameraClient->asBinder() == currentCameraClient->asBinder()) {
                // this is the same client reconnecting...
                LOGD("Connect X same client is reconnecting...");
                return currentClient;
            } else {
                // it's another client... boot the previous one...
                LOGD("new client connecting, booting the old one...");
                mClient.clear();
            }
        } else {
            // can't promote, the previous client has died...
            LOGD("new client connecting, old reference was dangling...");
            mClient.clear();
        }
    }

    // create a new Client object
    sp<Client> client = new Client(this, cameraClient);
    mClient = client;

#if DEBUG_CLIENT_REFERENCES
    // Enable tracking for this object, and track increments and decrements of
    // the refcount.
    client->trackMe(true, true);
#endif

    LOGD("Connect X");
    return client;
}

That completes one full round trip. From here on, the client can invoke the server's functions as if they were local calls, for example:

// take a picture
status_t Camera::takePicture()
{
    return mCamera->takePicture();
}
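mCamera here is an ICamera proxy, so the call goes through exactly the machinery described above. A hedged sketch of what the proxy side looks like (the transaction code name TAKE_PICTURE and the exact reply handling are assumptions for illustration, not quoted from the tree):

// BpCamera::takePicture - illustrative sketch of the proxy side
status_t BpCamera::takePicture()
{
    Parcel data, reply;
    data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
    // remote() is the BpBinder wrapping the handle returned by CameraService::connect
    remote()->transact(TAKE_PICTURE, data, &reply);
    return reply.readInt32();                 // status written back by BnCamera::onTransact
}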

ProcessState::self() simply returns the process-wide ProcessState object.

To summarize: through defaultServiceManager we obtain an interface to the remote Service Manager, and through its addService a system service is registered in the Service Manager process. A client then uses getService to obtain the IBinder of the Service it needs; that IBinder refers, inside the binder kernel, to the Service's BBinder, so no two duplicate IBinder objects for the same service exist in the kernel. Each client process opens the Binder driver as well, and once it holds this IBinder it can call the Service's methods through the binder kernel as naturally as calling its own functions, even though Client and Service live in different processes.
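Putting the pieces together, a minimal client-side sketch of that flow (the ServiceManager lookup calls are the real ones discussed above; the surrounding function is only a wrapper for illustration):

void example()
{
    sp<IServiceManager> sm = defaultServiceManager();                 // BpServiceManager over handle 0
    sp<IBinder> binder = sm->getService(String16("media.camera"));    // CHECK_SERVICE_TRANSACTION under the hood
    sp<ICameraService> cs = interface_cast<ICameraService>(binder);   // wraps the handle in a BpCameraService proxy
    // Every call made through cs now becomes a BC_TRANSACTION handed to the
    // driver and executed inside the CameraService process.
}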
