binder机制原理分析一共分5个部分,其实省了一点,但是分析到后面都差不多了,以后再补充吧。
1、ServiceManager 进程启动
2、普通Service注册到ServiceManager
3、从ServiceManager中获取服务
4、java层service的注册过程
5、Java层service的获取过程
服务的获取和注册过程相似。获取一般经过:
- sp<IServiceManager> sm = defaultServiceManager();
- binder = sm->getService(String16("media.player"));
- binder->linkToDeath(sDeathNotifier);
- sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
步骤一:defaultServiceManager()
跟注册一样,defaultServiceManager最终返回的是BpServiceManager(BpBinder(0))。
步骤二:sm->getService(name)
- getService其实就是调用的checkService。
- checkService中跟addService一样封装Parcel,再写入name和code:CHECK_SERVICE_TRANSACTION
- 然后由BpBinder透传到IPCThreadState的transact中。先通过writeTransactionData将parcel数据封装成binder_transaction_data然后写入到mOut中,binder_transaction_data就是待发送数据;
- waitForResponse方法中通过while循环,不断调用talkWithDriver方法,将消息封装成binder_write_read,通过调用ioctl方法传入BC_TRANSACTION指令将消息传入driver中。将读取到数据存放在mIn中。
- 当talkWithDriver写入数据后,service_manager的binder_parse()的BR_TRANSACTION分支中将binder_transaction_data传入svcmgr_handler句柄中。获取服务通过SVC_MGR_CHECK_SERVICE/SVC_MGR_GET_SERVICE分支中的do_find_service方法从svclist链表中获取service。
- 通过bio_put_ref()方法将BINDER_TYPE_BINDER转为BINDER_TYPE_HANDLE,然后通过binder_send_reply()方法将消息回复回去
- (1)当请求服务的进程与服务属于不同进程,则为请求服务所在进程创建binder_ref对象,指向服务进程中的binder_node;(2)当请求服务的进程与服务属于同一进程,则不再创建新对象,只是引用计数加1,并且修改type为BINDER_TYPE_BINDER或BINDER_TYPE_WEAK_BINDER。
首先获取服务,调用getService()方法,其实就是checkService()
// Retry wrapper around checkService(): the target service may not have been
// registered yet (e.g. during boot), so poll up to 5 times with a 1-second
// sleep between attempts before giving up.
// @param name  the service name to look up (e.g. "media.player")
// @return the service's IBinder proxy, or NULL if not found after retries
virtual sp<IBinder> getService(const String16& name) const{
for (unsigned n = 0; n < 5; n++){ // fix: loop counter `n` was undeclared
sp<IBinder> svc = checkService(name);
if (svc != NULL) return svc;
sleep(1); // give the service time to come up, then retry
}
return NULL; // not registered after all retries
}
然后就是封装parcel对象
// One-shot service lookup: sends CHECK_SERVICE_TRANSACTION to the
// ServiceManager and returns the resulting binder (or NULL if absent).
// @param name  the service name to look up
// @return the service's IBinder proxy read back from the reply Parcel
virtual sp<IBinder> checkService( const String16& name) const
{
// Build the request Parcel: interface token first, then the service name.
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
// Forward through BpBinder into IPCThreadState::transact()
// (fix: this line was bare prose inside the function body and broke the code)
remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
// Convert the flat_binder_object in the reply back into an IBinder.
return reply.readStrongBinder();
}
跟注册一样,构造一个binder_transaction_data结构体,只是其中的code是CHECK_XXXX了,
binder的同步和异步由flags标志位确定,TF_ONE_WAY就是同步,不然就是异步
// Excerpt of IPCThreadState::transact(): queues the Parcel as a
// BC_TRANSACTION command and, for synchronous calls, blocks until the
// driver delivers the reply.
status_t IPCThreadState::transact(int32_t handle,uint32_t code, const Parcel& data,Parcel* reply,uint32_t flags){
// Package the request into a binder_transaction_data and append it to the
// mOut write buffer (not yet handed to the driver).
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
// TF_ONE_WAY clear => synchronous call: wait for the driver's reply
if ((flags & TF_ONE_WAY) == 0) {
err = waitForResponse(reply);
} else {
// TF_ONE_WAY set => asynchronous (one-way): no reply payload is awaited
err = waitForResponse(NULL, NULL);
}
return err;
}
然后一样在waitForResponse中通过talkWithDriver不断的与binder驱动进行通信
// Excerpt of IPCThreadState::waitForResponse(): loops on talkWithDriver(),
// draining return commands (BR_*) from the mIn read buffer until a reply,
// an error, or completion (for one-way calls) terminates the wait.
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult){uint32_t cmd;int32_t err;
while (1) {
// Push mOut's pending commands into the driver; replies land in mIn.
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
// Nothing read this round: go talk to the driver again.
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
case BR_TRANSACTION_COMPLETE:
// Request accepted by the driver; for a one-way call (no reply and no
// acquireResult expected) this is the end of the wait.
if (!reply && !acquireResult) goto finish;
break;
case BR_DEAD_REPLY:
// Target process died before/while handling the transaction.
err = DEAD_OBJECT;
goto finish;
case BR_FAILED_REPLY:
err = FAILED_TRANSACTION;
goto finish;
case BR_ACQUIRE_RESULT:{
ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
const int32_t result = mIn.readInt32();
if (!acquireResult) continue;
*acquireResult = result ? NO_ERROR : INVALID_OPERATION;
}
goto finish;
// The server side answered our transaction.
case BR_REPLY:{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
// Normal reply: let the Parcel reference the driver's buffer
// directly; freeBuffer releases it when the Parcel is done.
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
// Reply carries only a status code: extract it, free the buffer.
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
// Caller didn't want the reply: release the buffer and keep waiting.
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
default:
// Any other BR_ command (e.g. incoming work) is handled generically.
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
return err;
}
在talkWithDriver中构造一个binder_write_read结构体,然后调用ioctl进行写入
// Excerpt of IPCThreadState::talkWithDriver(): one BINDER_WRITE_READ ioctl
// round trip with the binder driver. Outgoing commands come from mOut;
// incoming commands/replies land in mIn.
// NOTE(review): `outAvail` is computed earlier in the full source; this
// excerpt starts after that point.
status_t IPCThreadState::talkWithDriver(bool doReceive){
binder_write_read bwr;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
if (doReceive && needRead) {
// Point the read buffer at mIn's data area so the driver fills it in.
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
// Caller does not want to read: write-only round trip.
bwr.read_size = 0;
bwr.read_buffer = 0;
}
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
// Exchange data with the driver: mOut is consumed, replies fill mIn.
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno; // fix: err was read uninitialized when ioctl failed
} while (err == -EINTR); // retry if interrupted by a signal
if (err >= NO_ERROR) {
// Drop the commands the driver actually consumed from mOut.
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
// Make the bytes the driver wrote visible in mIn and rewind it.
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
这里binder_parse通过func回调(即service_manager注册的svcmgr_handler)处理请求,然后执行其中的SVC_MGR_CHECK_SERVICE分支
// Excerpt of service_manager's binder_parse(): handles the BR_TRANSACTION
// command delivered by the driver and dispatches it to `func`
// (svcmgr_handler in this process). Other command branches are elided.
int binder_parse(struct binder_state *bs, struct binder_io *bio,uintptr_t ptr, size_t size, binder_handler func){
...............
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
// Dispatch to the handler; `reply` is filled with the result.
res = func(bs, txn, &msg, &reply);
if (txn->flags & TF_ONE_WAY) {
// One-way transaction: nothing to send back, just release the buffer.
binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
// Synchronous transaction: send the reply and free the request buffer.
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
break;
}
..............
return r;
}
在SVC_MGR_CHECK_SERVICE分支中通过do_find_service从svclist中找到已经注册的服务,然后返回
// Excerpt of svcmgr_handler(): service_manager's request dispatcher. Only
// the SVC_MGR_CHECK_SERVICE branch is shown; the enclosing switch(txn->code)
// and the other cases are elided from this excerpt.
int svcmgr_handler(struct binder_state *bs,struct binder_transaction_data *txn,struct binder_io*msg,struct binder_io *reply){
case SVC_MGR_CHECK_SERVICE:
// Read the requested service name out of the request binder_io.
s = bio_get_string16(msg, &len);
// Look the name up in the registered-service list; the caller's
// euid/pid are passed along (presumably for permission checks —
// the check itself is not visible in this excerpt).
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
// Write the found handle into the reply as a flat_binder_object.
bio_put_ref(reply, handle);
return 0;
// NOTE(review): the two lines below are unreachable as excerpted; in the
// full source they follow the switch as the generic success reply.
bio_put_uint32(reply, 0);
return 0;
}
// Append a flat_binder_object of type BINDER_TYPE_HANDLE to the reply
// binder_io. This is how service_manager hands a service reference back to
// the requesting client.
void bio_put_ref(struct binder_io *bio, uint32_t handle){
struct flat_binder_object *obj;

// Non-zero handles go through the object-allocation path; a zero
// (not-found) handle just gets a plain slot of the same size.
obj = handle ? bio_alloc_obj(bio)
             : bio_alloc(bio, sizeof(*obj));

obj->flags  = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->type   = BINDER_TYPE_HANDLE; // a reference, not the binder object itself
obj->handle = handle;
obj->cookie = 0;
}
// Excerpt of binder_send_reply(): sends two commands to the driver in a
// single write — BC_FREE_BUFFER to release the transaction buffer that
// carried the request, and BC_REPLY carrying the reply binder_io back to
// the waiting caller.
// NOTE(review): the declaration of the local `data` struct is elided here.
void binder_send_reply(struct binder_state *bs,struct binder_io *reply,binder_uintptr_t buffer_to_free,int status){
data.cmd_free = BC_FREE_BUFFER; // tell the driver to reclaim the request buffer
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY; // followed by the reply payload (elided below)
................
binder_write(bs, &data, sizeof(data)); // one ioctl pushes both commands
}
步骤三:linkToDeath死亡连接
- DeathNotifier是服务的内部类,重写了binderDied方法,当Bn端死掉后,会通过binderDied()通知Bp端。
- 获得到binder对象后,通过linkToDeath()将DeathNotifier注册到binder中去。
void IMediaDeathNotifier::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
SortedVector< wp<IMediaDeathNotifier> > list;{
Mutex::Autolock _l(sServiceLock);
sMediaPlayerService.clear();
list = sObitRecipients;
}
size_t count = list.size();
for (size_t iter = 0; iter < count; ++iter) {
sp<IMediaDeathNotifier> notifier = list[iter].promote();
if (notifier != 0) {
notifier->died();
}
}
}
向binder驱动发送BC_REQUEST_DEATH_NOTIFICATION这个指令
// Register for a death notification on the remote binder identified by
// mHandle: queue a BC_REQUEST_DEATH_NOTIFICATION command, then
// flushCommands() pushes it to the driver immediately.
IPCThreadState* self = IPCThreadState::self();
self->requestDeathNotification(mHandle, this);
self->flushCommands();
触发时机:(摘录)
每当service进程退出时,service manager会收到来自Binder驱动的死亡通知。 这项工作是在启动Service Manager时通过binder_link_to_death(bs, ptr, &si->death)完成。另外,每个Bp端也可以自己注册死亡通知,能获取Binder的死亡消息,比如前面的IMediaDeathNotifier。
触发原理:(摘录)
Binder死亡通知是如何触发的呢?对于Binder IPC进程都会打开/dev/binder文件,当进程异常退出时,Binder驱动会保证释放将要退出的进程中没有正常关闭的/dev/binder文件,实现机制是binder驱动通过调用/dev/binder文件所对应的release回调函数,执行清理工作,并且检查BBinder是否有注册死亡通知,当发现存在死亡通知时,那么就向其对应的BpBinder端发送死亡通知消息。