Binder Source Code Analysis (Part 1)
Binder Source Code Analysis (Part 2)
Binder Source Code Analysis (Part 3)
Binder Source Code Analysis (Part 4)
Binder Source Code Analysis (Part 5)
Obtaining a Service
ServiceManager
- getService() first looks in the cache to see whether the service has already been fetched; if not, it falls back to getIServiceManager().getService() (a native-side equivalent of this lookup is sketched right after this list).
- getIServiceManager() returns ServiceManagerNative.asInterface(Binder.allowBlocking(BinderInternal.getContextObject())).
- getContextObject() is a native method; it ends up calling android_os_BinderInternal_getContextObject() in frameworks\base\core\jni\android_util_Binder.cpp, and the returned native IBinder is wrapped into a Java BinderProxy by javaObjectForIBinder().
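Before diving into the native code, here is a minimal native-side sketch of the same lookup, using libbinder's defaultServiceManager(); the service name "media.player" is only an illustrative example:
#include <binder/IServiceManager.h>
#include <binder/IBinder.h>
#include <utils/String16.h>
using namespace android;
int main() {
    // defaultServiceManager() internally goes through ProcessState::self()->getContextObject(NULL),
    // i.e. the same handle-0 path analyzed below.
    sp<IServiceManager> sm = defaultServiceManager();
    // Query a service by name; the result is a proxy (BpBinder) for the remote service.
    sp<IBinder> binder = sm->getService(String16("media.player"));
    return binder != nullptr ? 0 : 1;
}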
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}
ProcessState::self()
The important work here is opening the binder driver and setting up the memory mapping.
// Returns the ProcessState singleton, creating it under the lock on first use.
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState("/dev/binder");
return gProcess;
}
// Opens the binder driver's device file; if that succeeds, creates the memory mapping.
ProcessState::ProcessState(const char *driver)
: mDriverName(String8(driver))
// open_driver() returns the file descriptor of the binder driver.
, mDriverFD(open_driver(driver))
, mVMStart(MAP_FAILED)
, mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
, mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
, mExecutingThreadsCount(0)
, mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
, mStarvationStartTimeMs(0)
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
if (mDriverFD >= 0) {
// mmap the binder, providing a chunk of virtual address space to receive transactions.
// Note: like open(), this mmap() call goes through to the driver's binder_mmap, which records the mapping information (vm_area_struct) in the process's binder_proc.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
if (mVMStart == MAP_FAILED) {
...
}
}
LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
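For reference, the two constants used above are defined near the top of ProcessState.cpp (values from roughly this era of the source; they may differ in other releases):
#define BINDER_VM_SIZE ((1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2)   // ~1 MB minus two pages
#define DEFAULT_MAX_BINDER_THREADS 15
So each process maps a bit less than 1 MB of the driver's transaction buffer and, by default, tells the driver to spawn at most 15 additional binder threads.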
// Queries the binder driver's version and sets the maximum number of binder threads.
static int open_driver(const char *driver)
{
int fd = open(driver, O_RDWR | O_CLOEXEC);
if (fd >= 0) {
int vers = 0;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
... // handling for ioctl failure and version mismatch
size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
... // handling for ioctl failure
}
return fd;
}
// binder_open mainly allocates a binder_proc, initializes the proc's wait queue, todo list, etc., and stores the proc in filp->private_data,
// so that subsequent ioctl calls on this fd can access it.
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
struct binder_device *binder_dev;
...
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
...
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
...
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
...
hlist_add_head(&proc->proc_node, &binder_procs);
...
return 0;
}
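For orientation, the binder_proc that binder_open() fills in looks roughly like this (abridged; the exact fields and their order vary across kernel versions):
struct binder_proc {
    struct hlist_node proc_node;       // link in the global binder_procs list
    struct rb_root threads;            // this process's binder_threads, keyed by pid
    struct rb_root nodes;              // binder_nodes owned by this process
    struct rb_root refs_by_desc;       // references to remote nodes, keyed by handle
    struct rb_root refs_by_node;       // the same references, keyed by node address
    struct list_head waiting_threads;  // threads waiting for work
    int pid;
    struct task_struct *tsk;
    struct list_head todo;             // work queued for the process as a whole
    struct list_head delivered_death;
    int max_threads;                   // set later via BINDER_SET_MAX_THREADS
    struct binder_context *context;
    struct binder_alloc alloc;         // per-process buffer allocator, set up by binder_alloc_init()
    /* locks, statistics and other fields elided */
};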
ProcessState::getContextObject
getContextObject() simply returns getStrongProxyForHandle(0). That function looks up whether an entry already exists for the handle; the entry holds a weak reference to the IBinder, which it tries to promote, creating a new BpBinder if necessary, and returns the corresponding strong reference.
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
// Look up the entry for this handle; if handle is beyond the current number of entries N, handle+1-N empty entries are inserted.
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
// Special case for the context manager: it is the only object that can have an entry before any reference to it exists (see lookupHandleLocked()).
// To make sure the context manager has been registered before we hand out a reference to it, perform a dummy (PING) transaction first.
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
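The lookupHandleLocked() call above grows the mHandleToObject vector on demand so that every handle gets an entry holding an IBinder pointer and its weak-reference object; roughly (paraphrased from ProcessState.cpp):
ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
    const size_t N = mHandleToObject.size();
    if (handle >= N) {
        // Insert handle+1-N empty entries so that index `handle` becomes valid.
        handle_entry e;
        e.binder = NULL;
        e.refs = NULL;
        status_t err = mHandleToObject.insertAt(e, N, handle + 1 - N);
        if (err < NO_ERROR) return NULL;
    }
    return &mHandleToObject.editItemAt(handle);
}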
IPCThreadState::self()
This is a per-thread singleton: pthread_key_create() sets up thread-local storage, and self() returns the calling thread's IPCThreadState instance.
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
return new IPCThreadState;
}
if (gShutdown) {
ALOGW("Calling IPCThreadState::self() during shutdown is dangerous, expect a crash.\n");
return NULL;
}
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
int key_create_value = pthread_key_create(&gTLS, threadDestructor);
if (key_create_value != 0) {
pthread_mutex_unlock(&gTLSMutex);
ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
strerror(key_create_value));
return NULL;
}
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}
// The constructor stores `this` as the value of the gTLS key, so self() can later retrieve the IPCThreadState pointer; it also sets the initial capacities of mIn and mOut.
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
pthread_setspecific(gTLS, this);
clearCaller();
mIn.setDataCapacity(256);
mOut.setDataCapacity(256);
}
IPCThreadState::transact()
transact() first packs the transaction data via writeTransactionData(). Whether the call is one-way determines how waitForResponse() is invoked: for a two-way call with a null reply, a fake reply Parcel is constructed and passed instead; for a one-way call, no reply is expected.
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err;
flags |= TF_ACCEPT_FDS;
... // logging
// Wrap data (a Parcel) in a binder_transaction_data, setting code, flags, etc.,
// and append it to IPCThreadState's member mOut (a Parcel).
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
...
if ((flags & TF_ONE_WAY) == 0) {
... // logging
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
... // logging
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects(); // address of the offsets array (positions of flat_binder_object entries within the data)
} else if (statusBuffer) {
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
// Layout of binder_transaction_data (from the binder UAPI header):
struct binder_transaction_data {
union {
__u32 handle;
binder_uintptr_t ptr;
} target;
binder_uintptr_t cookie;
__u32 code;
__u32 flags;
pid_t sender_pid;
uid_t sender_euid;
binder_size_t data_size;
binder_size_t offsets_size;
union {
struct {
binder_uintptr_t buffer;
binder_uintptr_t offsets;
} ptr;
__u8 buf[8];
} data;
};
With the data packaged, we are ready to talk to the driver (talkWithDriver).
// Talks to the driver, then reads the cmd returned in mIn and performs the corresponding action.
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
... // variable declarations
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
... // log
switch (cmd) {
... // cases: see below
}
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
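talkWithDriver() packs what is pending in mOut and the space available in mIn into a binder_write_read structure, which is what the BINDER_WRITE_READ ioctl exchanges with the driver. Its layout (from the binder UAPI header) is:
struct binder_write_read {
    binder_size_t write_size;      // bytes available in write_buffer
    binder_size_t write_consumed;  // bytes the driver actually consumed
    binder_uintptr_t write_buffer; // -> mOut.data()
    binder_size_t read_size;       // room available in read_buffer
    binder_size_t read_consumed;   // bytes the driver actually filled in
    binder_uintptr_t read_buffer;  // -> mIn.data()
};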
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
// Is the read buffer empty?
// Note: needRead indicates whether this exchange with the binder driver should read data.
// If data from the previous exchange has not been fully consumed yet (mIn.dataPosition() < mIn.dataSize()), no read is requested this time so that data can be processed first.
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
... // logging
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
... // log
#if defined(__ANDROID__)
// issue the ioctl to the binder driver
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
... // log
} while (err == -EINTR);
... // log
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else {
mOut.setDataSize(0);
processPostWriteDerefs();
}
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
// reset the read position to the start
mIn.setDataPosition(0);
}
... // log
return NO_ERROR;
}
return err;
}
binder_ioctl
The key step in talkWithDriver() is the ioctl() call, which lands in binder_ioctl() in the driver. binder_ioctl() dispatches on cmd; when cmd is BINDER_WRITE_READ, it calls binder_ioctl_write_read().
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
...
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
... // other cases
}
ret = 0;
...
return ret;
}
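binder_get_thread(), called above, finds the binder_thread for the calling kernel thread in proc->threads (a red-black tree keyed by pid) and allocates one on first use. A rough sketch (details vary between kernel versions):
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread;
    struct binder_thread *new_thread;

    binder_inner_proc_lock(proc);
    thread = binder_get_thread_ilocked(proc, NULL);            // look up only
    binder_inner_proc_unlock(proc);
    if (!thread) {
        new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        if (new_thread == NULL)
            return NULL;
        binder_inner_proc_lock(proc);
        thread = binder_get_thread_ilocked(proc, new_thread);  // insert the new thread
        binder_inner_proc_unlock(proc);
        if (thread != new_thread)
            kfree(new_thread);                                 // lost a race; an entry already exists
    }
    return thread;
}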
binder_ioctl_write_read
The logic of binder_ioctl_write_read() is fairly straightforward: copy the binder_write_read structure from user space, write first and then read (easy to say, but both steps are quite involved), and finally copy the resulting structure back to user space.
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto out;
}
// Copy the binder_write_read structure from user space; it holds the sizes to write/read and
// pointers into the data of IPCThreadState's mOut and mIn Parcels.
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
... // log
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
// (wakes up a thread waiting on this process)
binder_inner_proc_unlock(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
... // log
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
binder_thread_write
binder_thread_write() loops over the commands in the user-space buffer and executes the corresponding logic. In our case there is only one command, BC_TRANSACTION, written by writeTransactionData(); the function then copies in the binder_transaction_data (also written by writeTransactionData()) and calls the all-important binder_transaction().
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
// These are all user-space pointers.
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error.cmd == BR_OK) {
int ret;
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
// the cmd just read is BC_TRANSACTION here, written into mOut by writeTransactionData()
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
... // other cases
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0);
break;
}
... // other cases
}
*consumed = ptr - buffer;
}
return 0;
}
binder_transaction() is quite long, so the next part of this series continues the analysis there.