Now let's look at the client starting an Activity through ActivityManagerNative.getDefault().startActivity(). First, the ActivityManagerNative.getDefault() method:
static public IActivityManager getDefault() {
return gDefault.get();
}
private static final Singleton<IActivityManager> gDefault = new Singleton<IActivityManager>() {
protected IActivityManager create() {
IBinder b = ServiceManager.getService("activity");
if (false) {
Log.v("ActivityManager", "default service binder = " + b);
}
IActivityManager am = asInterface(b);
if (false) {
Log.v("ActivityManager", "default service = " + am);
}
return am;
}
};
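gDefault is an android.util.Singleton, a small lazy-initialization helper, so the AMS proxy is built once per process and then cached. A sketch of its shape (abbreviated from android.util.Singleton):
public abstract class Singleton<T> {
    private T mInstance;

    protected abstract T create();

    public final T get() {
        synchronized (this) {
            if (mInstance == null) {
                mInstance = create(); // first caller builds and caches the instance
            }
            return mInstance;
        }
    }
}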
We see ServiceManager.getService("activity"); recall that when AMS registered itself earlier, it used exactly this name, "activity". The flow mirrors the registration path above, except that the command is now GET_SERVICE_TRANSACTION. With handle 0, a BpBinder(0) is initialized and wrapped in a BinderProxy whose native member points to it. getService() then calls BinderProxy's transact method with the GET_SERVICE_TRANSACTION command, which calls BpBinder's transact method, and finally IPCThreadState::self()->transact. That in turn calls ioctl, trapping into the kernel via the system call and landing in the binder driver's binder_ioctl with the BINDER_WRITE_READ command, which runs binder_thread_write. Because the handle is 0, the driver resolves the target straight to the Service Manager's global binder_node, binder_context_mgr_node. It then builds a binder_transaction node and inserts it into the todo queue of the target binder_proc, i.e. the Service Manager's binder_proc. The Service Manager reads the data out of its buffer and parses it.
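It is worth pausing at the Java entry point. A sketch of ServiceManager.getService (abbreviated; sCache is a process-local cache, and getIServiceManager() returns the ServiceManagerProxy shown later in this section):
public static IBinder getService(String name) {
    try {
        IBinder service = sCache.get(name); // cache hit: skip the Binder round trip
        if (service != null) {
            return service;
        } else {
            return getIServiceManager().getService(name);
        }
    } catch (RemoteException e) {
        Log.e(TAG, "error in getService", e);
    }
    return null;
}
On the servicemanager daemon side, the parsed request is dispatched to svcmgr_handler: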
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
//ALOGI("target=%p code=%d pid=%d uid=%d\n",
// (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);
if (txn->target.ptr != BINDER_SERVICE_MANAGER)
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
...
switch(txn->code) {
case SVC_MGR_GET_SERVICE: // service lookup lands here
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid); // look up the service's handle by name
if (!handle)
break;
bio_put_ref(reply, handle); // write the handle back as the result
return 0;
case SVC_MGR_ADD_SERVICE:
.....
}
bio_put_uint32(reply, 0);
return 0;
}
Based on the service name passed in by the client, the matching handle is found and written into the reply as a reference:
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
struct flat_binder_object *obj;
if (handle)
obj = bio_alloc_obj(bio);
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->type = BINDER_TYPE_HANDLE; // a handle reference, not a local binder
obj->handle = handle; // store the service's handle in the flat_binder_object
obj->cookie = 0;
}
As seen earlier, the Service Manager ships the result back via binder_send_reply:
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offsets_size = 0;
data.txn.data.ptr.buffer = (uintptr_t)&status;
data.txn.data.ptr.offsets = 0;
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data));
}
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
As the code shows, BINDER_WRITE_READ writes the data into the write buffer. We are back in the binder driver's BINDER_WRITE_READ path: binder_thread_write runs, then binder_transaction. Using this handle, the driver looks up the binder_ref in the Service Manager's binder_proc, and through that reference reaches AMS's binder_node:
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle,
fp->type == BINDER_TYPE_HANDLE);
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
ref->debug_id, ref->desc, ref->node->debug_id,
(u64)ref->node->ptr);
} else { // the two processes differ, so this branch is taken
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node); // create a new binder_ref in the target process pointing at ref->node, i.e. AMS's binder_node
fp->binder = 0;
fp->handle = new_ref->desc; // desc is freshly allocated (the lowest free descriptor) in the client process and need not equal the original fp->handle
fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
}
} break;
A binder_transaction node is then created and queued onto the client's todo list as the return value. As we know from IPCThreadState::self()->transact, when a return value is expected, waitForResponse is called; the command arriving now is BR_REPLY:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
...
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr)); // read the buffered reply into the binder_transaction_data struct tr
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this); // hand the result buffer to reply; the offsets array records where binder objects sit in the data
} else {
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
...
return err;
}
The reply is then unpacked back in ServiceManagerProxy:
public IBinder getService(String name) throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
Back in gDefault.create() (shown at the top of this section), the IBinder returned by getService is passed to asInterface(b):
static public IActivityManager asInterface(IBinder obj) {
if (obj == null) {
return null;
}
IActivityManager in =
(IActivityManager)obj.queryLocalInterface(descriptor);
if (in != null) {
return in;
}
return new ActivityManagerProxy(obj);
}
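ActivityManagerProxy itself is a thin wrapper around the remote IBinder: every interface method marshals its arguments into a Parcel and funnels them through mRemote.transact. Its skeleton, abbreviated from ActivityManagerNative.java:
class ActivityManagerProxy implements IActivityManager {
    private IBinder mRemote; // the BinderProxy wrapping BpBinder(handle)

    public ActivityManagerProxy(IBinder remote) {
        mRemote = remote;
    }

    public IBinder asBinder() {
        return mRemote;
    }

    // Each IActivityManager method follows the same pattern: obtain Parcels,
    // write the arguments, call mRemote.transact(CODE, data, reply, 0),
    // then read the result out of reply.
}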
With that, we hold the AMS proxy, ActivityManagerProxy, and can call it as if we were operating on AMS directly. Now we call startActivity. The sending half works just like the flow above; the difference is that the request no longer goes to the Service Manager but to system_server, the process hosting AMS. Since system_server is also forked from zygote, part of its startup matches that of an ordinary process: it too goes through zygoteInitNative and, as noted in the earlier Binder introduction, spins up binder threads that block in IPCThreadState::self()->waitForResponse(). This time the incoming command is not BR_REPLY but BR_TRANSACTION:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
...
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
.....
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
...
}
So executeCommand runs:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
...
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
....
Parcel reply;
status_t error;
..
if (tr.target.ptr) {
sp<BBinder> b((BBinder*)tr.cookie); // cookie is the address of the Binder entity: the JavaBBinder (a BBinder subclass) that wraps the AMS object
error = b->transact(tr.code, buffer, &reply, tr.flags);
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
<< tr.target.ptr << ": " << indent << reply << dedent << endl;
}
}
break;
.....
}
Since the transaction is delivered to the very process that owns the Binder entity, tr.cookie carries the address of the BBinder object itself; it is cast back to a BBinder and its transact method is invoked.
BBinder lives in frameworks/native/libs/binder/Binder.cpp:
status_t BBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
data.setDataPosition(0);
status_t err = NO_ERROR;
switch (code) {
case PING_TRANSACTION:
reply->writeInt32(pingBinder());
break;
default:
err = onTransact(code, data, reply, flags); // this branch is taken
break;
}
if (reply != NULL) {
reply->setDataPosition(0);
}
return err;
}
This then calls into JavaBBinder's onTransact, defined in android_util_Binder.cpp:
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
{
JNIEnv* env = javavm_to_jnienv(mVM);
IPCThreadState* thread_state = IPCThreadState::self();
const int32_t strict_policy_before = thread_state->getStrictModePolicy();
// invoke Binder.execTransact on the Java object via JNI
jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);
if (env->ExceptionCheck()) {
jthrowable excep = env->ExceptionOccurred();
report_exception(env, excep,
"*** Uncaught remote exception! "
"(Exceptions are not yet supported across processes.)");
res = JNI_FALSE;
/* clean up JNI local ref -- we don't return to Java code */
env->DeleteLocalRef(excep);
}
...
}
Binder.java's execTransact method:
private boolean execTransact(int code, long dataObj, long replyObj,
int flags) {
Parcel data = Parcel.obtain(dataObj);
Parcel reply = Parcel.obtain(replyObj);
...
try {
res = onTransact(code, data, reply, flags);
} ...
return res;
}
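execTransact simply forwards to the onTransact override of the concrete Binder subclass, which here is ActivityManagerNative, the server-side counterpart of ActivityManagerProxy. Its shape, abbreviated:
public abstract class ActivityManagerNative extends Binder implements IActivityManager {
    public ActivityManagerNative() {
        // register this object as the local implementation of the interface
        attachInterface(this, descriptor);
    }
    // onTransact decodes the transaction code and Parcel, then calls the
    // matching interface method, which AMS implements.
}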
From there execution reaches ActivityManagerNative's onTransact method:
public boolean onTransact(int code, Parcel data, Parcel reply, int flags)
throws RemoteException {
switch (code) {
case START_ACTIVITY_TRANSACTION:
{
data.enforceInterface(IActivityManager.descriptor);
IBinder b = data.readStrongBinder();
IApplicationThread app = ApplicationThreadNative.asInterface(b);
String callingPackage = data.readString();
Intent intent = Intent.CREATOR.createFromParcel(data);
String resolvedType = data.readString();
IBinder resultTo = data.readStrongBinder();
String resultWho = data.readString();
int requestCode = data.readInt();
int startFlags = data.readInt();
ProfilerInfo profilerInfo = data.readInt() != 0
? ProfilerInfo.CREATOR.createFromParcel(data) : null;
Bundle options = data.readInt() != 0
? Bundle.CREATOR.createFromParcel(data) : null;
int result = startActivity(app, callingPackage, intent, resolvedType,
resultTo, resultWho, requestCode, startFlags, profilerInfo, options); // this ends up in AMS's startActivity override
reply.writeNoException();
reply.writeInt(result);
return true;
}
...
}
}
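The writes on the proxy side mirror these reads one for one. A sketch of ActivityManagerProxy.startActivity, abbreviated (error handling omitted):
public int startActivity(IApplicationThread caller, String callingPackage,
        Intent intent, String resolvedType, IBinder resultTo, String resultWho,
        int requestCode, int startFlags, ProfilerInfo profilerInfo,
        Bundle options) throws RemoteException {
    Parcel data = Parcel.obtain();
    Parcel reply = Parcel.obtain();
    // the write order below matches the read order in onTransact above
    data.writeInterfaceToken(IActivityManager.descriptor);
    data.writeStrongBinder(caller != null ? caller.asBinder() : null);
    data.writeString(callingPackage);
    intent.writeToParcel(data, 0);
    data.writeString(resolvedType);
    data.writeStrongBinder(resultTo);
    data.writeString(resultWho);
    data.writeInt(requestCode);
    data.writeInt(startFlags);
    if (profilerInfo != null) {
        data.writeInt(1);
        profilerInfo.writeToParcel(data, Parcelable.PARCELABLE_WRITE_RETURN_VALUE);
    } else {
        data.writeInt(0);
    }
    if (options != null) {
        data.writeInt(1);
        options.writeToParcel(data, 0);
    } else {
        data.writeInt(0);
    }
    mRemote.transact(START_ACTIVITY_TRANSACTION, data, reply, 0);
    reply.readException();
    int result = reply.readInt();
    reply.recycle();
    data.recycle();
    return result;
}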
This ultimately calls AMS's startActivity method. That is the entire cross-process Activity launch flow.
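To tie everything together, here is a hypothetical direct use of the proxy (real apps reach this path through Context.startActivity and Instrumentation.execStartActivity; the arguments follow the onTransact reads above):
// hypothetical call site; "com.example.app" is a placeholder package name
IActivityManager am = ActivityManagerNative.getDefault(); // an ActivityManagerProxy
int result = am.startActivity(
        null /* caller */, "com.example.app" /* callingPackage */,
        intent, intent.getType() /* resolvedType */,
        null /* resultTo */, null /* resultWho */,
        -1 /* requestCode */, 0 /* startFlags */,
        null /* profilerInfo */, null /* options */);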