2. Registering the CameraService
Similarly, the following entry can be found in init.rc:
service media /system/bin/mediaserver
user media
group system audio camera graphics inet net_bt net_bt_admin
The entry point of this service is the main() function in main_mediaserver.cpp, and it starts only after the servicemanager service is up.
2.1 The registration entry point of CameraService
@main_mediaserver.cpp
int main(int argc, char** argv)
{
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager(); // obtain the service manager's proxy handle
AudioFlinger::instantiate();        // audio service
MediaPlayerService::instantiate();  // media player service
CameraService::instantiate();       // camera service
ProcessState::self()->startThreadPool();  // start this process's binder thread pool
IPCThreadState::self()->joinThreadPool(); // add the current thread to the thread pool
}
This function is fairly simple: it just initializes a few sub-services, and here we only care about the initialization of CameraService. Finally, the function starts its thread pool and waits for other applications to request its services.
2.2 Initializing the Camera service
@CameraService.cpp
void CameraService::instantiate() {
defaultServiceManager()->addService(
String16("media.camera"), new CameraService());
}
This function is very simple: it obtains the service manager's proxy handle and calls its addService function. Note the two parameters of addService. The first is the string "media.camera"; a client must pass exactly the same string to getService when it later looks this service up. The second is a CameraService object; when we later obtain this service, what we get back is a proxy object for this CameraService.
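For illustration, this is how a client later retrieves the service (a minimal sketch following the pattern in frameworks/base/libs/camera/Camera.cpp; retry and death-notification handling are omitted):
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.camera")); // same string as in addService()
sp<ICameraService> cs = interface_cast<ICameraService>(binder); // yields a BpCameraService proxy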
2.3 Obtaining the service manager's proxy handle
@IServiceManager.cpp
sp<IServiceManager> defaultServiceManager()
{
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
    AutoMutex _l(gDefaultServiceManagerLock); // serializes first-time creation (present in the original source)
    if (gDefaultServiceManager == NULL) {
        gDefaultServiceManager = interface_cast<IServiceManager>(
            ProcessState::self()->getContextObject(NULL));
    }
}
return gDefaultServiceManager;
}
The first time this function is called, gDefaultServiceManager is NULL, so the system calls ProcessState::self() to obtain a ProcessState instance. That ProcessState opens the /dev/binder driver for IPCThreadState to use, which ensures that supportsProcesses() returns true.
@ProcessState.cpp
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{ // the caller parameter is ignored
if (supportsProcesses()) {
return getStrongProxyForHandle(0); // caller is ignored; handle 0 (the context manager) is passed directly
} else {
return getContextObject(String16("default"), caller);
}
}
@ProcessState.cpp
bool ProcessState::supportsProcesses() const
{
    return mDriverFD >= 0; // true if the binder driver was opened successfully
}
@ProcessState.cpp
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
return result; // getContextObject thus returns a BpBinder instance
}
On the first call, b is NULL, so a BpBinder instance is created first.
From this call chain we can see that ProcessState::self()->getContextObject(NULL) ultimately returns a BpBinder instance, which then becomes the argument of interface_cast<IServiceManager>(obj).
2.4 Converting the BpBinder instance into an IServiceManager interface
interface_cast is defined as follows:
@IInterface.h
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}
INTERFACE::asInterface(obj) is in turn defined by the following macro:
@IInterface.h
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                       \
    const String16 I##INTERFACE::descriptor(NAME);                      \
    const String16& I##INTERFACE::getInterfaceDescriptor() const {      \
        return I##INTERFACE::descriptor;                                \
    }                                                                   \
    sp<I##INTERFACE> I##INTERFACE::asInterface(const sp<IBinder>& obj)  \
    {                                                                   \
        sp<I##INTERFACE> intr;                                          \
        if (obj != NULL) {                                              \
            intr = static_cast<I##INTERFACE*>(                          \
                obj->queryLocalInterface(                               \
                        I##INTERFACE::descriptor).get());               \
            if (intr == NULL) {                                         \
                intr = new Bp##INTERFACE(obj);                          \
            }                                                           \
        }                                                               \
        return intr;                                                    \
    }                                                                   \
    I##INTERFACE::I##INTERFACE() { }                                    \
    I##INTERFACE::~I##INTERFACE() { }
Expanding this macro, interface_cast<IServiceManager>(obj) ultimately amounts to:
sp<IServiceManager> IServiceManager::asInterface(const sp<IBinder>& obj)
{
sp<IServiceManager> intr;
if (obj != NULL) {
intr = static_cast<IServiceManager*>(
obj->queryLocalInterface(
IServiceManager::descriptor).get());
if (intr == NULL) {
intr = new BpServiceManager(obj);
}
}
return intr;
}
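Why is the new BpServiceManager(obj) branch taken here? Because obj is a BpBinder, which does not override queryLocalInterface(), so the default implementation returns NULL (only local BnInterface objects return themselves):
@Binder.cpp
sp<IInterface> IBinder::queryLocalInterface(const String16& descriptor)
{
    return NULL; // a remote proxy has no local interface
}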
That is, a BpServiceManager object is returned; the obj here is the BpBinder object created earlier.
2.5 The relationship between BpServiceManager and IServiceManager
From the definition of class BpServiceManager it is clear that BpServiceManager derives from IServiceManager (via BpInterface<IServiceManager>); in other words, it is the proxy-side implementation of the IServiceManager interface.
@IServiceManager.cpp
class BpServiceManager : public BpInterface<IServiceManager>
{
public:
BpServiceManager(const sp<IBinder>& impl)
: BpInterface<IServiceManager>(impl)
{
}
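The link comes from the BpInterface template (abridged from IInterface.h): BpInterface inherits from the interface class itself as well as from BpRefBase, which is what lets a BpServiceManager be used wherever an IServiceManager is expected:
@IInterface.h
template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase
{
public:
    BpInterface(const sp<IBinder>& remote);
protected:
    virtual IBinder* onAsBinder();
};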
2.6 The concrete CameraService registration process
Having obtained the BpServiceManager object, instantiate() calls IServiceManager::addService(String16("media.camera"), new CameraService()).
Note that the argument passed to IServiceManager::addService is a pointer to a freshly created CameraService instance, whose inheritance chain is CameraService → BnCameraService → BnInterface → BBinder → IBinder.
Because IServiceManager::addService is a pure virtual function, the call resolves to BpServiceManager::addService.
@IServiceManager.cpp
virtual status_t addService(const String16& name, const sp<IBinder>& service)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
data.writeStrongBinder(service);
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readInt32() : err;
}
First, note data.writeStrongBinder(service):
@Parcel.cpp
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& proc,
const sp<IBinder>& binder, Parcel* out)
{
flat_binder_object obj;
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
if (binder != NULL) {
IBinder *local = binder->localBinder();
if (!local) {
BpBinder *proxy = binder->remoteBinder();
if (proxy == NULL) {
LOGE("null proxy");
}
const int32_t handle = proxy ? proxy->handle() : 0;
obj.type = BINDER_TYPE_HANDLE;
obj.handle = handle;
obj.cookie = NULL;
} else { // we passed in a CameraService object (a local BBinder), so this branch is taken
obj.type = BINDER_TYPE_BINDER;
// binder_transaction() in the driver checks this type and generates a handle for
// this binder; with that handle, readStrongBinder() -> unflatten_binder() can
// later create the corresponding BpBinder object.
obj.binder = local->getWeakRefs();
obj.cookie = local;
}
} else {
obj.type = BINDER_TYPE_BINDER;
obj.binder = NULL;
obj.cookie = NULL;
}
return finish_flatten_binder(binder, obj, out);
}
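For the reverse direction mentioned in the comment above, unflatten_binder() (abridged from Parcel.cpp) shows how a BINDER_TYPE_HANDLE object read back from the driver turns into a BpBinder via getStrongProxyForHandle():
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
        case BINDER_TYPE_BINDER: // same process: use the local object directly
            *out = static_cast<IBinder*>(flat->cookie);
            return finish_unflatten_binder(NULL, *flat, in);
        case BINDER_TYPE_HANDLE: // remote process: build a proxy from the handle
            *out = proc->getStrongProxyForHandle(flat->handle);
            return finish_unflatten_binder(
                static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}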
Second, note that remote() here returns the BpBinder object obtained earlier.
Recall that intr = new BpServiceManager(obj);
@IServiceManager.cpp
And BpServiceManager(const sp<IBinder>& impl)
: BpInterface<IServiceManager>(impl) // base class BpInterface
{
}
@IInterface.h
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
: BpRefBase(remote) // base class BpRefBase
{
}
@ Binder.cpp
BpRefBase::BpRefBase(const sp<IBinder>& o)
: mRemote(o.get()), mRefs(NULL), mState(0)
{
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
if (mRemote) {
mRemote->incStrong(this); // Removed on first IncStrong().
mRefs = mRemote->createWeak(this); // Held for our entire lifetime.
}
}
@include/binder/Binder.h
class BpRefBase : public virtual RefBase
{
protected:
BpRefBase(const sp<IBinder>& o);
virtual ~BpRefBase();
virtual void onFirstRef();
virtual void onLastStrongRef(const void* id);
virtual bool onIncStrongAttempted(uint32_t flags, const void* id);
inline IBinder* remote() { return mRemote; }
....
};
@BpBinder.cpp
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
status_t status = IPCThreadState::self()->transact(
    mHandle, code, data, reply, flags);
return status;
}
mHandle is 0. BpBinder calls down into IPCThreadState::transact, which sends the data to the Service Manager process associated with mHandle.
@IPCThreadState.cpp
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
....
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
....
}
IPCThreadState::transact first calls writeTransactionData to construct a transaction structure for the binder kernel driver.
@IPCThreadState.cpp
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags, int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
tr.data.ptr.offsets = data.ipcObjects();
}
…
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
}
waitForResponse then calls talkWithDriver to do the actual reads and writes against the binder kernel. Once the binder kernel has received the data, the service_manager side is woken up to process it.
@IPCThreadState.cpp
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
..............................................
}
@IPCThreadState.cpp
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
....
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
...
}
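Before that ioctl, talkWithDriver() packs the command stream queued in mOut and the receive buffer mIn into a binder_write_read structure (a simplified sketch; the real function also tracks partially consumed buffers):
binder_write_read bwr;
bwr.write_size   = mOut.dataSize();                    // BC_TRANSACTION queued by writeTransactionData()
bwr.write_buffer = (long unsigned int)mOut.data();
bwr.read_size    = doReceive ? mIn.dataCapacity() : 0; // room for the driver's BR_* replies
bwr.read_buffer  = (long unsigned int)mIn.data();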
At this point the transaction data has been delivered to the binder driver; the following discusses how the driver processes it.
@ drivers/misc/binder.c
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
    printk(KERN_INFO "binder_open: %d:%d\n", current->group_leader->pid, current->pid);
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
    return -ENOMEM;
get_task_struct(current);
proc->tsk = current;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
mutex_lock(&binder_lock);
binder_stats.obj_created[BINDER_STAT_PROC]++;
hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
mutex_unlock(&binder_lock);
if (binder_proc_dir_entry_proc) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc);
}
return 0;
}
Whenever a process opens the /dev/binder driver, a corresponding binder_proc structure is allocated for it. On every subsequent ioctl call, the binder subsystem therefore knows, via this structure, which process it is exchanging data with.
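On the user-space side, it is ProcessState's constructor that triggers this binder_open(): it opens /dev/binder and maps the driver's transaction buffer into the process (a simplified sketch of ProcessState.cpp; version checks and error handling omitted):
mDriverFD = open("/dev/binder", O_RDWR); // ends up in binder_open() above
size_t maxThreads = 15;
ioctl(mDriverFD, BINDER_SET_MAX_THREADS, &maxThreads);
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);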
The client's interaction with the binder driver:
@ drivers/misc/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
thread = binder_get_thread(proc); // look up the caller's thread in this process's thread pool
......
switch (cmd) {
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait); // wake up the suspended caller
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
......
}
The driver handles writes first, then reads. Let us look at binder_thread_write() first. Its core is a loop that parses commands from the write buffer and executes them.
@ drivers/misc/binder.c
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
    void __user *buffer, int size, signed long *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
case BC_INCREFS:
    …
case BC_TRANSACTION: // IPCThreadState sets this cmd in writeTransactionData()
case BC_REPLY: {
    struct binder_transaction_data tr;
    if (copy_from_user(&tr, ptr, sizeof(tr)))
        return -EFAULT;
    ptr += sizeof(tr);
    binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
    break;
}
default:
    …
}
*consumed = ptr - buffer;
}
return 0;
}
In binder_transaction(), the driver first determines target_node, target_proc, and target_thread, queues the request on the target's todo list, and wakes up the thread waiting in binder_thread_read().
@ drivers/misc/binder.c
static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply)
{
target_node = binder_context_mgr_node;
e->to_node = target_node->debug_id;
target_proc = target_node->proc;
…
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
…
switch (fp->type) {
case BINDER_TYPE_BINDER: // writeStrongBinder() -> flatten_binder() set this type
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
node = binder_new_node(proc, fp->binder, fp->cookie);
…
}
ref = binder_get_ref_for_node(target_proc, node);
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->handle = ref->desc; // the generated handle; the client-side BpBinder is later created from it
} break;
......
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait); // wake up the thread waiting on the server side
return;
......
}
The data has thus been written into the binder driver. service_manager's pending read picks it up: binder_thread_read() hands the data over to service_manager.
@ drivers/misc/binder.c
static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, void __user *buffer, int size, signed long *consumed, int non_block)
{
......
/* The following statements hand the client's transaction data over to
   service_manager's read buffer (the buffer is mapped into the target's
   address space via user_buffer_offset rather than copied again). */
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
…
}
By now service_manager has received a BR_TRANSACTION packet from the client. It parses the packet in binder_parse(), handles it accordingly, and finally returns the result via binder_send_reply().
The binder_loop() function:
@frameworks/base/cmds/servicemanager/service_manager.c
void binder_loop(struct binder_state *bs, binder_handler func)
{
…
binder_write(bs, readbuf, sizeof(unsigned));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (unsigned) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // blocks here if there are no pending requests
if (res < 0) {
LOGE("binder_loop: ioctl failed (%s)/n", strerror(errno));
break;
}
res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func); // func here is svcmgr_handler
…
}
}
@ frameworks/base/cmds/servicemanager/binder.c
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uint32_t *ptr, uint32_t size, binder_handler func)
{
......
case BR_TRANSACTION: {
struct binder_txn *txn = (void *) ptr;
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply); // i.e. svcmgr_handler()
binder_send_reply(bs, &reply, txn->data, res);
}
ptr += sizeof(*txn) / sizeof(uint32_t);
break;
}
......
}
binder_parse calls svcmgr_handler (the func parameter), which unpacks the BR_TRANSACTION packet in the reverse order of BpServiceManager's packing. The binder_txn structure here has the same layout as binder_transaction_data. In our example, the transaction code is SVC_MGR_ADD_SERVICE.
@/frameworks/base/cmds/servicemanager/binder.c
int svcmgr_handler(struct binder_state *bs,
struct binder_txn *txn,
struct binder_io *msg,
struct binder_io *reply)
{
…
s = bio_get_string16(msg, &len);
…
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
ptr = do_find_service(bs, s, len);
bio_put_ref(reply, ptr);
return 0;
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
ptr = bio_get_ref(msg);
if (do_add_service(bs, s, len, ptr, txn->sender_euid))
return -1;
break;
default:
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
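The actual bookkeeping happens in do_add_service() (abridged from service_manager.c): the (name, handle) pair is permission-checked and then put on the global svclist, from which do_find_service() later answers SVC_MGR_GET_SERVICE requests:
int do_add_service(struct binder_state *bs,
                   uint16_t *s, unsigned len,
                   void *ptr, unsigned uid)
{
    struct svcinfo *si;
    if (!ptr || (len == 0) || (len > 127))
        return -1;
    if (!svc_can_register(uid, s)) // check against the list of allowed registrants
        return -1;
    si = find_svc(s, len);
    if (si) {
        if (si->ptr)
            return -1;             // duplicate registration is rejected
        si->ptr = ptr;
    } else {                       // first registration: prepend to svclist
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        si->ptr = ptr;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = svcinfo_death;
        si->death.ptr = si;
        si->next = svclist;
        svclist = si;
    }
    binder_acquire(bs, ptr);                   // hold a reference on the handle
    binder_link_to_death(bs, ptr, &si->death); // be notified if the service dies
    return 0;
}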
service_manager thus obtains the handle of the service via bio_get_ref() and registers it.
@frameworks/base/cmds/servicemanager/binder.c
void *bio_get_ref(struct binder_io *bio)
{
struct binder_object *obj;
obj = _bio_get_obj(bio);
if (!obj)
    return 0;
if (obj->type == BINDER_TYPE_HANDLE)
    return obj->pointer;
return 0;
}
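For completeness, binder_send_reply() (abridged from the same binder.c) queues a BC_FREE_BUFFER command to let the driver reclaim the request buffer, plus a BC_REPLY command carrying the reply parcel, and pushes both to the driver in a single write:
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       void *buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        void *buffer;
        uint32_t cmd_reply;
        struct binder_txn txn;
    } __attribute__((packed)) data;
    data.cmd_free = BC_FREE_BUFFER;  // the driver may now reclaim the request buffer
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;       // the result travels back as a reply transaction
    data.txn.target = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {                    // the handler failed: reply with a status code
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offs_size = 0;
        data.txn.data = &status;
        data.txn.offs = 0;
    } else {                         // normal case: reply with the binder_io contents
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data = reply->data0;
        data.txn.offs = reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}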
After svcmgr_handler() returns, binder_send_reply() feeds the result back to the client through the binder driver. Control then unwinds through IPCThreadState::waitForResponse(), IPCThreadState::transact(), BpBinder::transact(), BpServiceManager::addService(), and CameraService::instantiate(), finally returning to main() in main_mediaserver.cpp. The registration of CameraService is now complete.