定义全局性的变量
Static.cpp
Mutex gProcessMutex;
sp<IServiceManager> gDefaultServiceManager;
每个进程只有一个ProcessState
main_mediaserver.cpp
单例模式
ProcessState::self()
gProcess = new ProcessState;
构造函数
static int open_driver()
跟binder驱动发生关系
int fd = open("/dev/binder", O_RDWR);
映射到进程的虚拟地址
#define BINDER_VM_SIZE ((1*1024*1024) - (4096 *2))
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
注册服务管理
gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));
服务管理端句柄默认为0
getStrongProxyForHandle(0);
生成代理端
new BpBinder(handle);
DECLARE_META_INTERFACE(ServiceManager);
#define DECLARE_META_INTERFACE(INTERFACE)
static const android::String16 descriptor;
static android::sp<IServiceManager> asInterface(const android::sp<android::IBinder>& obj);
virtual const android::String16& getInterfaceDescriptor() const;
IServiceManager();
virtual ~IServiceManager();
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)
const android::String16 IServiceManager::descriptor(NAME);
// Returns the canonical interface name ("android.os.IServiceManager",
// set by IMPLEMENT_META_INTERFACE) used to identify this interface
// across the binder boundary.
const android::String16& IServiceManager::getInterfaceDescriptor() const
{
    return IServiceManager::descriptor;
}
// Converts a generic IBinder into an IServiceManager interface pointer.
// If the binder object lives in this process (queryLocalInterface finds a
// matching local implementation) it is returned directly; otherwise a
// BpServiceManager proxy is created to marshal calls to the remote side.
// A NULL binder yields a NULL interface.
android::sp<IServiceManager> IServiceManager::asInterface(const android::sp<android::IBinder>& obj)
{
    android::sp<IServiceManager> result;
    if (obj == NULL) {
        return result;  // null in, null out
    }
    // Prefer the in-process implementation when one exists.
    result = static_cast<IServiceManager*>(
            obj->queryLocalInterface(IServiceManager::descriptor).get());
    if (result == NULL) {
        // Remote binder: wrap it in a proxy object.
        result = new BpServiceManager(obj);
    }
    return result;
}
// Empty out-of-line constructor/destructor emitted by IMPLEMENT_META_INTERFACE.
IServiceManager::IServiceManager() { }
IServiceManager::~IServiceManager() { }
#define CHECK_INTERFACE(interface, data, reply)
if (!data.checkInterface(this)) { return PERMISSION_DENIED; }
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
class BpInterface : public INTERFACE, public BpRefBase
IBinder* const mRemote;
BpRefBase::BpRefBase(const sp<IBinder>& o): mRemote(o.get()), mRefs(NULL), mState(0)
初始化工作
MediaPlayerService::instantiate();
注册服务到ServiceManager
void MediaPlayerService::instantiate()
{
defaultServiceManager()->addService(String16("media.player"), new MediaPlayerService());
}
开始运用进程间通信机制
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
运用到BpBinder
BpBinder.cpp
status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
正在干活的线程
IPCThreadState::IPCThreadState()
用来设置接收到的数据
mIn.setDataCapacity(256);
用来设置发送出去的数据
mOut.setDataCapacity(256);
status_t IPCThreadState::transact(int32_t handle,uint32_t code, const Parcel& data,Parcel* reply, uint32_t flags)
写传输数据
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
status_t IPCThreadState::talkWithDriver(bool doReceive)
在驱动端写消息,将信息写进内核
ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr)
status_t IPCThreadState::executeCommand(int32_t cmd)
void ProcessState::startThreadPool()
新建线程池
sp<Thread> t = new PoolThread(isMain);
服务端编程
bs = binder_open(128*1024);分配128K内存
bs->fd = open("/dev/binder", O_RDWR);
bs->mapsize = mapsize;
将binder驱动的缓冲区映射到本进程的虚拟地址空间
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
通过BINDER_SET_CONTEXT_MGR成为上下文管理者(即ServiceManager)
ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
处理用户消息
binder_loop(bs, svcmgr_handler);
解析数据
int binder_parse(struct binder_state *bs, struct binder_io *bio,uint32_t *ptr, uint32_t size, binder_handler func)
具体消息处理机制
int svcmgr_handler(struct binder_state *bs,struct binder_txn *txn,struct binder_io *msg,struct binder_io *reply)
做实际的增加/删除/获取服务的操作
增加服务函数
int do_add_service(struct binder_state *bs,uint16_t *s, unsigned len,void *ptr, unsigned uid, int allow_isolated)
下面分析MediaPlayer客户端与MediaPlayerService服务端之间的关系
class MediaPlayer : public BnMediaPlayerClient,public virtual IMediaDeathNotifier
获取mediaplayer服务
IMediaDeathNotifier::getMediaPlayerService()
sp<IServiceManager> sm = defaultServiceManager();
binder = sm->getService(String16("media.player"));
在这里实现onTransact
MediaPlayerService : public BnMediaPlayerService
处理具体的switch/case流程
// Binder-native ("Bn") side of IMediaPlayerService: the server-side stub
// that receives transactions from the driver and dispatches them to the
// concrete implementation (MediaPlayerService derives from this class).
class BnMediaPlayerService: public BnInterface<IMediaPlayerService>
{
public:
// Unmarshals `data` according to `code`, performs the requested operation,
// and writes results into `reply` (switch/case dispatch per the notes above).
virtual status_t onTransact( uint32_t code,const Parcel& data,Parcel* reply,uint32_t flags = 0);
};
驱动端
/*
 * Per-process state kept by the binder kernel driver — one instance per
 * process that has opened /dev/binder. Field comments below follow the
 * conventional binder.c semantics; NOTE(review): verify against the exact
 * kernel version in use, as they are not all evident from this excerpt.
 */
struct binder_proc
{
struct hlist_node proc_node; /* link into the driver's global list of procs */
struct rb_root threads; /* rb-tree; presumably the binder_threads of this proc */
struct rb_root nodes; /* rb-tree of binder nodes (objects hosted here) — confirm */
struct rb_root refs_by_desc; /* references to remote nodes, keyed by descriptor/handle */
struct rb_root refs_by_node; /* the same references, keyed by node pointer */
int pid; /* owning process id */
struct vm_area_struct *vma; /* userspace VMA of the mmap'ed binder buffer */
struct mm_struct *vma_vm_mm; /* mm that owns that VMA */
struct task_struct *tsk; /* task that opened the driver */
struct files_struct *files; /* its open-file table */
struct hlist_node deferred_work_node;
int deferred_work;
void *buffer; /* kernel-side address of the transaction buffer */
ptrdiff_t user_buffer_offset; /* kernel-vs-user address delta for that buffer */
struct list_head buffers; /* all buffers; presumably kept in address order */
struct rb_root free_buffers; /* free buffers — presumably keyed by size */
struct rb_root allocated_buffers; /* in-use buffers — presumably keyed by address */
size_t free_async_space; /* bytes still available for async transactions */
struct page **pages; /* physical pages backing the mapped buffer */
size_t buffer_size; /* total size of the mapped area */
uint32_t buffer_free; /* bytes currently free */
struct list_head todo; /* pending work items for this process */
wait_queue_head_t wait; /* idle threads block here waiting for work */
struct binder_stats stats;
struct list_head delivered_death; /* death notifications delivered, not yet acked */
int max_threads; /* max number of threads the driver may request */
int requested_threads; /* thread-spawn requests currently outstanding */
int requested_threads_started; /* spawn requests already satisfied */
int ready_threads; /* threads idle and ready for work */
long default_priority; /* priority restored after servicing a transaction */
struct dentry *debugfs_entry; /* debugfs node for this proc — presumably */
};
/*
 * Payload of the BINDER_WRITE_READ ioctl (see the talkWithDriver() notes
 * earlier in this file): a single syscall both submits commands to the
 * driver and collects returned work. The *_consumed fields are in/out —
 * the driver reports back how much of each buffer it actually processed.
 */
struct binder_write_read
{
binder_size_t write_size; /* in: bytes queued in write_buffer */
binder_size_t write_consumed; /* out: bytes of commands the driver consumed */
binder_uintptr_t write_buffer; /* userspace pointer to outgoing commands */
binder_size_t read_size; /* in: capacity of read_buffer */
binder_size_t read_consumed; /* out: bytes of returned work filled in */
binder_uintptr_t read_buffer; /* userspace pointer receiving incoming work */
};
获取线程
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,binder_uintptr_t binder_buffer, size_t size,binder_size_t *consumed)
static int binder_thread_read(struct binder_proc *proc,struct binder_thread *thread,binder_uintptr_t binder_buffer, size_t size,binder_size_t *consumed, int non_block)