Android Binder Mechanism Study Notes (Part 2): The Driver

    This article analyzes the implementation of the Binder driver. The driver source lives in the Android kernel at <kernel>/drivers/staging/android/binder.c and <kernel>/drivers/staging/android/binder.h; the code base is small, roughly 4,000 lines in total. To download the kernel source, you can use the following command (if https does not work, try http):

git clone https://android.googlesource.com/kernel/goldfish.git

Binder Driver Commands

        The Binder driver supports the following ioctl commands:

#define BINDER_WRITE_READ   		_IOWR('b', 1, struct binder_write_read) // the most frequently used command
#define	BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, int64_t)
#define	BINDER_SET_MAX_THREADS		_IOW('b', 5, size_t) // set the maximum number of handler threads; typically used by services
#define	BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, int)
#define	BINDER_SET_CONTEXT_MGR		_IOW('b', 7, int) // register the context manager; only the ServiceManager may use this
#define	BINDER_THREAD_EXIT		_IOW('b', 8, int)
#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
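
Most of the remaining commands carry a plain scalar or a small struct and are issued with a single ioctl call each; a minimal user-space sketch (hypothetical helper, with fd assumed to be an open /dev/binder descriptor and the definitions above available):

#include <stdio.h>
#include <sys/ioctl.h>

/* Hypothetical helper: configure the thread pool and query the driver's
 * protocol version on an already opened /dev/binder descriptor. */
static int configure_binder(int fd)
{
	size_t max_threads = 15; /* 15 is the default ProcessState requests */
	struct binder_version vers;

	if (ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads) < 0)
		return -1;
	if (ioctl(fd, BINDER_VERSION, &vers) < 0)
		return -1;

	printf("binder protocol version %ld\n", vers.protocol_version);
	return 0;
}
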
        Among these commands, BINDER_WRITE_READ is the most frequently used one; all data writes and reads go through it. BINDER_WRITE_READ takes a binder_write_read structure (described later); the structure's write_buffer/read_buffer fields point to the buffers holding the protocol codes and data to be written to, or read from, the driver.

        Specifically, the write protocol of BINDER_WRITE_READ is defined as follows:

enum BinderDriverCommandProtocol { // literally, the "command protocol"
	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), // send data
	BC_REPLY = _IOW('c', 1, struct binder_transaction_data), // reply with data
	/*
	 * binder_transaction_data: the sent command.
	 */

	BC_ACQUIRE_RESULT = _IOW('c', 2, int),
	/*
	 * not currently supported
	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
	 * Else you have acquired a primary reference on the object.
	 */

	BC_FREE_BUFFER = _IOW('c', 3, int), // free a buffer
	/*
	 * void *: ptr to transaction data received on a read
	 */
        // reference-count control commands for binder_ref
	BC_INCREFS = _IOW('c', 4, int),
	BC_ACQUIRE = _IOW('c', 5, int),
	BC_RELEASE = _IOW('c', 6, int),
	BC_DECREFS = _IOW('c', 7, int),
	/*
	 * int:	descriptor
	 */
        
	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
	/*
	 * not currently supported
	 * int: priority
	 * int: descriptor
	 */

	BC_REGISTER_LOOPER = _IO('c', 11),
	/*
	 * No parameters.
	 * Register a spawned looper thread with the device.
	 */
        // thread enters/exits the binder loop
	BC_ENTER_LOOPER = _IO('c', 12),
	BC_EXIT_LOOPER = _IO('c', 13),
	/*
	 * No parameters.
	 * These two commands are sent as an application-level thread
	 * enters and exits the binder loop, respectively.  They are
	 * used so the binder can have an accurate count of the number
	 * of looping threads it has available.
	 */
        // register a death notification
	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie
	 */
        // clear a death notification
	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
	/*
	 * void *: ptr to binder
	 * void *: cookie
	 */

	BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
	/*
	 * void *: cookie
	 */
};
        The read protocol of BINDER_WRITE_READ is defined as follows:

enum BinderDriverReturnProtocol { // literally, the "return protocol"
	BR_ERROR = _IOR('r', 0, int), // the operation failed
	/*
	 * int: error code
	 */

	BR_OK = _IO('r', 1), // the operation succeeded
	/* No parameters! */

	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), // incoming transaction data
	BR_REPLY = _IOR('r', 3, struct binder_transaction_data), // incoming reply
	/*
	 * binder_transaction_data: the received command.
	 */

	BR_ACQUIRE_RESULT = _IOR('r', 4, int),
	/*
	 * not currently supported
	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
	 * Else the remote object has acquired a primary reference.
	 */

	BR_DEAD_REPLY = _IO('r', 5),
	/*
	 * The target of the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
	 */

	BR_TRANSACTION_COMPLETE = _IO('r', 6), // the data has been sent
	/*
	 * No parameters... always refers to the last transaction requested
	 * (including replies).  Note that this will be sent even for
	 * asynchronous transactions.
	 */

	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
	/*
	 * void *:	ptr to binder
	 * void *: cookie for binder
	 */

	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
	/*
	 * not currently supported
	 * int:	priority
	 * void *: ptr to binder
	 * void *: cookie for binder
	 */

	BR_NOOP = _IO('r', 12), // no-op; nothing to handle
	/*
	 * No parameters.  Do nothing and examine the next command.  It exists
	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
	 */

	BR_SPAWN_LOOPER = _IO('r', 13), // request to spawn a new thread
	/*
	 * No parameters.  The driver has determined that a process has no
	 * threads waiting to service incomming transactions.  When a process
	 * receives this command, it must spawn a new service thread and
	 * register it via bcENTER_LOOPER.
	 */

	BR_FINISHED = _IO('r', 14),
	/*
	 * not currently supported
	 * stop threadpool thread
	 */

	BR_DEAD_BINDER = _IOR('r', 15, void *),
	/*
	 * void *: cookie
	 */
	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
	/*
	 * void *: cookie
	 */

	BR_FAILED_REPLY = _IO('r', 17),
	/*
	 * The the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
	 */
};
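
To give a feel for how these return codes are consumed, here is a rough sketch of a service thread's read loop in user space (hypothetical helpers; in Android the real logic lives in IPCThreadState::executeCommand):

#include <stddef.h>
#include <stdint.h>
/* assumes the binder.h definitions above; the two handlers are hypothetical */
void dispatch_transaction(const struct binder_transaction_data *tr);
void spawn_new_looper_thread(void);

/* Sketch: walk the read buffer filled in by BINDER_WRITE_READ and dispatch
 * on the BR_ codes; parsing of most per-command payloads is elided. */
static void handle_read_buffer(const uint8_t *buf, size_t consumed)
{
	const uint8_t *ptr = buf;
	const uint8_t *end = buf + consumed;

	while (ptr < end) {
		uint32_t cmd = *(const uint32_t *)ptr;
		ptr += sizeof(uint32_t);

		switch (cmd) {
		case BR_NOOP:
			break; /* nothing to do, look at the next command */
		case BR_TRANSACTION: {
			/* payload: a binder_transaction_data describing the request */
			const struct binder_transaction_data *tr =
				(const struct binder_transaction_data *)ptr;
			ptr += sizeof(*tr);
			dispatch_transaction(tr); /* unpack data, call the BBinder, send BC_REPLY */
			break;
		}
		case BR_SPAWN_LOOPER:
			spawn_new_looper_thread(); /* start a new thread and register it with the driver */
			break;
		default:
			/* the remaining BR_ codes are elided in this sketch */
			break;
		}
	}
}
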

Structs in the Binder Driver

        Let's first introduce the main structs; this makes the rest of the article easier to follow. If this section feels dry, you can also skip it for now and come back to it when the structs appear later on.

binder_proc

        Simply put, binder_proc represents a process that uses the Binder driver and stores the process's related state. The driver creates one binder_proc for every process that opens the /dev/binder file.

struct binder_proc {
	struct hlist_node proc_node; // node in the global binder_procs hash list
	struct rb_root threads; // red-black tree of binder_thread; every thread in this process that uses the driver is kept here
	struct rb_root nodes; // red-black tree of the binder_node entities owned by this process
	struct rb_root refs_by_desc; // red-black tree of the binder_refs held by this process, keyed by binder_ref.desc
	struct rb_root refs_by_node; // same content as refs_by_desc, but keyed by binder_ref.node
	int pid; // process id
	struct vm_area_struct *vma; // the process's mapped virtual memory area
	struct task_struct *tsk; // process descriptor
	struct files_struct *files; // open file table
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer; // kernel-space address of the binder buffer
	ptrdiff_t user_buffer_offset; // offset between the buffer's kernel-space and user-space addresses

	struct list_head buffers; // list of binder_buffers
	struct rb_root free_buffers; // red-black tree of free binder_buffers
	struct rb_root allocated_buffers; // red-black tree of allocated binder_buffers
	size_t free_async_space;

	struct page **pages; // array of allocated physical pages
	size_t buffer_size; // total size of the binder buffer
	uint32_t buffer_free; // amount of free buffer space
	struct list_head todo; // list of pending binder_work items
	wait_queue_head_t wait; // kernel wait queue; see wait_event & wake_up
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads; // maximum number of worker threads
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
};
        For a given key, the red-black tree implementation provided by the Linux kernel offers O(log N) lookups (kernel rbtrees are not the topic of this article, so they are not covered here; interested readers can look them up). To make binder_ref lookups fast, refs_by_desc and refs_by_node store exactly the same set of references but sorted on different keys, so the driver gets efficient lookups no matter which key it searches by.
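
As a sketch of how such a lookup by handle works against refs_by_desc (closely modeled on the driver's own binder_get_ref(); locking and debug output omitted):

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;		/* descend into the smaller half */
		else if (desc > ref->desc)
			n = n->rb_right;	/* descend into the larger half */
		else
			return ref;		/* found the reference for this handle */
	}
	return NULL;				/* no binder_ref with this desc in the process */
}

refs_by_node is walked the same way, except that the comparison key is the binder_node pointer instead of desc.
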

binder_thread

        binder_thread represents a thread inside a binder_proc and stores thread-related state. The driver creates a binder_thread for every thread that issues an ioctl on the /dev/binder file.

struct binder_thread {
	struct binder_proc *proc; // the binder_proc of the process this thread belongs to
	struct rb_node rb_node; // links this binder_thread into the threads red-black tree of its binder_proc
	int pid; // thread id (yes, the kernel calls a thread id "pid" as well)
	int looper;
	struct binder_transaction *transaction_stack; // stack of in-flight binder_transactions
	struct list_head todo; // list of pending binder_work items
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait; // kernel wait queue; see wait_event & wake_up
	struct binder_stats stats;
};

binder_node

        binder_node represents a binder entity in kernel space; every binder_node is associated with a user-space BBinder object.

struct binder_node {
	int debug_id;
	struct binder_work work; // work.type = BINDER_WORK_NODE
	union {
		struct rb_node rb_node; // links this binder_node into the nodes red-black tree of its binder_proc
		struct hlist_node dead_node;
	};
	struct binder_proc *proc; // the binder_proc of the process this node belongs to
	struct hlist_head refs; // list of all binder_refs that reference this binder_node
	int internal_strong_refs; // count of strong binder_refs pointing at this node
	int local_weak_refs; // local weak reference count
	int local_strong_refs; // local strong reference count
	void __user *ptr; // pointer to the user-space binder instance, normally the BBinder's weak reference
	void __user *cookie; // user-defined data, normally the pointer to the BBinder itself
	unsigned has_strong_ref : 1;
	unsigned pending_strong_ref : 1;
	unsigned has_weak_ref : 1;
	unsigned pending_weak_ref : 1;
	unsigned has_async_transaction : 1;
	unsigned accept_fds : 1;
	int min_priority : 8; // minimum priority
	struct list_head async_todo; // queue of asynchronous transactions
};

binder_ref

        binder_ref represents a binder reference in kernel space. Every valid user-space BpBinder is associated with a particular binder_ref, and a binder_ref is always associated with a binder_node.

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc; // links this binder_ref into the refs_by_desc red-black tree of its binder_proc
	struct rb_node rb_node_node; // likewise, links it into the refs_by_node red-black tree of its binder_proc
	struct hlist_node node_entry; // links this binder_ref into the refs list of its binder_node
	struct binder_proc *proc; // the owning binder_proc
	struct binder_node *node; // the binder_node this reference points to
	uint32_t desc; // handle number, equal to BpBinder.mHandle
	int strong; // strong reference count
	int weak; // weak reference count
	struct binder_ref_death *death; // binder death notification
};

binder_work

        binder_work represents a pending Binder work item; it can be queued on a todo list.

struct binder_work {
    struct list_head entry; // links this binder_work into the todo list of a binder_proc or binder_thread
    enum {
        BINDER_WORK_TRANSACTION = 1,  // the owner of this binder_work is a binder_transaction
        BINDER_WORK_TRANSACTION_COMPLETE, // the binder_transaction data was sent successfully
        BINDER_WORK_NODE,    // the owner of this binder_work is a binder_node
        BINDER_WORK_DEAD_BINDER, // the owner is a binder_ref_death; notify the client that the service has died
        BINDER_WORK_DEAD_BINDER_AND_CLEAR,  // same as BINDER_WORK_DEAD_BINDER, followed by BINDER_WORK_CLEAR_DEATH_NOTIFICATION
        BINDER_WORK_CLEAR_DEATH_NOTIFICATION, // the owner is a binder_ref_death; notify the client that the death notification has been cleared
    } type; // type of the work item
};
        In most cases a binder_work is used as a member of a binder_transaction; besides that, it is also a member of binder_node.
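
To give a feel for how these work items get queued and consumed, here is a rough sketch (hypothetical helper, not the driver's literal code) of how the driver hands a work item to a target and wakes a waiting thread, using the standard kernel list and wait-queue primitives the structs above are built on:

/* Sketch: queue a work item either on a specific thread's todo list or on
 * the process-wide list, then wake up a waiter to process it. */
static void binder_queue_work(struct binder_proc *proc,
			      struct binder_thread *thread,
			      struct binder_work *work)
{
	if (thread) {
		list_add_tail(&work->entry, &thread->todo); /* thread-local queue */
		wake_up_interruptible(&thread->wait);       /* wake that specific thread */
	} else {
		list_add_tail(&work->entry, &proc->todo);   /* process-wide queue */
		wake_up_interruptible(&proc->wait);         /* wake any thread blocked in a read */
	}
}

The woken thread, blocked inside a BINDER_WRITE_READ read, pops the work item, turns it into the corresponding BR_ return code, and copies it into the caller's read_buffer.
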

binder_write_read

        binder_write_read is the data type required by the BINDER_WRITE_READ command. It is defined as follows:

struct binder_write_read {
	signed long	write_size;	/* bytes to write */
	signed long	write_consumed;	/* bytes consumed by driver */
	unsigned long	write_buffer;
	signed long	read_size;	/* bytes to read */
	signed long	read_consumed;	/* bytes consumed by driver */
	unsigned long	read_buffer;
};
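
As a rough illustration of how user space drives this command (a minimal sketch, not the actual IPCThreadState code; fd is assumed to be an open /dev/binder descriptor and the kernel definitions quoted above to be available), a write-only call that sends BC_ENTER_LOOPER could look like this:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical helper: tell the driver that this thread has entered the binder loop. */
static int enter_looper(int fd)
{
	struct binder_write_read bwr;
	uint32_t cmd = BC_ENTER_LOOPER;         /* protocol code placed in the write buffer */

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (unsigned long)&cmd; /* address of the data to write */
	bwr.write_size = sizeof(cmd);           /* number of bytes available to consume */
	bwr.read_size = 0;                      /* no read requested in this call */

	/* a single ioctl both writes and (optionally) reads; here it only writes */
	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}

On return, write_consumed reports how many bytes the driver actually consumed; when read_size is non-zero the calling thread may block until the driver has something to place into read_buffer, and read_consumed then tells how much was returned.
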

binder_transaction_data

        binder_transaction_data is the data type used by the write protocols BC_TRANSACTION and BC_REPLY and by the read protocols BR_TRANSACTION and BR_REPLY. Binder users (i.e. clients, services, and the Service Manager) exchange data with the Binder driver through binder_transaction_data.

struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		size_t	handle;	/* target descriptor of command transaction */
		void	*ptr;	/* target descriptor of return transaction */
	} target;
	void		*cookie;	/* target object cookie */
	unsigned int	code;		/* transaction command */ // command code defined by the service; for ServiceManager's addService, for example, code = ADD_SERVICE_TRANSACTION

	/* General information about the transaction. */
	unsigned int	flags; //TF_ACCEPT_FDS TF_ONE_WAY
	pid_t		sender_pid; // sender process id
	uid_t		sender_euid; // sender user id
	size_t		data_size;	/* number of bytes of data */
	size_t		offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct { 
			/* transaction data */
			const void	*buffer; 
			/* offsets from buffer to flat_binder_object structs */
			const void	*offsets;
		} ptr;
		uint8_t	buf[8];
	} data;
};
        First, as the comments state, the target and cookie members are only used by the BC_TRANSACTION and BR_TRANSACTION protocols. Typically, when a client writes data with BC_TRANSACTION it specifies the receiver through target.handle. When a service reads a BR_TRANSACTION, target.ptr holds the address of the user-space binder entity (in practice, the address of the BBinder's weak reference) and cookie holds the user data (in practice, it is cookie that actually holds the BBinder's address). When writing with BC_REPLY the driver ignores both members, and in a BR_REPLY the target and cookie members read back as empty.

        Second, the meaning of flags is given by the constants below. The commonly used flags are TF_ONE_WAY and TF_ACCEPT_FDS.

enum transaction_flags {
	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
};

        Finally, and this is the most important point for understanding binder_transaction_data: the structure does not carry the transferred data itself. Instead, ptr.buffer holds the memory address of the data, and ptr.offsets holds the start address of an array of offsets locating the Binder objects (that is, the flat_binder_object structs) inside ptr.buffer. data_size records the length of the data, and offsets_size is the length of the offset array in bytes (so the number of entries in the offset array is offsets_size/4).

        As an example (this scenario is covered again in later articles; don't worry if it is not fully clear yet, it is only here to illustrate what these members mean): when a client calls ServiceManager's getService interface to request MediaPlayerService's binder, the client side invokes BpServiceManager's checkService function:

    virtual sp<IBinder> checkService( const String16& name) const
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());//IServiceManager::getInterfaceDescriptor()="android.os.IServiceManager"
        data.writeString16(name); // name = "media.player"
        remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply); // internally this ends up calling ioctl on /dev/binder to write the request and read the reply
        return reply.readStrongBinder();
    }
        Here is the implementation of Parcel::writeInterfaceToken:

// Write RPC headers.  (previously just the interface token)
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    writeInt32(IPCThreadState::self()->getStrictModePolicy() |
               STRICT_MODE_PENALTY_GATHER);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);
}
        As you can see, this simply writes an int and a string. Together with the service name written afterwards, the parcel therefore contains one int32 and two strings. Also, when writeString16 runs it first writes an int32 holding the string length before the string contents, so the data in memory might look like this (assuming the buffer starts at address 0x10000; alignment padding is ignored for simplicity):

  • In this case no binder object is written to the driver (the payload contains no flat_binder_object), so no offsets buffer is created. 
  • buffer occupies 0x10000~0x1005B, 92 bytes in total
  • the Strict Mode Policy is an int32 occupying the four bytes 0x10000~0x10003.
  • the interface token occupies 0x10004~0x1003D, 58 bytes in total, of which:
    • the string length is an int32 occupying 0x10004~0x10007, 4 bytes
    • "android.os.IServiceManager" is 26 characters; with the trailing null character and 16-bit characters it needs (26+1)*2 = 54 bytes, occupying 0x10008~0x1003D
  • the service name occupies 0x1003E~0x1005B, 30 bytes in total, of which:
    • the string length is an int32 occupying 0x1003E~0x10041, 4 bytes
    • "media.player" is 12 characters; with the trailing null character it needs (12+1)*2 = 26 bytes, occupying 0x10042~0x1005B

So the binder_transaction_data looks like this:

binder_transaction_data tr;
tr.data_size=92;
tr.offsets_size=0;
tr.ptr.buffer=0x10000;
tr.ptr.offsets=NULL;
        After the Service Manager receives the request, it sends MediaPlayerService's binder back to the client. In this case the data the Service Manager sends is a single flat_binder_object struct, and the Service Manager's data buffer looks like this (again assuming the buffer starts at address 0x10000):

  • the offset array occupies 0x10000~0x10003, 4 bytes, and its single entry is 0 (i.e. the flat_binder_object sits at offset 0 in the buffer).
  • buffer occupies 0x10010~0x1001F, 16 bytes in total (sizeof(flat_binder_object) = 16)

So the binder_transaction_data looks like this:

binder_transaction_data tr;
tr.data_size=16;
tr.offsets_size=4;
tr.ptr.buffer=0x10010;
tr.ptr.offsets=0x10000;
        Feeling a bit lost? A quick summary (a code sketch follows the list):

  1. tr.ptr.buffer holds the start address of the data
  2. tr.data_size holds the length of the data
  3. tr.offsets_size is 4 times the number of flat_binder_objects in the buffer, because each offset is an int32 while offsets_size is counted in bytes
  4. tr.ptr.offsets is the start address of the offset array, which contains offsets_size/4 entries
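
Putting the second example into code form (a hypothetical sketch; in practice Parcel and IPCThreadState build this for you, and the exact values are the ones listed above):

#include <string.h>
/* assumes the binder_transaction_data / flat_binder_object definitions above */

/* Sketch: fill a binder_transaction_data whose payload is one flat_binder_object. */
static void fill_single_object_transaction(struct binder_transaction_data *tr,
					   struct flat_binder_object *obj,
					   size_t *offset_entry)
{
	*offset_entry = 0;                        /* the object sits at offset 0 of the buffer */

	memset(tr, 0, sizeof(*tr));
	tr->data.ptr.buffer = obj;                /* the article's tr.ptr.buffer, via the data union */
	tr->data_size = sizeof(*obj);             /* 16 bytes on a 32-bit kernel */
	tr->data.ptr.offsets = offset_entry;      /* offset array with a single entry */
	tr->offsets_size = sizeof(*offset_entry); /* one entry = 4 bytes on a 32-bit build */
}

With the buffer layout from the example, this yields data_size = 16 and offsets_size = 4, matching the values shown above.
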

flat_binder_object

        The flat_binder_object struct represents a binder object being transferred through the Binder driver.

struct flat_binder_object {
	/* 8 bytes for large_flat_header. */
	unsigned long		type;  // binder type
	unsigned long		flags; // flags   

	/* 8 bytes of data. */
	union {
		void		*binder;	/* local object */  // points to the BBinder
		signed long	handle;		/* remote object */ // the BpBinder's handle
	};

	/* extra data associated with local object */
	void			*cookie;      // user-defined data
};
        The possible values of the type member are:

#define B_PACK_CHARS(c1, c2, c3, c4) \
	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

enum {
	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
};
        Ignoring the strong/weak distinction, type falls into three categories: BINDER, HANDLE, and FD.

  • When type is a BINDER type, the flat_binder_object represents a binder_node; flat_binder_object.binder equals the corresponding binder_node.ptr and points to the BBinder in the service's user space.
  • When type is a HANDLE type, the flat_binder_object represents a binder_ref; flat_binder_object.handle equals the corresponding binder_ref.desc, which in turn equals the handle of the BpBinder in the client's user space.
  • When type is FD, the flat_binder_object represents a file binder, and flat_binder_object.handle is the file descriptor within the sending process. File binders are not covered in this article; interested readers can refer to section 5.2.1 of "Android设计与实现 设计篇".

        In addition, the cookie and flags members of flat_binder_object only take effect when type is a BINDER type and the Binder driver has not yet created the corresponding binder_node. In that case the driver creates a new binder_node, sets binder_node.min_priority and binder_node.accept_fds according to flags, and stores cookie into binder_node.cookie. In every other case these two members are ignored.

        The possible values of the flags member are:

enum {
	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, // binder_node.min_priority = flat_binder_object.flags & FLAT_BINDER_FLAG_PRIORITY_MASK
	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, // binder_node.accept_fds = flat_binder_object.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS
};
        When the Binder driver passes a flat_binder_object between processes, it has to translate it. Take a client requesting MediaPlayerService's binder from the ServiceManager as an example: the flat_binder_object sent by the ServiceManager has type = BINDER_TYPE_HANDLE and handle = the binder_ref.desc that MediaPlayerService has inside the ServiceManager's process. If the driver forwarded this data to the client untouched, the client could still construct a BpBinder from flat_binder_object.handle, but when the client later used that BpBinder to talk to MediaPlayerService, the handle would resolve to no binder_ref at all, or to the wrong one, and the communication would fail.

         The translation logic is outlined below (a code-style sketch follows the two lists):

If the sent flat_binder_object.type is a BINDER type:

  1. Look up the binder_node matching flat_binder_object.binder in the sending process
  2. If it is found, go to step 4; otherwise go to step 3
  3. Create a new binder_node in the sending process, with binder_node.ptr = flat_binder_object.binder
  4. Look up the binder_ref for this binder_node in the receiving process; if it is found, go to step 6, otherwise go to step 5
  5. Create a new binder_ref in the receiving process, set binder_ref.node = binder_node and allocate a binder_ref.desc value
  6. Change flat_binder_object.type to the HANDLE type (BINDER_TYPE_BINDER -> BINDER_TYPE_HANDLE; BINDER_TYPE_WEAK_BINDER -> BINDER_TYPE_WEAK_HANDLE)
  7. Set flat_binder_object.handle = binder_ref.desc

If the sent flat_binder_object.type is a HANDLE type:

  1. Look up the binder_ref matching flat_binder_object.handle in the sending process; if it is found, go to step 3, otherwise go to step 2
  2. Set the error to BR_FAILED_REPLY and go to step 11 (this check prevents a client from reaching arbitrary services by guessing handle values)
  3. Check whether binder_ref.node.proc is the receiving process; if it is, go to step 4, otherwise go to step 7
  4. Change flat_binder_object.type to the BINDER type (BINDER_TYPE_HANDLE -> BINDER_TYPE_BINDER; BINDER_TYPE_WEAK_HANDLE -> BINDER_TYPE_WEAK_BINDER)
  5. Set flat_binder_object.binder = binder_ref.node.ptr
  6. Set flat_binder_object.cookie = binder_ref.node.cookie, then go to step 11
  7. Look up the corresponding binder_ref in the receiving process; if it is not found, go to step 8, otherwise go to step 10
  8. Create a new binder_ref in the receiving process
  9. Set binder_ref(receiver).node = binder_ref(sender).node and allocate binder_ref(receiver).desc
  10. Set flat_binder_object.handle = binder_ref(receiver).desc
  11. Done
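
A compressed code-style sketch of the BINDER-to-HANDLE direction (the helper names mirror the driver's binder_get_node, binder_new_node and binder_get_ref_for_node; error handling and reference-count updates are omitted, so treat this as an outline rather than the driver's literal code):

/* Sketch: translate a local binder object into a handle for the receiver. */
static void translate_binder(struct binder_proc *sender,
			     struct binder_proc *receiver,
			     struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_ref *ref;

	node = binder_get_node(sender, fp->binder);                 /* steps 1-2 */
	if (node == NULL)
		node = binder_new_node(sender, fp->binder, fp->cookie); /* step 3 */

	ref = binder_get_ref_for_node(receiver, node);              /* steps 4-5: find or create */

	if (fp->type == BINDER_TYPE_BINDER)                         /* step 6 */
		fp->type = BINDER_TYPE_HANDLE;
	else
		fp->type = BINDER_TYPE_WEAK_HANDLE;
	fp->handle = ref->desc;                                     /* step 7 */
}

The HANDLE direction follows the second list in the same way: either the handle is converted back into the node's ptr/cookie when the receiver is the node's owning process, or a new binder_ref with a fresh desc is created in the receiver.
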


binder_transaction

        If binder_transaction_data is the vehicle for exchanging data between the Binder driver and its users, then binder_transaction is the carrier that moves data between processes inside the driver itself.

struct binder_transaction {
	int debug_id;
	struct binder_work work; // links this transaction into the todo list of a binder_proc or binder_thread
	struct binder_thread *from; // the sending thread
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc; // the target process
	struct binder_thread *to_thread; // the target thread
	struct binder_transaction *to_parent;
	unsigned need_reply : 1; // set to 1 for BC_TRANSACTION commands whose binder_transaction_data.flags do not contain TF_ONE_WAY
	/*unsigned is_dead : 1;*/ /* not used at the moment */

	struct binder_buffer *buffer; // binder_buffer holding the transaction data
	unsigned int	code;    // same as binder_transaction_data.code
	unsigned int	flags;   // same as binder_transaction_data.flags
	long	priority; // the sending thread's priority
	long	saved_priority; // the receiving thread's saved priority
	uid_t	sender_euid; // the sender's effective user id
};

binder_ref_death

        Ideally, the management of strong and weak references on binder_node guarantees that neither the binder_node nor the BBinder inside the service is reclaimed while a client is making requests. Binder is, however, a cross-process mechanism: an exception, a kill, or the low-memory killer (LMK) can terminate the service process, and the binder_node gets reclaimed anyway. For this reason the Binder driver offers clients a death-notification facility, and binder_ref_death exists to support it.

struct binder_ref_death {
    struct binder_work work; // links into a todo list; binder_work.type is one of BINDER_WORK_DEAD_BINDER,
                             //                                                    BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                             //                                                    BINDER_WORK_CLEAR_DEATH_NOTIFICATION
    void __user *cookie;
};
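
From user space, a client registers for this notification by writing BC_REQUEST_DEATH_NOTIFICATION into the write buffer. A minimal sketch (hypothetical helper, assuming a 32-bit build where the payload is a 32-bit handle followed by a cookie pointer, fd is an open /dev/binder descriptor, and the kernel definitions quoted above are available):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical helper: ask the driver to deliver BR_DEAD_BINDER for `handle`.
 * The cookie is echoed back so user space can tell which binder died. */
static int request_death_notification(int fd, uint32_t handle, void *cookie)
{
	struct {
		uint32_t cmd;    /* BC_REQUEST_DEATH_NOTIFICATION */
		uint32_t target; /* the handle (binder_ref.desc) to watch */
		void *cookie;    /* user data, e.g. a pointer to the BpBinder */
	} __attribute__((packed)) buf;
	struct binder_write_read bwr;

	buf.cmd = BC_REQUEST_DEATH_NOTIFICATION;
	buf.target = handle;
	buf.cookie = cookie;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (unsigned long)&buf;
	bwr.write_size = sizeof(buf);

	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}

When the service process later dies, the driver queues a binder_ref_death work item of type BINDER_WORK_DEAD_BINDER, and the client's next read returns BR_DEAD_BINDER together with the cookie; the client is then expected to answer with BC_DEAD_BINDER_DONE.
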

Methods in the Binder Driver

         When the Linux kernel boots, the following code registers the Binder driver:

static const struct file_operations binder_fops = {
    .owner = THIS_MODULE,
    .poll = binder_poll, // poll is a function pointer declared as unsigned int (*poll) (struct file *, struct poll_table_struct *);
                         // assigning binder_poll to this member lets the kernel invoke binder_poll through it,
                         // and the members below work the same way
    .unlocked_ioctl = binder_ioctl,
    .mmap = binder_mmap,
    .open = binder_open,
    .flush = binder_flush,
    .release = binder_release,
};

static struct miscdevice binder_miscdev = {
    .minor = MISC_DYNAMIC_MINOR, // let the kernel pick the minor device number
    .name = "binder", // device file name
    .fops = &binder_fops
};

static int __init binder_init(void)
{
	int ret;

        ...... // create the binder directory under /sys/kernel/debug/

        ret = misc_register(&binder_miscdev); // register the misc device
	
        ...... // then create the proc directory under /sys/kernel/debug/binder
	
        return ret;
}

......

device_initcall(binder_init); // declare the driver's init function
        misc_register registers a miscellaneous device; once it has run, the binder file appears under /dev/. Misc devices are character devices, so binder is a character device as well. binder_miscdev.fops, i.e. binder_fops, determines which system calls the Binder driver provides.


        In general, a Binder user goes through the following steps (a minimal user-space sketch follows the list):

  1. Call open on /dev/binder and obtain a file descriptor fd
  2. Call mmap to map fd
  3. Call ioctl on fd to write and read data
  4. Call close on fd explicitly, or let the kernel close it when the process exits
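
A bare-bones sketch of steps 1 to 3 (the 1 MB mapping size is illustrative; Android's ProcessState does essentially this with its own BINDER_VM_SIZE):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
/* assumes the binder.h definitions quoted above are available */

int main(void)
{
	int fd;
	void *vm_start;

	/* step 1: open the device */
	fd = open("/dev/binder", O_RDWR);
	if (fd < 0) {
		perror("open /dev/binder");
		return 1;
	}

	/* step 2: map the receive buffer (the size here is illustrative) */
	vm_start = mmap(NULL, 1024 * 1024, PROT_READ,
			MAP_PRIVATE | MAP_NORESERVE, fd, 0);
	if (vm_start == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* step 3: issue BINDER_WRITE_READ ioctls here, as sketched earlier */

	/* step 4: close the descriptor when done */
	close(fd);
	return 0;
}
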

        The sections below walk through how each of these entry points is implemented.

binder_open

        When a Binder user executes, in user space,

    int fd = open("/dev/binder", O_RDWR);
        the process traps into kernel mode and the kernel invokes binder_open:

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL); // kzalloc: allocate memory and zero it
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current); // take a reference on the current task_struct
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo); // initialize the process's todo list (think of it as new-ing a container in Java)
	init_waitqueue_head(&proc->wait); // initialize the wait queue
	proc->default_priority = task_nice(current); // record the current priority

	binder_lock(__func__); // enter the critical section; __func__ expands to the current function name

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs); // link proc into the global binder_procs list
	proc->pid = current->group_leader->pid; // current process id
	INIT_LIST_HEAD(&proc->delivered_death); // initialize proc->delivered_death
	filp->private_data = proc; // store proc in filp so that binder_mmap and binder_ioctl can retrieve it later

	binder_unlock(__func__); // leave the critical section

	if (binder_debugfs_dir_entry_proc) { // debugfs bookkeeping; can be ignored
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}      

        The code is fairly straightforward: it creates a binder_proc instance, initializes it, and stores it in the file struct filp.

binder_mmap

        When the process calls, in user space:

mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        the process traps into kernel mode and binder_mmap runs:

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data; // retrieve the binder_proc created in binder_open
	const char *failure_string;
	struct binder_buffer *buffer;

	if ((vma->vm_end - vma->vm_start) > SZ_4M) // the binder buffer is capped at 4 MB
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); // reserve a kernel-space virtual address range of size vma->vm_end - vma->vm_start
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; // compute the offset between the user-space and kernel-space virtual addresses; this is important!
	mutex_unlock(&binder_mmap_lock); 

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) { 
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {  
			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); 
			vma->vm_start += PAGE_SIZE; 
		}  
	} 
#endif 
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); // allocate the page pointer array
	if (proc->pages == NULL) {  
		ret = -ENOMEM; 
		failure_string = "alloc page array";  
		goto err_alloc_pages_failed; 
	} 
	proc->buffer_size = vma->vm_end - vma->vm_start; 

	vma->vm_ops = &binder_vm_ops; // install the vma operations
	vma->vm_private_data = proc; 

	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf"; 
		goto err_alloc_small_buf_failed; 
	} 
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers); // initialize the buffer list
	list_add(&buffer->entry, &proc->buffers); // record the buffer just created
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer); // insert it into the free-buffer tree
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(proc->tsk); 
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

        binder_mmap performs two main operations. First, it reserves a kernel-space virtual address range of the same size as the user-space mapping and records the offset between the two start addresses.

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); // reserve a kernel-space virtual address range of size vma->vm_end - vma->vm_start
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; // compute the offset between the user-space and kernel-space virtual addresses; this is important!
        In the code above, vma is Linux's per-process virtual memory area descriptor and represents the user-space address range that mmap allocated. get_vm_area then reserves a kernel-space address range of the same size. Finally, the offset between the two address ranges (vma->vm_start - proc->buffer) is computed and saved.
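
This single offset is what later allows the driver to hand a kernel buffer straight to user space without another copy: converting a kernel-space buffer address to the matching user-space address is plain pointer arithmetic (a sketch, assuming kernel_addr points inside proc->buffer):

/* Sketch: the same physical pages are mapped at both addresses, so the two
 * views differ only by the constant user_buffer_offset recorded above. */
static void __user *to_user_address(struct binder_proc *proc, void *kernel_addr)
{
	return (void __user *)((uintptr_t)kernel_addr + proc->user_buffer_offset);
}
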

        Second, it calls binder_update_page_range to back the first page of the newly reserved range with physical memory, and records the resulting binder_buffer in proc->buffers and proc->free_buffers:

if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
	......
}