参考:
分析类:
http://blog.csdn.net/universus/article/details/6211589
http://www.cnblogs.com/albert1017/p/3849585.html
http://blog.csdn.net/a220315410/article/details/17914531
https://m.oschina.net/blog/149575
https://m.oschina.net/blog/167314
源码:
http://androidxref.com/6.0.0_r1/xref/frameworks/native/cmds/servicemanager/service_manager.c
http://androidxref.com/6.0.0_r1/xref/frameworks/native/cmds/servicemanager/binder.c#96
可以在这里查一下要下载哪些源码:
https://android.googlesource.com/
如:
git clone https://android.googlesource.com/platform/frameworks/native
/*
 * binder_mmap() - handle a user process's mmap() on the binder device.
 *
 * @filp: open binder device file; filp->private_data holds the binder_proc.
 * @vma:  the new user-space VMA the kernel created for this mmap() call.
 *
 * Reserves an equally sized range of kernel virtual addresses, records the
 * constant offset between the user range and the kernel range, allocates
 * the per-page pointer array, maps one initial page, and initialises the
 * buffer list with a single large free buffer.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
//vma is the linear area (VMA) the kernel added to the user process when
//it called mmap().
int ret;
struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
struct binder_buffer *buffer;
if (proc->tsk != current)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M; //clamp the mapping to at most 4 MB
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
ret = -EPERM;
failure_string = "bad vm_flags";
goto err_bad_arg;
}
//mapping must not be copied on fork and can never become writable
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
mutex_lock(&binder_mmap_lock);
if (proc->buffer) { //this process already mapped once; a second mmap is rejected
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
//Reserve an unused range of kernel virtual addresses of the same size as
//the user vma, and allocate a vm_struct describing it (the vm_struct
//itself needs physical memory; all of that is done inside get_vm_area).
//Reference: http://networking.ctocio.com.cn/tips/412/9237912.shtml
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
goto err_get_vm_area_failed;
}
proc->buffer = area->addr;//start of the newly reserved kernel linear range
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
//Key step: record the constant distance between the kernel linear range
//and the user process's linear range.
mutex_unlock(&binder_mmap_lock);
#ifdef CONFIG_CPU_CACHE_VIPT
if (cache_is_vipt_aliasing()) {
while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
vma->vm_start += PAGE_SIZE;
}
}
#endif
/*Allocate the array that holds one struct page pointer per page of the
mapping (kzalloc returns a kernel linear address to physically
contiguous, zeroed memory).
(vma->vm_end - vma->vm_start) / PAGE_SIZE is the number of pages needed;
sizeof(proc->pages[0]) is the size of one page pointer.
Only the pointer array is allocated here - the pages themselves are not.
Reference: http://blog.csdn.net/hbhhww/article/details/7236695
*/
proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
if (proc->pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
proc->buffer_size = vma->vm_end - vma->vm_start;
vma->vm_ops = &binder_vm_ops; //VMA operation table; an important entry is the page-fault handler
vma->vm_private_data = proc;
//binder_update_page_range() maps the same physical memory into both the
//kernel linear range and the user process's virtual range.
//Note: no physical pages for payload data exist yet - only the first
//page [proc->buffer, proc->buffer + PAGE_SIZE) is allocated and mapped here.
if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
buffer = proc->buffer;
INIT_LIST_HEAD(&proc->buffers);
list_add(&buffer->entry, &proc->buffers); //track the buffer on the per-proc buffer list
buffer->free = 1;
//the entire mapping starts out as one big free buffer
binder_insert_free_buffer(proc, buffer);
//reserve half the space for async transactions (per the field name)
proc->free_async_space = proc->buffer_size / 2;
barrier(); //compiler barrier: publish buffer state before exposing vma/files below
proc->files = get_files_struct(current);//stash the caller's files_struct; the binder proc acts like an extra process descriptor
proc->vma = vma;
proc->vma_vm_mm = vma->vm_mm;
/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
/* error unwind: each label frees everything acquired before its goto */
err_alloc_small_buf_failed:
kfree(proc->pages);
proc->pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_mmap_lock);
vfree(proc->buffer);
proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
| 页指针示意 |
| --- |
| 页指针1 |
| 页指针2 |
| 页指针3 |
| … |
上面的图显示了proc->pages = kzalloc()这句话的效果。
分配了物理空间用来存放这些指针,但是这些指针指向的页表还没有分配,页表指向的物理页就更加还没有分配了。
这些都在binder_update_page_range这个函数里完成。
/*
 * binder_update_page_range() - allocate or free the physical pages that
 * back the kernel address range [start, end) of proc's binder buffer,
 * and map/unmap them in both the kernel range and the user-space vma.
 *
 * @proc:     binder proc whose buffer is being updated
 * @allocate: 1 to allocate and map pages, 0 to unmap and free them
 * @start:    kernel virtual start address of the range
 * @end:      kernel virtual end address of the range
 * @vma:      user vma to map into, or NULL to look it up from proc
 *
 * Returns 0 on success, -ENOMEM on failure; pages already mapped when a
 * failure occurs are rolled back through the shared free/error labels.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
{
void *page_addr;
unsigned long user_page_addr;
struct vm_struct tmp_area;
struct page **page;
struct mm_struct *mm;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: %s pages %p-%p\n", proc->pid,
allocate ? "allocate" : "free", start, end);
if (end <= start)
return 0;
trace_binder_update_page_range(proc, allocate, start, end);
//vma != NULL: the caller (binder_mmap) supplied the vma directly.
//vma == NULL: look it up from proc under the task's mmap_sem below.
if (vma)
mm = NULL;
else
mm = get_task_mm(proc->tsk);
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
//defend against the vma belonging to a different mm than the task's
if (vma && mm != proc->vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
proc->pid);
vma = NULL;
}
}
if (allocate == 0)
goto free_range;
if (vma == NULL) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
proc->pid);
goto err_no_vma;
}
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
//slot in the page-pointer array for this kernel address
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
BUG_ON(*page);
//allocate one zeroed physical page frame (possibly in highmem) and
//store its struct page pointer in the array slot.
//See "Understanding the Linux Kernel", p. 307.
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (*page == NULL) {
pr_err("%d: binder_alloc_buf failed for page at %p\n",
proc->pid, page_addr);
goto err_alloc_page_failed;
}
//describe the kernel linear region to map:
tmp_area.addr = page_addr;//start of the kernel linear address
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* second page acts as a guard page? */;
//build the kernel page-table entries mapping tmp_area onto this page.
//Reference: http://www.360doc.com/content/13/0110/20/7044580_259431706.shtml
ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
proc->pid, page_addr);
goto err_map_kernel_failed;
}
user_page_addr =
(uintptr_t)page_addr + proc->user_buffer_offset;
//insert the same physical page into the user vma at the matching
//user virtual address, so user-space accesses starting at
//user_page_addr reach the very same physical memory.
ret = vm_insert_page(vma, user_page_addr, page[0]);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
proc->pid, user_page_addr);
goto err_vm_insert_page_failed;
}
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return 0;
//free path (allocate == 0) - the error labels below are interleaved so
//a partial allocation rolls back exactly the steps that succeeded,
//walking the range backwards one page at a time.
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
if (vma)
//remove the user-space mapping of this page
zap_page_range(vma, (uintptr_t)page_addr +
proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
//remove the kernel mapping of this page
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
//release the physical page frame and clear the array slot
__free_page(*page);
*page = NULL;
err_alloc_page_failed:
;
}
err_no_vma:
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return -ENOMEM;
}
Synopsis
int vm_insert_page ( struct vm_area_struct * vma,
unsigned long addr,
struct page * page);
| Arguments | 说明 |
| --- | --- |
| vma | user vma to map to |
| addr | target user address of this page |
| page | source kernel page |
参考文章:
http://blog.csdn.net/bullbat/article/details/7311955
http://www.kerneltravel.net/journal/v/mem.htm
http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/