I needed to get familiar with the binder driver these past few days, so I went through the code and am writing down some notes.
Initialization and open are simple, so I will skip them. Let's start with mmap.
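For context, here is a minimal user-space sketch of the call that ends up in binder_mmap (roughly what a binder server such as servicemanager or libbinder's ProcessState does). The 128K size and the read-only, private mapping flags are assumptions chosen to match the log shown further down, not taken from any particular Android release.

/* User-space sketch: open /dev/binder and mmap a buffer, which is what
 * invokes binder_mmap() in the driver. Size and flags are illustrative. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0) {
        perror("open /dev/binder");
        return 1;
    }

    /* Read-only, private mapping: binder_mmap rejects forbidden vm_flags
     * and clears VM_MAYWRITE itself. 128K matches the log below. */
    size_t size = 128 * 1024;
    void *base = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (base == MAP_FAILED) {
        perror("mmap");
        close(fd);
        return 1;
    }

    printf("binder mapped at %p (%zu bytes)\n", base, size);

    munmap(base, size);
    close(fd);
    return 0;
}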
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;

    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;

    if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
        printk(KERN_INFO
            "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
            proc->pid, vma->vm_start, vma->vm_end,
            (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
            (unsigned long)pgprot_val(vma->vm_page_prot));

    if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
        ret = -EPERM;
        failure_string = "bad vm_flags";
        goto err_bad_arg;
    }
    vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

    if (proc->buffer) {
        ret = -EBUSY;
        failure_string = "already mapped";
        goto err_already_mapped;
    }

    /* This is where the real work starts. The vma passed in describes a
     * range of the calling process's address space that will be mapped to
     * kernel space. get_vm_area() grabs a contiguous region of the same
     * size in the kernel's vmalloc area, represented by a vm_struct, and
     * links that structure into vm_list for unified management. */
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    if (area == NULL) {
        ret = -ENOMEM;
        failure_string = "get_vm_area";
        goto err_get_vm_area_failed;
    }
    proc->buffer = area->addr;  /* kernel contiguous (vmalloc) address */
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    VONNYFLY_printk("vma->vm_start=0x%lx,proc->buffer=0x%lx,proc->user_buffer_offset=%ld",
            vma->vm_start, (uintptr_t)proc->buffer, proc->user_buffer_offset);

#ifdef CONFIG_CPU_CACHE_VIPT
    if (cache_is_vipt_aliasing()) {
        while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
            printk(KERN_INFO
                "binder_mmap: %d %lx-%lx maps %p bad alignment\n",
                proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
            vma->vm_start += PAGE_SIZE;
        }
    }
#endif

    proc->pages = kzalloc(sizeof(proc->pages[0]) *
                ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                GFP_KERNEL);
    if (proc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
    proc->buffer_size = vma->vm_end - vma->vm_start;

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;

    /* More real work here. At this point proc->buffer points into the
     * kernel's vmalloc area. We now have both the vma (vm_area_struct) and
     * area (vm_struct), so the obvious next steps are: 1) allocate
     * physical pages, and 2) set up page-table mappings both for the
     * user-space vma and for the vmalloc region. The details are in
     * binder_update_page_range(), analyzed below. */
    if (binder_update_page_range(proc, 1, proc->buffer,
                     proc->buffer + PAGE_SIZE, vma)) {
        ret = -ENOMEM;
        failure_string = "alloc small buf";
        goto err_alloc_small_buf_failed;
    }
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    list_add(&buffer->entry, &proc->buffers);
    buffer->free = 1;
    binder_insert_free_buffer(proc, buffer);
    proc->free_async_space = proc->buffer_size / 2;
    barrier();
    proc->files = get_files_struct(current);
    proc->vma = vma;

    /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
         proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/

    return 0;

err_alloc_small_buf_failed:
    kfree(proc->pages);
    proc->pages = NULL;
err_alloc_pages_failed:
    vfree(proc->buffer);
    proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
err_bad_arg:
    printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
           proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
    return ret;
}
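Before moving on to binder_update_page_range, it is worth pinning down what user_buffer_offset buys us: any kernel address inside proc->buffer translates to user space by simply adding the offset. The following standalone sketch (not driver code) redoes that arithmetic with the two addresses from the log further down (vm_start = 0x40009000, proc->buffer = 0xc6840000).

/* Standalone sketch of the user/kernel address relationship set up above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uintptr_t vm_start = 0x40009000UL;  /* user-space start of the mapping */
    uintptr_t buffer   = 0xc6840000UL;  /* kernel vmalloc start (proc->buffer) */

    /* The driver stores this as a ptrdiff_t; long long keeps the sketch
     * host-independent. It is negative: the kernel address is the higher one. */
    long long user_buffer_offset = (long long)vm_start - (long long)buffer;

    /* Translate the kernel address of the second page back to user space. */
    uintptr_t kernel_page = buffer + 0x1000;
    uintptr_t user_page   = (uintptr_t)((long long)kernel_page + user_buffer_offset);

    printf("user_buffer_offset = %lld\n", user_buffer_offset);
    printf("kernel 0x%lx <-> user 0x%lx\n",   /* 0xc6841000 <-> 0x4000a000 */
           (unsigned long)kernel_page, (unsigned long)user_page);
    return 0;
}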
Inside binder_update_page_range:
static int binder_update_page_range(struct binder_proc *proc, int allocate,
    void *start, void *end, struct vm_area_struct *vma)
{
    void *page_addr;
    unsigned long user_page_addr;
    struct vm_struct tmp_area;
    struct page **page;
    struct mm_struct *mm;

    if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
        printk(KERN_INFO "binder: %d: %s pages %p-%p\n",
               proc->pid, allocate ? "allocate" : "free", start, end);

    if (end <= start)
        return 0;

    if (vma)
        mm = NULL;
    else
        mm = get_task_mm(proc->tsk);

    if (mm) {
        down_write(&mm->mmap_sem);
        vma = proc->vma;
    }

    if (allocate == 0)
        goto free_range;

    if (vma == NULL) {
        printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
               "map pages in userspace, no vma\n", proc->pid);
        goto err_no_vma;
    }

    /* Pages are allocated here, but why with GFP_KERNEL (lowmem)? I haven't
     * figured that out: since the page tables have to be updated anyway,
     * why not allocate from highmem? Perhaps lowmem is simply considered
     * big enough; after all it covers up to 896M, and few ARM devices have
     * that much physical RAM. */
    for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        int ret;
        struct page **page_array_ptr;

        page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

        BUG_ON(*page);
        *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (*page == NULL) {
            printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
                   "for page at %p\n", proc->pid, page_addr);
            goto err_alloc_page_failed;
        }
        tmp_area.addr = page_addr;
        tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
        page_array_ptr = page;

        /* Build the page-table mapping for this address in the vmalloc
         * region. That naturally needs the vm_struct (supplying the
         * virtual address) and the page (used to make the pte). This
         * completes the kernel-side mapping. */
        ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
        if (ret) {
            printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
                   "to map page at %p in kernel\n",
                   proc->pid, page_addr);
            goto err_map_kernel_failed;
        }
        user_page_addr =
            (uintptr_t)page_addr + proc->user_buffer_offset;

        /* Update the page table behind the vma as well; this is what
         * actually implements the mmap semantics for user space. */
        ret = vm_insert_page(vma, user_page_addr, page[0]);
        if (ret) {
            printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
                   "to map page at %lx in userspace\n",
                   proc->pid, user_page_addr);
            goto err_vm_insert_page_failed;
        }
        /* vm_insert_page does not seem to increment the refcount */
    }
    if (mm) {
        up_write(&mm->mmap_sem);
        mmput(mm);
    }
    return 0;

free_range:
    for (page_addr = end - PAGE_SIZE; page_addr >= start;
         page_addr -= PAGE_SIZE) {
        page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
        if (vma)
            zap_page_range(vma, (uintptr_t)page_addr +
                proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
        __free_page(*page);
        *page = NULL;
err_alloc_page_failed:
        ;
    }
err_no_vma:
    if (mm) {
        up_write(&mm->mmap_sem);
        mmput(mm);
    }
    return -ENOMEM;
}
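A side note on the bookkeeping used above: proc->pages holds one struct page pointer per 4K page of the mapping, indexed by (page_addr - proc->buffer) / PAGE_SIZE. A tiny standalone sketch (not driver code) with the numbers from the log below, i.e. a 128K mapping of which only the first page is initially backed:

/* Sketch of the proc->pages indexing: one slot per page of the mapping. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long buffer      = 0xc6840000UL;  /* proc->buffer */
    unsigned long buffer_size = 128 * 1024UL;  /* vm_end - vm_start */
    unsigned long page_addr   = 0xc6840000UL;  /* the one page binder_mmap backs */

    unsigned long slots = buffer_size / PAGE_SIZE;
    unsigned long index = (page_addr - buffer) / PAGE_SIZE;

    printf("pages[] has %lu slots; the initial allocation fills index %lu\n",
           slots, index);
    return 0;
}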
For the binder driver, the key thing to understand is how data gets transferred.
Starting from the buffer: I added some log output, and it shows that the binder_buffer struct is 40 bytes:
[ binder_mmap ] vonnyfly(lfeng^-^)~~~~~~~~~~~binder_buffer size=40
binder_mmap: 31 40009000-40029000 (128 K) vma 75 pagep 30f
[ binder_mmap ] vonnyfly(lfeng^-^)~~~~~~~~~~~vma->vm_start=0x40009000,proc->buffer=0xc6840000
binder: 31: allocate pages c6840000-c6841000
binder: 31: add free buffer, size 131032, at c6840000
Now the numbers add up: the newly added free buffer is 131032 bytes, i.e. 127K + 984B, which is the 128K mapping minus the 40 bytes at its start used for bookkeeping (the struct binder_buffer header).
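A quick standalone check of that arithmetic (plain C, nothing binder-specific):

/* The free buffer reported in the log is the whole 128K mapping minus the
 * 40-byte struct binder_buffer header that sits at its start. */
#include <stdio.h>

int main(void)
{
    unsigned long mapping = 128 * 1024;  /* 0x40009000-0x40029000 */
    unsigned long header  = 40;          /* sizeof(struct binder_buffer), per the log */
    unsigned long free_sz = mapping - header;

    printf("%lu bytes = %luK + %luB\n", free_sz, free_sz / 1024, free_sz % 1024);
    /* prints: 131032 bytes = 127K + 984B */
    return 0;
}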
But looking at the code carefully, we notice that the call

    binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)

allocates only a single page. This is deliberate, on-demand allocation to save memory: reserving the process's virtual address range and the kernel's vmalloc range costs no real memory, so the whole required span is claimed up front while physical pages are obtained only as needed. The buffer is then placed at the head of proc->buffers; at this point its nominal size is large even though only one page is actually backed, and it is inserted into free_buffers, which is managed as a red-black tree.

At this point the relevant members of proc are:

proc->vma = the mapped range of the calling process's user space.
proc->files = the calling process's files_struct.
proc->buffer_size = the length to be mapped (capped at 4M). The usable size of the initial free buffer, i.e. buffer_size minus sizeof(struct binder_buffer), is what binder_buffer_size() returns: for the last buffer it is the end of the mapping minus the start of buffer->data, otherwise it is the next buffer's start address minus the start of buffer->data. If you look closely you will notice that this implies the buffers are laid out contiguously, and indeed they are; we will see how in binder_alloc_buf. A small sketch of this layout follows the function; the remaining proc members continue after it.

static size_t binder_buffer_size(struct binder_proc *proc,
                                 struct binder_buffer *buffer)
{
    if (list_is_last(&buffer->entry, &proc->buffers))
        return proc->buffer + proc->buffer_size - (void *)buffer->data;
    else
        return (size_t)list_entry(buffer->entry.next,
            struct binder_buffer, entry) - (size_t)buffer->data;
}
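A minimal sketch of why that works (fake_binder_buffer is a made-up, simplified stand-in for struct binder_buffer, with a plain next pointer in place of the real list_head): headers and payloads sit back-to-back in one region, so the distance from a buffer's data[] to the next header is exactly its usable size.

/* Contiguous-layout sketch: not driver code, just the same size calculation
 * performed on two fake buffers carved out of one region. */
#include <stddef.h>
#include <stdio.h>

struct fake_binder_buffer {            /* simplified stand-in for struct binder_buffer */
    struct fake_binder_buffer *next;   /* stands in for the proc->buffers list linkage */
    unsigned char data[];              /* payload starts right after the header */
};

static size_t fake_buffer_size(void *region_end, struct fake_binder_buffer *b)
{
    if (b->next == NULL)                        /* last buffer in the region */
        return (char *)region_end - (char *)b->data;
    return (char *)b->next - (char *)b->data;   /* next header starts where our data ends */
}

int main(void)
{
    static unsigned char region[4096];

    /* Carve two contiguous buffers out of the region: header, data, header, data. */
    struct fake_binder_buffer *first  = (struct fake_binder_buffer *)region;
    struct fake_binder_buffer *second = (struct fake_binder_buffer *)(region + 256);
    first->next  = second;
    second->next = NULL;

    printf("first buffer data size:  %zu\n",
           fake_buffer_size(region + sizeof(region), first));
    printf("second buffer data size: %zu\n",
           fake_buffer_size(region + sizeof(region), second));
    return 0;
}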
The remaining members:

proc->free_async_space = proc->buffer_size / 2 (not sure yet what this is for).
proc->pages = the array of pointers to the allocated physical pages; initially only one entry (one page) is filled in, but the array is sized for the whole mapping.
proc->buffer = the start address of the contiguous kernel mapping.
proc->user_buffer_offset = the start address of the user-space mapping minus the start address of the contiguous kernel mapping.
proc->tsk was already set to current back in open.

From the logs, the processes calling open and mmap are servicemanager, mediaserver and so on, i.e. the server side. During actual communication the client's data only needs to be copied once, into the server's proc->buffer area; because that area is also mapped into the server's user space, the server can read it directly from there, saving one copy.
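As a closing illustration, here is a heavily simplified user-space simulation of that one-copy idea (every name here is made up; memcpy stands in for the copy the driver performs with copy_from_user, and a single array plays the role of the receiver's doubly-mapped buffer):

/* One-copy simulation: the "driver" copies the sender's payload once into the
 * receiver's mapped buffer; the receiver then reads it in place. */
#include <stdio.h>
#include <string.h>

#define MAPPING_SIZE 4096

/* In the real driver this region is proc->buffer on the kernel side and
 * proc->buffer + user_buffer_offset on the user side, both backed by the
 * same physical pages. Here one array plays both roles. */
static unsigned char receiver_mapping[MAPPING_SIZE];

/* "Driver" side: copy the sender's data into the receiver's buffer (one copy). */
static void *fake_binder_transaction(const void *sender_data, size_t len)
{
    memcpy(receiver_mapping, sender_data, len);  /* copy_from_user in the driver */
    return receiver_mapping;                     /* address handed to the receiver */
}

int main(void)
{
    const char request[] = "getService(media.player)";

    /* The sender hands its data to the "driver"... */
    void *in_receiver = fake_binder_transaction(request, sizeof(request));

    /* ...and the receiver reads it straight out of its mapping, no second copy. */
    printf("receiver sees: %s\n", (const char *)in_receiver);
    return 0;
}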