
Android binder driver source code analysis (Part 2)

November 27, 2017

Continuing from the previous article.

We noted earlier that binder declares the total buffer size for a proc (one per process) up front, then allocates a single page and maps it. When a later request finds the mapped space insufficient, the driver maps additional pages on demand, splits the buffer it hands out into two, and puts the remainder back into free_buffers. The walkthrough below starts from the binder_alloc_buf function; a quick reminder of the buffer layout comes first.
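The previous article mentioned that a buffer's control header and its payload live contiguously. The sketch below is reconstructed from the binder.c of this era rather than quoted from this post, so treat the exact field order as approximate; what matters is that data[] starts right after the header, which works out to 40 bytes on a 32-bit build and explains why buffer->data sits at offset 0x28 in the log further down.

struct binder_buffer {
	struct list_head entry;     /* all buffers of this proc, in address order            */
	struct rb_node rb_node;     /* free buffers keyed by size, allocated ones by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;
	struct binder_transaction *transaction;
	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];            /* payload starts here, right after the header           */
};

With that layout in mind, here is binder_alloc_buf: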


static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
	size_t data_size, size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (size < data_size || size < offsets_size) {
		binder_user_error("binder: %d: got transaction with invalid "
			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
			printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd f"
			       "ailed, no async space left\n", proc->pid, size);
		return NULL;
	}
/* The real work starts here: search the red-black tree of free buffers for one
   that fits the requested size, where size = data_size + offsets_size, each
   rounded up to pointer alignment. */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
		       "no address space\n", proc->pid, size);
		return NULL;
	}
/* An exact size match is unlikely, so n is usually NULL here; in that case
   best_fit points at the smallest free buffer that is still big enough. */

	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}
/* The end of the buffer, rounded down to a page boundary: the buffer only runs
   up to data + buffer_size, so the mapping must not go past that point. */
	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);

/* If the request nearly fills the chosen buffer, don't split it; the actual
   split happens further down. How to read size + sizeof(struct binder_buffer) + 4:
   the buffer is only worth splitting if, after holding size bytes, there is
   still room for another binder_buffer header plus a little payload. Leftover
   space smaller than one header is useless anyway: as the previous article
   explained, the control header and the data sit contiguously, and a header
   that cannot hold any data serves no purpose. */
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
/* Compute the actual end of the range to map, rounded up to a page boundary. */
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);

	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr; /* the rounded-up end can overshoot; clamp it back */

/* From here on it is the same routine as in the previous article: allocate
   physical pages and hook them into the page tables of both the vma (user
   space) and the kernel vm area. */
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)
	    )
		return NULL;
	

	rb_erase(best_fit, &proc->free_buffers); /* remove from free_buffers; it is now tracked as allocated */
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
/* buffer_size != size is the split-or-not flag decided above. If they are
   equal, there is no room for a second buffer, and at most something under
   44 B is wasted, which would be useless on its own anyway. Otherwise carve
   the remainder off into a smaller buffer and put it back on the free list. */
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}




	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
			printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd "
			       "async free %zd\n", proc->pid, size,
			       proc->free_async_space);
	}

	return buffer;
}
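One helper the listing relies on but does not show is binder_buffer_size. As far as I can reconstruct it from the same-era binder.c (so take the exact code as an approximation, not a quote), a buffer's usable size is simply the distance from its data[] to the start of the next buffer on proc->buffers, or to the end of the mapped area for the last buffer:

static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	/* last buffer: usable space runs to the end of the mapped region */
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		/* otherwise: space runs up to the next buffer's header */
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

This is consistent with the buffer_size 1ffd8 line in the log below: 0xc6860000 - 0xc6840028 = 0x1ffd8 = 131032.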

The log looks like this:

binder_open: 31:31,name=servicemanager
[ binder_mmap ] vonnyfly(lfeng^-^)~~~~~~~~~~~binder_buffer size=40
binder_mmap: 31 40009000-40029000 (128 K) vma 75 pagep 30f   the vma region is obtained here
[ binder_mmap ] vonnyfly(lfeng^-^)~~~~~~~~~~~vma->vm_start=0x40009000,proc->buffer=0xc6840000  
binder: 31: allocate pages c6840000-c6841000  a single physical page is allocated here
binder: 31: add free buffer, size 131032, at c6840000   added to the free list


binder: 31: buffer_size 1ffd8---buffer->data=c6840028,(uintptr_t)buffer->data + buffer_size=c6860000,has_page_addr=c6860000
binder: 31: binder_alloc_buf size 128 got buffer c6840000 buffer_size 168--data_size=124,offsets_size=4---buffer->data=c6840028,has_page_addr=c6860000,end_page_addr=c6841000
binder: 31: allocate pages c6841000-c6841000  the range is empty (start == end): the requested size is 128 B, so no new mapping is needed, the one page from binder_mmap is enough
binder: 31: add free buffer, size 130864, at c68400a8  the remainder goes back on the free list: 130864 B = 131032 - 128 - 40 (header), starting at buffer->data + size = 0xc6840028 + 128 = 0xc68400a8; a small worked example after the log checks these numbers
binder: 31: binder_free_buf c6840000 size 128 buffer_size 128


binder: 31: free pages c6841000-c6840000
binder: 31: merge free, buffer c68400a8 share page with c6840000
binder: 31: add free buffer, size 131032, at c6840000



binder: 31: buffer_size 1ffd8---buffer->data=c6840028,(uintptr_t)buffer->data + buffer_size=c6860000,has_page_addr=c6860000
binder: 31: binder_alloc_buf size 116 got buffer c6840000 buffer_size 156--data_size=112,offsets_size=4---buffer->data=c6840028,has_page_addr=c6860000,end_page_addr=c6841000
binder: 31: allocate pages c6841000-c6841000
binder: 31: add free buffer, size 130876, at c684009c
binder: 31: binder_free_buf c6840000 size 116 buffer_size 116
binder: 31: free pages c6841000-c6840000
binder: 31: merge free, buffer c684009c share page with c6840000
binder: 31: add free buffer, size 131032, at c6840000
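To double-check the arithmetic in the annotations above, here is a tiny standalone program (mine, not part of the driver) that redoes the math, assuming a 32-bit build where sizeof(void *) == 4 and sizeof(struct binder_buffer) == 40:

#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)   /* ALIGN(x, sizeof(void *)) on a 32-bit build */
#define HDR 40u                        /* sizeof(struct binder_buffer) assumed here  */

int main(void)
{
	unsigned mmap_size = 128 * 1024;              /* binder_mmap: 128 K                */
	unsigned free_size = mmap_size - HDR;         /* initial free buffer               */
	unsigned size      = ALIGN4(124) + ALIGN4(4); /* data_size=124, offsets_size=4     */
	unsigned remainder = free_size - size - HDR;  /* what the split puts back on free  */

	printf("free_size=%u size=%u remainder=%u\n", free_size, size, remainder);
	return 0;
}

It prints free_size=131032 size=128 remainder=130864, matching the first allocation in the log.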

Next comes the main event: how binder_ioctl is implemented and how the function above ends up being called from it. (To be continued.)
