Transaction in Binder Kernel Driver
When any process opens the /dev/binder device, the driver allocates a corresponding binder_proc structure for that process.
-
- static int binder_open(struct inode *nodp, struct file *filp)
- {
- struct binder_proc *proc;
- if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
- printk(KERN_INFO "binder_open: %d:%d/n", current->group_leader->pid, current->pid);
- proc = kzalloc(sizeof(*proc), GFP_KERNEL);
- if (proc == NULL)
- return -ENOMEM;
- get_task_struct(current);
- proc->tsk = current;
- INIT_LIST_HEAD(&proc->todo);
- init_waitqueue_head(&proc->wait);
- proc->default_priority = task_nice(current);
- mutex_lock(&binder_lock);
- binder_stats.obj_created[BINDER_STAT_PROC]++;
- hlist_add_head(&proc->proc_node, &binder_procs);
- proc->pid = current->group_leader->pid;
- INIT_LIST_HEAD(&proc->delivered_death);
- filp->private_data = proc;
- mutex_unlock(&binder_lock);
- if (binder_proc_dir_entry_proc) {
- char strbuf[11];
- snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
- create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc);
- }
- return 0;
- }
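For reference, below is a trimmed sketch of the binder_proc fields touched by binder_open() above; the real structure in the driver source has many more members (red-black trees of threads, nodes and refs, mmap bookkeeping, statistics) that are omitted here.
-
- struct binder_proc {
-     struct hlist_node proc_node;      /* entry in the global binder_procs list */
-     int pid;                          /* group-leader pid of the opening process */
-     struct task_struct *tsk;          /* task that opened /dev/binder */
-     struct list_head todo;            /* per-process work queue */
-     wait_queue_head_t wait;           /* threads sleep here in binder_thread_read() */
-     long default_priority;            /* nice value captured at open time */
-     struct list_head delivered_death; /* death notifications handed to userspace */
-     /* rb-trees of threads/nodes/refs, mmap state, stats, ... omitted */
- };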
Whenever an ioctl is issued, the driver can identify the calling process through the binder_proc saved in filp->private_data. Transaction data is passed via the BINDER_WRITE_READ ioctl.
-
- static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- ......
- switch (cmd) {
- case BINDER_WRITE_READ: {
- struct binder_write_read bwr;
- if (size != sizeof(struct binder_write_read)) {
- ret = -EINVAL;
- goto err;
- }
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
- ret = -EFAULT;
- goto err;
- }
- if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
- printk(KERN_INFO "binder: %d:%d write %ld at %08lx, read %ld at %08lx/n",
- proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer);
- if (bwr.write_size > 0) {
- ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
- if (ret < 0) {
- bwr.read_consumed = 0;
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
- goto err;
- }
- }
- if (bwr.read_size > 0) {
- ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
- if (ret < 0) {
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
- goto err;
- }
- }
- if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
- printk(KERN_INFO "binder: %d:%d wrote %ld of %ld, read return %ld of %ld/n",
- proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, bwr.read_consumed, bwr.read_size);
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto err;
- }
- break;
- }
- ......
- }
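For context, the binder_write_read structure exchanged by this ioctl, as declared in the driver header of the same era as the code quoted here, looks roughly like this:
-
- struct binder_write_read {
-     signed long   write_size;     /* bytes to write */
-     signed long   write_consumed; /* bytes consumed by the driver */
-     unsigned long write_buffer;   /* userspace address of the command stream */
-     signed long   read_size;      /* bytes available for reading */
-     signed long   read_consumed;  /* bytes filled in by the driver */
-     unsigned long read_buffer;    /* userspace address of the return buffer */
- };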
The driver handles the write part first, then the read part. Let's look at binder_thread_write() first. The core of this function is a loop that parses commands out of the write buffer and executes them.
-
- int
- binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
- void __user *buffer, int size, signed long *consumed)
- {
- uint32_t cmd;
- void __user *ptr = buffer + *consumed;
- void __user *end = buffer + size;
- while (ptr < end && thread->return_error == BR_OK) {
- if (get_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
- binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
- }
- switch (cmd) {
- case ***:
- ......
- default:
- printk(KERN_ERR "binder: %d:%d unknown command %d/n", proc->pid, thread->pid, cmd);
- return -EINVAL;
- }
- *consumed = ptr - buffer;
- }
- return 0;
- }
Let's look at the two commands relevant to the example in this article. The first is BC_INCREFS.
-
- case BC_INCREFS:
- case BC_ACQUIRE:
- case BC_RELEASE:
- case BC_DECREFS: {
- uint32_t target;
- struct binder_ref *ref;
- const char *debug_string;
- if (get_user(target, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (target == 0 && binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("binder: %d:"
- "%d tried to acquire "
- "reference to desc 0, "
- "got %d instead/n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target);
- if (ref == NULL) {
- binder_user_error("binder: %d:%d refcou"
- "nt change on invalid ref %d/n",
- proc->pid, thread->pid, target);
- break;
- }
- switch (cmd) {
- case BC_INCREFS:
- debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
- break;
- case ***:
- ......
- }
- }
As noted earlier, in our example the target is 0. binder_context_mgr_node, which represents handle 0, is created when service_manager issues the BINDER_SET_CONTEXT_MGR ioctl. So all that happens here is that a weak reference to binder_context_mgr_node is added.
- binder_context_mgr_node = binder_new_node(proc, NULL);
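To make the command-stream format concrete, here is a minimal, hypothetical sketch at the raw ioctl level of how a client process could issue that weak-reference increment on handle 0. In reality libbinder's IPCThreadState builds this stream; binder_fd is assumed to be an already-opened /dev/binder descriptor.
-
- /* hypothetical client-side sketch: take a weak reference on handle 0 */
- uint32_t cmds[2] = { BC_INCREFS, 0 };      /* 32-bit command word, then the target handle */
- struct binder_write_read bwr = { 0 };
- bwr.write_size   = sizeof(cmds);
- bwr.write_buffer = (unsigned long)cmds;    /* read_size stays 0: write only */
- ioctl(binder_fd, BINDER_WRITE_READ, &bwr); /* parsed by binder_thread_write() above */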
The other command is BC_TRANSACTION.
- case BC_TRANSACTION:
- case BC_REPLY: {
- struct binder_transaction_data tr;
- if (copy_from_user(&tr, ptr, sizeof(tr)))
- return -EFAULT;
- ptr += sizeof(tr);
- binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
- break;
- }
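The binder_transaction_data structure copied in from userspace here describes a single transaction. In the driver header of this era it looks roughly like the following (comments abbreviated):
-
- struct binder_transaction_data {
-     union {
-         size_t handle;  /* target descriptor of a BC_TRANSACTION (0 = context manager) */
-         void   *ptr;    /* target object of a BC_REPLY */
-     } target;
-     void         *cookie;       /* target object cookie */
-     unsigned int code;          /* transaction code, i.e. the RPC function id */
-     unsigned int flags;         /* e.g. TF_ONE_WAY, TF_ACCEPT_FDS */
-     pid_t        sender_pid;
-     uid_t        sender_euid;
-     size_t       data_size;     /* number of bytes of transaction data */
-     size_t       offsets_size;  /* number of bytes of flat_binder_object offsets */
-     union {
-         struct {
-             const void *buffer;   /* transaction data */
-             const void *offsets;  /* offsets of flattened objects within buffer */
-         } ptr;
-         uint8_t buf[8];
-     } data;
- };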
When the packet contains a flattened object of type BINDER_TYPE_BINDER, binder_transaction() creates a new binder node for it.
-
- static void
- binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
- struct binder_transaction_data *tr, int reply)
- {
- ......
- off_end = (void *)offp + tr->offsets_size;
- for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
- if (*offp > t->buffer->data_size - sizeof(*fp)) {
- binder_user_error("binder: %d:%d got transaction with "
- "invalid offset, %d/n",
- proc->pid, thread->pid, *offp);
- return_error = BR_FAILED_REPLY;
- goto err_bad_offset;
- }
- fp = (struct flat_binder_object *)(t->buffer->data + *offp);
- switch (fp->type) {
- case BINDER_TYPE_BINDER:
- case BINDER_TYPE_WEAK_BINDER: {
- struct binder_ref *ref;
- struct binder_node *node = binder_get_node(proc, fp->binder);
- if (node == NULL) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
- if (node == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_new_node_failed;
- }
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
- }
- if (fp->cookie != node->cookie) {
- binder_user_error("binder: %d:%d sending u%p "
- "node %d, cookie mismatch %p != %p/n",
- proc->pid, thread->pid,
- fp->binder, node->debug_id,
- fp->cookie, node->cookie);
- goto err_binder_get_ref_for_node_failed;
- }
- ref = binder_get_ref_for_node(target_proc, node);
- if (ref == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- if (fp->type == BINDER_TYPE_BINDER)
- fp->type = BINDER_TYPE_HANDLE;
- else
- fp->type = BINDER_TYPE_WEAK_HANDLE;
- fp->handle = ref->desc;
- binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
- if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
- printk(KERN_INFO " node %d u%p -> ref %d desc %d/n",
- node->debug_id, node->ptr, ref->debug_id, ref->desc);
- } break;
- ......
- } /* end of switch */
- }
- ......
- }
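Each offset processed by the loop above points at a flat_binder_object embedded in the transaction data. Its layout, again roughly as in the driver header of this era, is:
-
- struct flat_binder_object {
-     unsigned long type;    /* BINDER_TYPE_BINDER / WEAK_BINDER / HANDLE / WEAK_HANDLE / FD */
-     unsigned long flags;   /* FLAT_BINDER_FLAG_PRIORITY_MASK, FLAT_BINDER_FLAG_ACCEPTS_FDS */
-     union {
-         void        *binder;  /* local object: pointer valid in the sending process */
-         signed long  handle;  /* remote object: reference descriptor */
-     };
-     void *cookie;             /* extra data associated with a local object */
- };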
binder_transaction() sees that the target is handle 0, so it takes the following branch to obtain target_node, target_proc and target_thread.
- } else {
- target_node = binder_context_mgr_node;
- if (target_node == NULL) {
- return_error = BR_DEAD_REPLY;
- goto err_no_context_mgr_node;
- }
- }
- e->to_node = target_node->debug_id;
- target_proc = target_node->proc;
- if (target_proc == NULL) {
- return_error = BR_DEAD_REPLY;
- goto err_dead_binder;
- }
- if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
- struct binder_transaction *tmp;
- tmp = thread->transaction_stack;
- while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
- tmp = tmp->from_parent;
- }
- }
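For completeness, the part of binder_transaction() elided above that picks the work list and wait queue is roughly as follows: if a waiting target thread was found on the transaction stack, the work is queued on that thread, otherwise on the target process as a whole.
-
- if (target_thread) {
-     e->to_thread = target_thread->pid;
-     target_list = &target_thread->todo;
-     target_wait = &target_thread->wait;
- } else {
-     target_list = &target_proc->todo;
-     target_wait = &target_proc->wait;
- }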
Finally, binder_transaction() places the request on the target's work list and wakes up the thread waiting in binder_thread_read().
-
- static void
- binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
- struct binder_transaction_data *tr, int reply)
- {
- ......
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait)
- wake_up_interruptible(target_wait);
- return;
- ......
- }
Now let's look at binder_thread_read(). While service_manager is running, it waits here until a request arrives.
-
- static int
- binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
- void __user *buffer, int size, signed long *consumed, int non_block)
- {
- ......
- ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- ......
- }
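The wait condition, binder_has_proc_work(), essentially checks whether the per-process todo list is non-empty (or whether the thread has been told to return to userspace); roughly:
-
- static int binder_has_proc_work(struct binder_proc *proc,
-                                 struct binder_thread *thread)
- {
-     return !list_empty(&proc->todo) ||
-            (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
- }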
Because the earlier write from media_server woke it up, binder_thread_read() resumes. The following code fills in a binder_transaction_data record that points at the transaction buffer (already mapped into service_manager's address space, hence the user_buffer_offset adjustment) and copies that record into service_manager's read buffer.
- tr.data_size = t->buffer->data_size;
- tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset);
- tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
- return -EFAULT;
- ptr += sizeof(tr);
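On the userspace side, service_manager then walks the read buffer that binder_thread_read() has just filled: a stream of BR_* return codes, each followed by its payload. A minimal, hypothetical sketch of that walk (the real parser is binder_parse() in the servicemanager sources):
-
- /* hypothetical reader-side sketch: walk the buffer returned by BINDER_WRITE_READ */
- void *p   = (void *)bwr.read_buffer;
- void *end = p + bwr.read_consumed;
- while (p < end) {
-     uint32_t cmd = *(uint32_t *)p;
-     p += sizeof(uint32_t);
-     if (cmd == BR_TRANSACTION) {
-         struct binder_transaction_data *txn = p;
-         p += sizeof(*txn);
-         /* txn->data.ptr.buffer and txn->data.ptr.offsets point straight into the
-          * kernel buffer mapped into this process; the payload is not copied again */
-     }
-     /* other BR_* codes (BR_NOOP, BR_TRANSACTION_COMPLETE, ...) elided */
- }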
Summary:
This section has shown how data flows from the client side of an RPC call to the server side.