共计 18430 个字符,预计需要花费 47 分钟才能阅读完成。
在 Android 系统中可以说是无处不 Binder,Binder 传输在每时每刻都发生着。很多情况下,一个进程中都不会只存在一个独立的 Binder 传输,经常是并发多个 Binder 传输,而且会存在 Binder 嵌套。尤其像 system_server 这种重要的进程 Binder 传输会更多。在系统发生问题时,如果追踪到 system_server,会发现大部分情况都是在 Binder 传输中。但无论有多少 Binder 传输或多复杂的 Binder 嵌套,最终都是通过两种 Binder 传输实现的:同步传输和异步传输。这里试图通过最简单的传输来解释 Binder 通信流程。
Binder 同步传输
Binder 传输中最常见的就是同步传输。同步传输中,IPC 通信的发起端需要等到对端处理完消息才能继续。一个完整的同步传输如下图所示。
跳过 Binder 设备初始化的过程,直接看传输过程。客户端通过 ioctl 的 BINDER_WRITE_READ 发送 BC_TRANSACTION 命令到 Binder 驱动。
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
......
switch (cmd) {
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
......
// 需要写数据
if (bwr.write_size > 0) {ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
// 需要读数据
if (bwr.read_size > 0) {ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
......
break;
}
......
1.BC_TRANSACTION
发起 Binder 传输时,需要写入 BC_TRANSACTION 命令,然后等待命令返回。
drivers/staging/android/binder.c
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
......
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
......
}
BC_TRANSACTION 和 BC_REPLY 都会调用 binder_transaction(),区别在于是否设置 reply。binder_transaction() 也是写数据的核心函数。函数很长,逻辑很多,尽量分析一下。
drivers/staging/android/binder.c
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
......
if (reply) {......} else {if (tr->target.handle) {
// 根据 handle 找到相应的 binder 实体
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle, true);
......
target_node = ref->node;
} else {
// handle 为 0 时为 service manager 的 binder 实体
target_node = binder_context_mgr_node;
......
}
e->to_node = target_node->debug_id;
// binder 实体的 binder_proc
target_proc = target_node->proc;
......
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
......
// 如果是同步传输,寻找传输栈中是否有来自对端的传输,如果有就使用对端线程处理传输
while (tmp) {if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
}
// 找到对端线程则使用线程 todo list,否则使用进程 todo list
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
e->to_proc = target_proc->pid;
// 分配 binder transaction
t = kzalloc(sizeof(*t), GFP_KERNEL);
......
// 分配 binder_work 用于处理传输完成
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
......
// 同步的非 reply 传输,设置当前线程为 from
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
// 设置传输的目标进程和线程
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
// 从目标进程中分配传输空间
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
......
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
// 增加 binder 实体的引用计数
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
// 拷贝用户数据到 binder 实体的传输空间中
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {......}
// 拷贝用户数据的 flat_binder_object 对象信息
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {......}
......
off_end = (void *)offp + tr->offsets_size;
off_min = 0;
// 处理 flat_binder_object 对象信息
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
......
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
// 类型为 binder 实体,用于 server 注册
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
// 如果找不到 binder 实体就创建一个
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {node = binder_new_node(proc, fp->binder, fp->cookie);
......
}
......
// 在目标进程中创建引用
ref = binder_get_ref_for_node(target_proc, node);
......
// 修改 binder 对象的类型为 handle
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
// 将引用的 handle 赋值给对象
fp->handle = ref->desc;
fp->cookie = 0;
// 增加引用计数
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
......
} break;
// 类型为 binder 引用,client 向 server 传输
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
// 获取当前进程中的 binder 引用
struct binder_ref *ref = binder_get_ref(
proc, fp->handle,
fp->type == BINDER_TYPE_HANDLE);
......
if (ref->node->proc == target_proc) {
// 如果 binder 传输发生在同一进程中则直接使用 binder 实体
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
......
} else {
struct binder_ref *new_ref;
// 在目标进程中创建 binder 引用
new_ref = binder_get_ref_for_node(target_proc, ref->node);
......
fp->binder = 0;
fp->handle = new_ref->desc;
fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
......
}
} break;
// 类型为文件描述符,用于共享文件或内存
case BINDER_TYPE_FD: {......} break;
......
}
}
if (reply) {......} else if (!(t->flags & TF_ONE_WAY)) {
// 当前线程的传输入栈
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else {
// 异步传输使用 aync todo list
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
// 将传输添加到目标队列中
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
// 将传输完成添加到当前线程 todo 队列中
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
// 唤醒目标线程或进程
if (target_wait)
wake_up_interruptible(target_wait);
return;
......
}
BC_TRANSACTION 简单来说流程如下,
- 找到目标进程或线程。
- 将用户空间的数据拷贝到目标进程空间,并解析 flat_binder_object。
- 将传输入栈到当前线程中。
- 将 BINDER_WORK_TRANSACTION 加入到目标队列,将 BINDER_WORK_TRANSACTION_COMPLETE 加入到当前线程队列。
- 唤醒目标进程或线程进行处理。
2.BR_TRANSACTION_COMPLETE
Client 在执行 BINDER_WRITE_READ 时,先通过 binder_thread_write() 写数据,将 BINDER_WORK_TRANSACTION_COMPLETE 放入工作队列。紧接着就执行 binder_thread_read() 读取返回数据。这里会将命令 BR_TRANSACTION_COMPLETE 返回给 Client 线程。
drivers/staging/android/binder.c
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
......
// 第一次读时,插入命令 BR_NOOP 返回给用户
if (*consumed == 0) {if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
// 当前线程没有传输并且 todo 队列为空时,处理进程的工作队列
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
......
thread->looper |= BINDER_LOOPER_STATE_WAITING;
// 如果处理进程工作队列,则当前线程为空闲线程
if (wait_for_proc_work)
proc->ready_threads++;
......
// 等待进程或线程工作队列被唤醒
if (wait_for_proc_work) {
......
ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else {
......
ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
......
// 唤醒后,开始处理传输,空闲线程减 1
if (wait_for_proc_work)
proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
......
while (1) {
......
// 优先处理线程工作队列,再处理进程工作队列
if (!list_empty(&thread->todo))
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work)
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
goto retry;
break;
}
......
switch (w->type) {
......
case BINDER_WORK_TRANSACTION_COMPLETE: {
// 发送命令 BR_TRANSACTION_COMPLETE 给用户
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
......
list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
......
if (!t)
continue;
......
}
3.BR_TRANSACTION
Server 端线程启动后就调用 talkWithDriver() 等待读取数据。Binder 驱动处理 Client 发送的 BC_TRANSACTION 命令后,会唤醒 Server 线程。Server 线程读取数据进行处理同样是在 binder_thread_read() 中完成的。
drivers/staging/android/binder.c
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
......
while (1) {
switch (w->type) {
// binder_transaction() 将工作 BINDER_WORK_TRANSACTION 加入队列后唤醒目标进程
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
......
// 只有 BINDER_WORK_TRANSACTION 会取出传输事件 t,其他类型处理完后 t 为空,继续下一轮循环
if (!t)
continue;
BUG_ON(t->buffer == NULL);
// target_node 存在时表明是 BC_TRANSACTION 产生的工作事件,需要回复 BR_TRANSACTION。
// 否则是 BC_REPLY 产生的工作事件,回复 BR_REPLY
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
// 同步传输时,sender_pid 为调用进程的 pid。异步传输时为 0。
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {tr.sender_pid = 0;}
......
// 将数据拷贝到用户空间
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
......
// 从队列中移除当前工作事件
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
// 同步传输时,命令为 BR_TRANSACTION 的情况下,将工作事件入栈
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
// 其他情况下,表明传输已经完成,释放工作事件
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
......
}
BR_REPLY 也是同样的流程,区别在于发送 BR_REPLY 意味着传输已经完成,可以释放工作事件。
4.BC_REPLY
Server 端接收到 BR_TRANSACTION 命令后,取出 buffer 进行处理,完成后会发送 BC_REPLY 给 Binder 驱动。
frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::executeCommand(int32_t cmd)
{
......
case BR_TRANSACTION:
{
// 取出传输数据
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
......
Parcel reply;
......
// BBinder 对数据进行解析
if (tr.target.ptr) {sp<BBinder> b((BBinder*)tr.cookie);
error = b->transact(tr.code, buffer, &reply, tr.flags);
} else {error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
if ((tr.flags & TF_ONE_WAY) == 0) {LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
// 同步传输需要发送 BC_REPLY
sendReply(reply, 0);
} else {LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
......
}
break;
......
}
BC_REPLY 也是通过 binder_transaction() 处理,只是需要设置参数 reply。下面只分析与之前不同的地方。
drivers/staging/android/binder.c
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
......
if (reply) {
// 从当前线程中出栈
in_reply_to = thread->transaction_stack;
......
thread->transaction_stack = in_reply_to->to_parent;
// 目标线程为发起端线程
target_thread = in_reply_to->from;
......
target_proc = target_thread->proc;
} else {......}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {......}
......
// reply 传输的 from 为空
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
......
if (reply) {
// 从目标线程中出栈
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {......} else {......}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
......
}
binder_transaction() 执行完 BC_REPLY 后同样是加入工作队列,唤醒 target。BINDER_WORK_TRANSACTION_COMPLETE 会将 BR_TRANSACTION_COMPLETE 返回给当前线程,也就是 Server 端。BINDER_WORK_TRANSACTION 由 target 处理,这时的 target 为 Client 端。根据上面分析,驱动将返回 BR_REPLY 给 Client 端。
5.BC_FREE_BUFFER
Binder 的每一次传输,无论是从 Client 到 Server 还是 Server 到 Client,在对端接收到数据并处理完成后,都会通过 BC_FREE_BUFFER 来释放传输空间。在同步传输中会包含两次传输,由 Client 发出的 BC_TRANSACTION 和由 Server 发出的 BC_REPLY。
在 BC_TRANSACTION 中,Server 端接收到 BR_TRANSACTION 命令开始处理 Binder 数据,处理完成后就会发出 BC_FREE_BUFFER 来释放 buffer。这个释放命令不是直接发出的,是通过 Parcel 的释放函数完成的。将 freeBuffer 设置为 Parcel 实例 buffer 的释放函数,在 buffer 析构时会调用释放函数 freeBuffer。
frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::executeCommand(int32_t cmd)
{
......
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
......
Parcel buffer;
// 设置 buffer 的释放函数为 freeBuffer
buffer.ipcSetDataReference(reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
......
sp<BBinder> b((BBinder*)tr.cookie);
error = b->transact(tr.code, buffer, &reply, tr.flags);
......
}
break;
......
}
......
void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data,
size_t /*dataSize*/,
const binder_size_t* /*objects*/,
size_t /*objectsSize*/, void* /*cookie*/)
{
......
if (parcel != NULL) parcel->closeFileDescriptors();
// 发送 BC_FREE_BUFFER 命令
IPCThreadState* state = self();
state->mOut.writeInt32(BC_FREE_BUFFER);
state->mOut.writePointer((uintptr_t)data);
}
在 BC_REPLY 中,Client 端接收到 BR_REPLY 会将 freeBuffer 设置为释放函数或直接调用 freeBuffer。
frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
......
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {if ((tr.flags & TF_STATUS_CODE) == 0) {
// 设置 freeBuffer 为释放函数
reply->ipcSetDataReference(reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
// 发生错误时直接调用 freeBuffer
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
......
}
......
freeBuffer() 发送 BC_FREE_BUFFER 命令给 Binder 驱动。
drivers/staging/android/binder.c
static void binder_free_buf(struct binder_proc *proc,
struct binder_buffer *buffer)
{
size_t size, buffer_size;
// 获取 buffer 的大小
buffer_size = binder_buffer_size(proc, buffer);
size = ALIGN(buffer->data_size, sizeof(void *)) +
ALIGN(buffer->offsets_size, sizeof(void *));
......
// 更新异步传输的 free_async_space
if (buffer->async_transaction) {proc->free_async_space += size + sizeof(struct binder_buffer);
......
}
// 释放物理内存
binder_update_page_range(proc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
NULL);
// 将 buffer 从 allocated_buffers 树上擦除
rb_erase(&buffer->rb_node, &proc->allocated_buffers);
buffer->free = 1;
// 向后合并空闲 buffer
if (!list_is_last(&buffer->entry, &proc->buffers)) {
struct binder_buffer *next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
if (next->free) {rb_erase(&next->rb_node, &proc->free_buffers);
binder_delete_free_buffer(proc, next);
}
}
// 向前合并空闲 buffer
if (proc->buffers.next != &buffer->entry) {
struct binder_buffer *prev = list_entry(buffer->entry.prev,
struct binder_buffer, entry);
if (prev->free) {binder_delete_free_buffer(proc, buffer);
rb_erase(&prev->rb_node, &proc->free_buffers);
buffer = prev;
}
}
// 将合并后的 buffer 插入到 free_buffers 上
binder_insert_free_buffer(proc, buffer);
}
......
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
......
case BC_FREE_BUFFER: {
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
// 获取用户空间数据
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
// 从 buffer 树中找到相应的 binder_buffer
buffer = binder_buffer_lookup(proc, data_ptr);
......
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
}
// 异步传输在释放 buffer 时将未完成的 async_todo 工作移动到线程的 todo 队列上
if (buffer->async_transaction && buffer->target_node) {BUG_ON(!buffer->target_node->has_async_transaction);
if (list_empty(&buffer->target_node->async_todo))
buffer->target_node->has_async_transaction = 0;
else
list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
}
trace_binder_transaction_buffer_release(buffer);
// 减少 binder 引用计数
binder_transaction_buffer_release(proc, buffer, NULL);
// 释放 buffer 内存空间
binder_free_buf(proc, buffer);
break;
}
......
}
Binder 异步传输
Binder 通信中,如果 Client 端只希望发送数据而不管 Server 端的执行结果,可以使用异步传输。异步传输需要在传输数据的 flag 中设置 TF_ONE_WAY 位,简单的传输流程如下图。
异步传输在 Binder 驱动中的处理流程与同步传输一样,我们重点看一下对 TF_ONE_WAY 标志的处理流程。
drivers/staging/android/binder.c
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
size_t data_size,
size_t offsets_size, int is_async)
{
......
// 异步传输需要考虑 free_async_space
if (is_async &&
proc->free_async_space < size + sizeof(struct binder_buffer)) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
proc->pid, size);
return NULL;
}
......
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
// buffer 中设置 is_async 标志
buffer->async_transaction = is_async;
if (is_async) {
// 更新 free_async_space
proc->free_async_space -= size + sizeof(struct binder_buffer);
binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
proc->pid, size, proc->free_async_space);
}
return buffer;
}
......
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
......
// 异步传输时需要将传输事件的 from 设置为空
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
......
// 分配 buffer 时带有异步标志位
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
......
if (reply) {......} else if (!(t->flags & TF_ONE_WAY)) {......} else {
// 异步传输使用 async_todo 队列
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
......
}
......
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
......
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {......} else {
// 异步传输是单向的,不需要回复。
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
......
}