Binder: Getting a Service

Getting a service
(1) The Client process packages its IPC data in a binder_write_read structure, which is passed to the Binder driver.
The IPC data the Client sends: the target process (ServiceManager) + the name of the service to look up + the code of the function to call (the function that returns the service handle).
(2) The Client process sends the command protocol BC_TRANSACTION to the Binder driver; from the protocol payload (the IPC data wrapped in binder_transaction_data) the driver locates the ServiceManager process.
- The Binder driver first immediately queues BINDER_WORK_TRANSACTION_COMPLETE back to the Client process, indicating that the Client's IPC request has been accepted; the Client then re-enters the Binder driver and waits for the service handle to come back.
- The driver wakes up the ServiceManager process to handle the work item (of type BINDER_WORK_TRANSACTION). ServiceManager returns from the Binder driver to user space, where the driver delivers the return protocol BR_TRANSACTION and the corresponding handler function runs.
(3) The ServiceManager process handles the return protocol BR_TRANSACTION, executing the handler that looks up the service handle.
(4) The ServiceManager process sends the command protocol BC_REPLY to the Binder driver; using the service handle obtained in ServiceManager, the driver creates a Binder reference object in the Client process (the process acquiring the service).
- The Binder driver first immediately queues BINDER_WORK_TRANSACTION_COMPLETE back to the ServiceManager process, indicating that the reply was delivered; after handling it, ServiceManager waits for the next Binder IPC request.
- The driver then sends the return protocol BR_REPLY to the Client process, which returns from the Binder driver to user space. The returned data buffer contains one Binder reference object of type BINDER_TYPE_HANDLE (its handle value generated by the Binder driver).
(5) The Client process handles the return protocol BR_REPLY and extracts the returned data (which contains the handle value of the Binder reference object).
At this point the Client process holds the handle value of the Binder reference object. The user-space side of this round trip is sketched below.
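As a user-space sketch of steps (1) through (5), modeled on svcmgr_lookup() in the servicemanager test sources (bctest.c) and the helper library in binder.c; treat it as a sketch under those assumptions rather than verbatim platform code:

uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
    uint32_t handle;
    unsigned iodata[512 / 4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);                  // strict-mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME);   // "android.os.IServiceManager"
    bio_put_string16_x(&msg, name);           // the service being looked up

    // binder_call wraps msg in a binder_transaction_data, fills a
    // binder_write_read, and drives the BC_TRANSACTION ioctl; target is 0,
    // i.e. the ServiceManager handle, and the code is SVC_MGR_CHECK_SERVICE
    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
        return 0;

    handle = bio_get_ref(&reply);             // the driver-generated handle (step 5)

    if (handle)
        binder_acquire(bs, handle);           // take a strong reference on it

    binder_done(bs, &msg, &reply);            // BC_FREE_BUFFER for the reply buffer

    return handle;
}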
// ioctl -> binder_ioctl: the Client process requests a service from a Server process; the request is forwarded by the Binder driver to the ServiceManager process for handling
1. BC_TRANSACTION
The Client process sends the command protocol BC_TRANSACTION, which enters the driver via binder_ioctl.
[-> kernel/drivers/android/binder.c, binder_ioctl()]
// cmd = BINDER_WRITE_READ
// arg = &binder_write_read
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
// Retrieve the binder_proc created for this process (the Client) in binder_open
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
void __user *ubuf = (void __user *)arg;
// Get the binder_thread for the current thread (created on first use, returned directly afterwards)
thread = binder_get_thread(proc);
// cmd = BINDER_WRITE_READ
switch (cmd) {
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
// Copy the binder_write_read passed from user space into the local bwr
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
// write_size is greater than 0: write_buffer carries the data sent to the kernel driver
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
default:
ret = -EINVAL;
goto err;
}
// No error occurred, so 0 is ultimately returned
ret = 0;
err:
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
return ret;
}
[->kernel/drivers/android/binder.c, binder_thread_write()]
static int binder_thread_write(struct binder_proc *proc, // the process acquiring the service
struct binder_thread *thread, // the thread acquiring the service
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // the user-space buffer address
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
// ptr points into write_buffer, so cmd = BC_TRANSACTION
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
// Received data: [BC_TRANSACTION + binder_transaction_data]
switch (cmd) {
case BC_TRANSACTION: // BC_TRANSACTION + binder_transaction_data
case BC_REPLY: {
struct binder_transaction_data tr;
// Copy the binder_transaction_data that follows BC_TRANSACTION from user space (ptr) into the kernel-space tr
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY); // the last argument is false here
break;
}
}
*consumed = ptr - buffer;
}
return 0;
}
[->kernel/drivers/android/binder.c, binder_transaction()]
static void binder_transaction(struct binder_proc *proc, // the process acquiring the service
struct binder_thread *thread, // the thread acquiring the service
struct binder_transaction_data *tr, int reply) // reply is false
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
uint32_t return_error = BR_OK;
if (reply) {
...
} else { // reply is false, so this branch is taken
// tr->target.handle identifies the target process; for ServiceManager the handle value is 0
if (tr->target.handle) {
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
target_node = ref->node;
} else { // this branch is taken
// set target_node to binder_context_mgr_node
target_node = binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}
// (debug-log bookkeeping elided: e->to_node = target_node->debug_id, where e is a binder_transaction_log_entry not shown in this excerpt)
target_proc = target_node->proc;
if (target_proc == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
// On the first request, the acquiring thread's transaction_stack is empty
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
}
if (target_thread) {
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else { // the target thread is NULL, so this branch is taken
// use the target process's todo queue
target_list = &target_proc->todo;
// and the target process's wait queue
target_wait = &target_proc->wait;
}
// Allocate the binder_transaction structure
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
// Allocate the binder_work structure
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node; // target_node = binder_context_mgr_node here (non-NULL in the request phase, NULL in the reply phase)
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
// offp: start of the offsets array (laid out right after the data buffer)
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
// The two copy_from_user calls below copy data from user space into kernel space
// tr->data.ptr.buffer: the binder_transaction_data data buffer, which here contains only the name of the service being looked up
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
// tr->data.ptr.offsets: the offsets array recording the positions of Binder objects in the data buffer (here the buffer contains no Binder objects)
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
// offsets_size: size of the offsets array
off_end = (void *)offp + tr->offsets_size;
// The current data buffer contains no Binder objects, so the for loop is skipped (with Binder objects present it would iterate over them in the range [offp, off_end))
for (; offp < off_end; offp++) {
...
}
if (reply) {
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) { // this branch is taken
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
// Only now is the acquiring thread's transaction_stack set: t (the binder_transaction) is pushed onto the top of the stack, about to be processed
thread->transaction_stack = t;
} else {
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
// Set the work type of t (binder_transaction) to BINDER_WORK_TRANSACTION
t->work.type = BINDER_WORK_TRANSACTION;
// Add t (binder_transaction) to the target's todo queue target_list
list_add_tail(&t->work.entry, target_list);
// Set the work type of tcomplete (binder_work) to BINDER_WORK_TRANSACTION_COMPLETE
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
// Add tcomplete (binder_work) to the acquiring thread's todo queue
list_add_tail(&tcomplete->entry, &thread->todo);
// The target ServiceManager process is asleep; wake it up now
if (target_wait)
wake_up_interruptible(target_wait);
return;
...
}
- When binder_context_mgr_node is assigned: when ServiceManager calls binder_become_context_manager, the driver's BINDER_SET_CONTEXT_MGR handler creates it via binder_new_node (see the sketch below).
- BINDER_WORK_TRANSACTION_COMPLETE: the Binder driver first tells the Client that its get-service request has been accepted; the Client then re-enters the Binder driver to wait for the result.
- BINDER_WORK_TRANSACTION: the work item that the target ServiceManager process has to handle (work type BINDER_WORK_TRANSACTION).
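For reference, an abridged sketch of where that assignment happens, following the BINDER_SET_CONTEXT_MGR case of binder_ioctl() in the 3.x kernel binder.c (security and uid checks elided):

case BINDER_SET_CONTEXT_MGR:
    if (binder_context_mgr_node != NULL) { // may only be set once
        ret = -EBUSY;
        goto err;
    }
    // Create the Binder node (entity object) for ServiceManager; every
    // handle value of 0 later resolves to this node
    binder_context_mgr_node = binder_new_node(proc, 0, 0);
    if (binder_context_mgr_node == NULL) {
        ret = -ENOMEM;
        goto err;
    }
    binder_context_mgr_node->local_weak_refs++;
    binder_context_mgr_node->local_strong_refs++;
    binder_context_mgr_node->has_strong_ref = 1;
    binder_context_mgr_node->has_weak_ref = 1;
    break;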
[->kernel/drivers/android/binder.c, binder_thread_read()]
static int binder_thread_read(struct binder_proc *proc, // the target ServiceManager process
struct binder_thread *thread, // ServiceManager's main thread
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // the user-space buffer address
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
// Write BR_NOOP back to user space
// ptr = buffer + *consumed = bwr.read_buffer + bwr.read_consumed = bwr.read_buffer + 0 = bwr.read_buffer
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
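retry:
// The computation of wait_for_proc_work and the sleep/wake-up logic that
// belong here are elided in this excerpt; the full binder_thread_read
// listing appears in part 3 below.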
// The while loop handles pending work items on the process's or thread's todo queue
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo))
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work) // the ServiceManager process todo queue is not empty
// take the work item off the queue
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {
if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
// dispatch on the work type
switch (w->type) {
case BINDER_WORK_TRANSACTION: { // received after the thread is woken up
t = container_of(w, struct binder_transaction, work);
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: { // waking the target takes time, so the sending thread sees BINDER_WORK_TRANSACTION_COMPLETE first
cmd = BR_TRANSACTION_COMPLETE;
// write cmd = BR_TRANSACTION_COMPLETE back to user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, cmd);
list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
}
// t is non-NULL when the work item was BINDER_WORK_TRANSACTION
if (!t)
continue;
// In the request phase target_node is non-NULL (here binder_context_mgr_node); in the reply phase it is NULL
if (t->buffer->target_node) { // this is the request phase, so this branch is taken
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION; // BR_TRANSACTION
} else {
tr.target.ptr = NULL;
tr.cookie = NULL;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
} else {
tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
// The data buffer of tr (binder_transaction_data) contains only the name of the service being looked up
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
// write cmd = BR_TRANSACTION back to user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
// Write tr (binder_transaction_data, whose data buffer contains only the service name) back to user space
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_stat_br(proc, thread, cmd);
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
done:
*consumed = ptr - buffer;
if (proc->requested_threads + proc->ready_threads == 0 && // pending spawn requests + idle threads == 0, i.e. both are 0
proc->requested_threads_started < proc->max_threads && // threads spawned on request < max_threads (set to 15 in open_driver in ProcessState.cpp)
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { // the current thread is a registered Binder thread
proc->requested_threads++; // with requested_threads now 1 the condition no longer holds: only one thread is requested at a time; once the new thread starts, the driver decrements requested_threads again
// Write the return protocol BR_SPAWN_LOOPER into the user-space buffer, asking process proc to add a new thread to its Binder thread pool
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
}
// return
return 0;
}
The ServiceManager main thread now has a pending work item; it is woken up inside the Binder driver's binder_thread_read, and back in user space binder_parse handles the work item according to the return protocol it received. The read loop that feeds binder_parse is sketched next.
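A sketch of that read loop, following binder_loop() in the servicemanager sources (binder.c):

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;     // tell the driver this thread enters the loop
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // read_size > 0, write_size == 0: sleeps in binder_thread_read until work arrives
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;

        // Dispatch BR_NOOP / BR_TRANSACTION / ...; func == svcmgr_handler
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res <= 0) // error, or an unexpected BR_REPLY
            break;
    }
}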
2. BR_TRANSACTION
[->binder.c, binder_parse()]
// func = svcmgr_handler
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_NOOP: // first the BR_NOOP that binder_thread_read put_user'd earlier (already received before going to sleep)
break;
case BR_TRANSACTION: { // handle the request forwarded by the driver (received only after ServiceManager is woken up)
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr; // binder_transaction_data describes the data transferred during IPC
// func = svcmgr_handler
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
// Initialize reply (results will be collected in the rdata buffer), used to send data back to the Binder driver
bio_init(&reply, rdata, sizeof(rdata), 4);
// Initialize msg (parses and holds the IPC data read from the Binder driver); msg now contains the service name
// parse the data: convert txn (binder_transaction_data) into msg (binder_io)
bio_init_from_txn(&msg, txn);
// Call svcmgr_handler to process BR_TRANSACTION, collecting the result in reply
res = func(bs, txn, &msg, &reply);
// Send the result in reply back to the Binder driver
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
case BR_REPLY: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if (bio) {
bio_init_from_txn(bio, txn);
bio = 0;
}
ptr += sizeof(*txn);
r = 0;
break;
}
default:
return -1;
}
}
return r;
}
[->service_manager.c, svcmgr_handler()]
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
if ((len != (sizeof(svcmgr_id) / 2)) || memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
return -1;
}
switch(txn->code) {
// look up a service
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
// the service name
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
// Look up the service handle by service name
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
// Write the service handle into reply, bound for the Binder driver
bio_put_ref(reply, handle);
// return 0
return 0;
default:
return -1;
}
// Success: write 00 00 00 00 into reply (binder_io) as the status word; it travels back to the Client via the Binder driver
bio_put_uint32(reply, 0);
return 0;
}
[->service_manager.c, do_find_service()]
uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
struct svcinfo *si;
// may this caller look the service up at all?
if (!svc_can_find(s, len, spid)) {
return 0;
}
// look the service up by name
si = find_svc(s, len);
if (si && si->handle) {
if (!si->allow_isolated) {
// If this service does not allow access from isolated processes,
// check whether the calling uid is an isolated one
uid_t appid = uid % AID_USER;
if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
return 0;
}
}
// return the service handle
return si->handle;
} else {
return 0;
}
}
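do_find_service consults ServiceManager's in-process registry, a simple singly linked list. A sketch of svcinfo and find_svc, following service_manager.c:

struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;            // driver-generated handle of the registered service
    struct binder_death death;
    int allow_isolated;
    size_t len;                 // name length, in uint16_t units
    uint16_t name[0];           // UTF-16 service name
};

struct svcinfo *svclist = NULL; // head of the registry; do_add_service prepends nodes

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    // Linear scan: a match must agree on both length and contents
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}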
[->binder.c, bio_put_ref()]
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
struct flat_binder_object *obj;
if (handle)
// Allocate an obj (flat_binder_object) from bio's data buffer and record its offset in the offsets array
obj = bio_alloc_obj(bio);
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
// Initialize obj (flat_binder_object)
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->type = BINDER_TYPE_HANDLE; // the Binder object described by obj is of type BINDER_TYPE_HANDLE
obj->handle = handle; // the handle value of the Binder object described by obj
obj->cookie = 0;
}
[->binder.c, binder_send_reply()]
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply, // the data written back to the Binder driver
binder_uintptr_t buffer_to_free,
int status) // 0 on success
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free; // the data following BC_FREE_BUFFER is the user-space address of the kernel buffer
data.cmd_reply = BC_REPLY; // the data following BC_REPLY is a binder_transaction_data
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offsets_size = 0;
data.txn.data.ptr.buffer = (uintptr_t)&status;
data.txn.data.ptr.offsets = 0;
} else { // the normal path
// Copy reply's (binder_io) data buffer and offsets array into the packed struct data (chiefly its binder_transaction_data member)
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0; // size of the data buffer
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0); // size of the offsets array
data.txn.data.ptr.buffer = (uintptr_t)reply->data0; // the data buffer holds one Binder object of type BINDER_TYPE_HANDLE
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0; // the offsets array records the Binder object's position in the data buffer
}
// Send the command protocols BC_FREE_BUFFER and BC_REPLY to the Binder driver
binder_write(bs, &data, sizeof(data));
}
3. BC_REPLY
[->binder.c, binder_write()]
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
// Send the command protocols BC_FREE_BUFFER and BC_REPLY to the Binder driver via the BINDER_WRITE_READ ioctl
// write_size > 0 and read_size == 0: write only, no read; the call returns to ServiceManager user space as soon as the write completes
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
return res;
}
ioctl() hands the binder_write_read to the Binder driver, landing in binder_ioctl(); with write_size greater than 0 and read_size equal to 0, only binder_thread_write is invoked.
[->kernel/drivers/android/binder.c, binder_thread_write()]
static int binder_thread_write(struct binder_proc *proc, // the ServiceManager process
struct binder_thread *thread, // ServiceManager's main thread
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // the user-space buffer address
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
// ptr points into write_buffer: first cmd = BC_FREE_BUFFER, then cmd = BC_REPLY
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
// Received data: [BC_FREE_BUFFER + user-space address of the kernel buffer] + [BC_REPLY + binder_transaction_data]
switch (cmd) {
case BC_FREE_BUFFER: { // BC_FREE_BUFFER + user-space address of the kernel buffer
void __user *data_ptr;
struct binder_buffer *buffer;
if (get_user(data_ptr, (void * __user *)ptr))
return -EFAULT;
ptr += sizeof(void *);
buffer = binder_buffer_lookup(proc, data_ptr);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
if (list_empty(&buffer->target_node->async_todo))
buffer->target_node->has_async_transaction = 0;
else
list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
}
binder_transaction_buffer_release(proc, buffer, NULL);
binder_free_buf(proc, buffer);
break;
}
case BC_TRANSACTION:
case BC_REPLY: { // BC_REPLY + binder_transaction_data
struct binder_transaction_data tr;
// Copy the binder_transaction_data that follows BC_REPLY from user space (ptr) into the kernel-space tr
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY); // the last argument is true here
break;
}
}
*consumed = ptr - buffer;
}
return 0;
}
[->kernel/drivers/android/binder.c, binder_transaction()]
static void binder_transaction(struct binder_proc *proc, // the ServiceManager process (sender of the reply)
struct binder_thread *thread, // ServiceManager's main thread
struct binder_transaction_data *tr, int reply) // reply is true
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
uint32_t return_error = BR_OK;
if (reply) { // reply is true, so this branch is taken
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
// Restore the thread's priority (when the Binder driver dispatched the request to this thread it set the thread's priority to that of the requesting thread; the request is done, so the priority is restored)
binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
goto err_bad_call_stack;
}
// Put to_parent (the next transaction to handle) on top of the transaction stack of thread (ServiceManager's main thread), to be processed next
thread->transaction_stack = in_reply_to->to_parent;
// target_thread: the peer of thread (ServiceManager's main thread), i.e. the thread that issued the get-service request
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
// target_proc: the process acquiring the service
target_proc = target_thread->proc;
} else {
...
}
if (target_thread) { // the target thread is non-NULL, so this branch is taken
// use the target thread's todo queue
target_list = &target_thread->todo;
// and the target thread's wait queue
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
// Allocate the binder_transaction structure
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
// Allocate the binder_work structure
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node; // target_node is NULL here (non-NULL in the request phase, NULL in the reply phase)
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
// offp: start of the offsets array (laid out right after the data buffer)
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
// The two copy_from_user calls below copy data from user space into kernel space
// tr->data.ptr.buffer: the binder_transaction_data data buffer holds one Binder object of type BINDER_TYPE_HANDLE
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
// tr->data.ptr.offsets: the offsets array recording the positions of Binder objects in the data buffer
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
// offsets_size: size of the offsets array
off_end = (void *)offp + tr->offsets_size;
// Iterate over the Binder objects in the data buffer, range [offp, off_end)
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
// data-buffer start + offset locates one Binder object, assigned to fp (flat_binder_object)
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {
case BINDER_TYPE_HANDLE: // BINDER_TYPE_HANDLE
case BINDER_TYPE_WEAK_HANDLE: {
// Look up, by fp->handle (the service handle obtained in ServiceManager), the Binder reference object in proc (the ServiceManager process)
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
// ref: the Binder reference object
// ref->node: the Binder node (entity object) that ref refers to
// ref->node->proc: the process hosting that Binder node, i.e. the process providing the service
// target_proc: the process acquiring the service
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
} else { // the providing process != the acquiring process, so this branch is taken
struct binder_ref *new_ref;
// Look up a Binder reference object for ref->node in target_proc (the acquiring process), creating one if none exists, and return it
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
// Set fp->handle to the new Binder reference object's handle value
// fp points into t->buffer->data, where t is the binder_transaction being delivered
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
}
} break;
}
}
if (reply) {
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else {
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
// Set the work type of t (binder_transaction) to BINDER_WORK_TRANSACTION
t->work.type = BINDER_WORK_TRANSACTION;
// Add t (binder_transaction) to the target thread's todo queue target_list
list_add_tail(&t->work.entry, target_list);
// Set the work type of tcomplete (binder_work) to BINDER_WORK_TRANSACTION_COMPLETE
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
// Add tcomplete (binder_work) to the ServiceManager main thread's todo queue
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
...
}
- BINDER_WORK_TRANSACTION_COMPLETE: the Binder driver first tells the ServiceManager process that its reply was delivered successfully; after handling it, ServiceManager waits for the next Binder IPC request.
- BINDER_WORK_TRANSACTION: the work item that the target Client thread has to handle (work type BINDER_WORK_TRANSACTION).
[->kernel/drivers/android/binder.c, binder_get_ref_for_node()]
// When a service is registered, a Binder reference object for its Binder node is created in the ServiceManager process; later lookups simply return the reference that already exists
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, // the process acquiring the service
struct binder_node *node) // the Binder node (entity object) the reference refers to
{
struct rb_node *n;
// proc: the process acquiring the service
// proc->refs_by_node: red-black tree of all Binder reference objects in that process, keyed by node
// proc->refs_by_node.rb_node: root of that tree
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
struct binder_ref *ref, *new_ref;
// On first entry the tree is empty and the while loop is skipped
// Walk refs_by_node (whose nodes are keyed by the reference's node field) to check whether a Binder reference object for this Binder node already exists
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
if (node < ref->node)
p = &(*p)->rb_left;
else if (node > ref->node)
p = &(*p)->rb_right;
else // node == ref->node: the Binder reference object for this Binder node already exists
// return the existing Binder reference object
return ref;
}
// Reaching here means no Binder reference object for this Binder node was found in the tree
// Create a new Binder reference object
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
new_ref->debug_id = ++binder_last_id;
// the new reference belongs to proc, the process acquiring the service
new_ref->proc = proc;
// and refers to node, the Binder node (entity object)
new_ref->node = node;
// Link the new reference into the acquiring process's refs_by_node tree, keyed by node
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
// node (the Binder node being referenced) is not ServiceManager's binder_context_mgr_node here, so the handle value starts at 1 (0 is reserved for the context manager)
new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
// On first entry the tree is empty and the for loop is skipped
// Walk refs_by_desc (whose nodes are keyed by the reference's desc, i.e. the handle value)
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
// after visiting every node, new_ref->desc ends up one greater than the largest desc in use, and the loop exits
if (ref->desc > new_ref->desc)
break;
// bump the handle value new_ref->desc to ref->desc + 1
new_ref->desc = ref->desc + 1;
}
// root of the acquiring process's refs_by_desc tree
p = &proc->refs_by_desc.rb_node;
// We get here because no existing reference matched and a new one was created; now
// walk refs_by_desc (keyed by desc) to find the insertion point p for the new reference
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
// new_ref is not yet on refs_by_desc, so the final else can never hit; we descend left or right until *p is NULL (which never happens for a node already on the tree), then exit the loop
if (new_ref->desc < ref->desc)
p = &(*p)->rb_left;
else if (new_ref->desc > ref->desc)
p = &(*p)->rb_right;
else
BUG();
}
// Link the new reference into the acquiring process's refs_by_desc tree, keyed by desc
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
if (node) {
// Add the reference, linked through its node_entry field, to the Binder node's list of references
// node->refs: the Binder node's list of Binder references
hlist_add_head(&new_ref->node_entry, &node->refs);
}
// Return the newly created Binder reference object
return new_ref;
}
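To make the handle numbering concrete: desc values are assigned densely and in ascending order per process. A toy, self-contained model of the assignment loop above (next_desc is a hypothetical helper, not kernel code):

#include <stddef.h>

// Given the handles already in use in one process, sorted ascending, a new
// reference gets the smallest unused value at or above the starting point
// (1 for ordinary nodes, 0 for the context-manager node).
static unsigned next_desc(const unsigned *in_use, size_t n, int is_context_mgr)
{
    unsigned desc = is_context_mgr ? 0 : 1;
    for (size_t i = 0; i < n; i++) {
        if (in_use[i] > desc)
            break;              // found a gap: desc is free
        desc = in_use[i] + 1;   // otherwise advance past this handle
    }
    return desc;
}

// Example: with handles {0, 1, 2} in use, next_desc yields 3, matching the
// kernel loop, which ends with new_ref->desc one past the largest desc when
// there are no gaps.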
[->kernel/drivers/android/binder.c, binder_thread_read()]
static int binder_thread_read(struct binder_proc *proc, // the process acquiring the service
struct binder_thread *thread, // the thread acquiring the service
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // the user-space buffer address
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
// Write BR_NOOP back to user space
// ptr = buffer + *consumed = bwr.read_buffer + bwr.read_consumed = bwr.read_buffer + 0 = bwr.read_buffer
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
// The thread's transaction stack is empty (it is not waiting for another thread to finish a transaction) && its own todo queue is empty: the thread may go wait on the process todo queue
wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
// mark the thread as waiting (idle); the flag is cleared again after waking, below
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (wait_for_proc_work)
proc->ready_threads++;
// The thread waits for work items that may appear on the process todo queue
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) {
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
}
binder_set_nice(proc->default_priority);
// non_block: do not block; when there is no work, return immediately instead of sleeping
if (non_block) {
if (!binder_has_proc_work(proc, thread))
ret = -EAGAIN;
} else
// sleep until the process has pending work
ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else { // the thread handles work items that may be pending on its own todo queue
if (non_block) {
if (!binder_has_thread_work(thread))
ret = -EAGAIN;
} else
// sleep until this thread itself has pending work
ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
}
if (wait_for_proc_work)
proc->ready_threads--;
// clear the waiting (idle) flag
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
return ret;
// The while loop handles pending work items on the process's or thread's todo queue
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
// the thread's own todo queue is not empty
if (!list_empty(&thread->todo))
// take the work item off the queue
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work)
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {
if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
// dispatch on the work type
switch (w->type) {
case BINDER_WORK_TRANSACTION: { // received after the thread is woken up
t = container_of(w, struct binder_transaction, work);
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: { // waking the target takes time, so the sending thread sees BINDER_WORK_TRANSACTION_COMPLETE first
cmd = BR_TRANSACTION_COMPLETE;
// write cmd = BR_TRANSACTION_COMPLETE back to user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, cmd);
list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
}
// t is non-NULL when the work item was BINDER_WORK_TRANSACTION
if (!t)
continue;
// In the reply phase target_node is NULL (it is set only in the request phase)
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else { // this is the reply phase, so this branch is taken
tr.target.ptr = NULL;
tr.cookie = NULL;
cmd = BR_REPLY; // BR_REPLY
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
} else {
tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
// The data buffer of tr (binder_transaction_data) holds one Binder reference object of type BINDER_TYPE_HANDLE, whose handle value was generated by the Binder driver (handle -> binder_ref -> binder_node -> binder_proc)
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
// write cmd = BR_REPLY back to user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
// Write tr back to user space (its data buffer holds the BINDER_TYPE_HANDLE Binder reference object with the driver-generated handle value)
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_stat_br(proc, thread, cmd);
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
done:
*consumed = ptr - buffer;
if (proc->requested_threads + proc->ready_threads == 0 && // pending spawn requests + idle threads == 0, i.e. both are 0
proc->requested_threads_started < proc->max_threads && // threads spawned on request < max_threads (set to 15 in open_driver in ProcessState.cpp)
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { // the current thread is a registered Binder thread
proc->requested_threads++; // with requested_threads now 1 the condition no longer holds: only one thread is requested at a time; once the new thread starts, the driver decrements requested_threads again
// Write the return protocol BR_SPAWN_LOOPER into the user-space buffer, asking process proc to add a new thread to its Binder thread pool
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
}
// return
return 0;
}
4. BR_REPLY
[->binder.c, binder_parse()]
// bio = reply
// func = svcmgr_handler
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
// read cmd = BR_REPLY
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_REPLY: {
// ptr holds the data that accompanies the return protocol BR_REPLY; interpret it as a binder_transaction_data
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if (bio) {
// Initialize bio = reply (parses and holds the IPC data read from the Binder driver)
// parse the data: convert txn (binder_transaction_data) into bio (binder_io)
bio_init_from_txn(bio, txn);
bio = 0;
}
ptr += sizeof(*txn);
r = 0;
break;
}
default:
return -1;
}
}
return r;
}
5. The Client receives the service handle
// Extract the service handle from reply
uint32_t handle = bio_get_ref(&reply);
uint32_t bio_get_ref(struct binder_io *bio)
{
struct flat_binder_object *obj;
obj = _bio_get_obj(bio);
if (!obj)
return 0;
if (obj->type == BINDER_TYPE_HANDLE)
return obj->handle; // return the Binder reference object's handle value
return 0;
}
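Once the Client holds the handle, later calls target the service directly: the handle becomes target.handle of the next BC_TRANSACTION, which binder_transaction() then resolves through binder_get_ref(proc, tr->target.handle) instead of binder_context_mgr_node. A hypothetical sketch using the same helper library (call_service and SOME_SERVICE_CODE are made-up names):

int call_service(struct binder_state *bs, uint32_t handle)
{
    unsigned iodata[512 / 4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0); // strict-mode header, as in the svcmgr calls

    // binder_call fills binder_transaction_data with target.handle = handle,
    // so the driver routes this transaction to the service's binder_node
    if (binder_call(bs, &msg, &reply, handle, SOME_SERVICE_CODE /* hypothetical */))
        return -1;

    binder_done(bs, &msg, &reply); // BC_FREE_BUFFER for the reply buffer
    return 0;
}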
Original article: https://blog.csdn.net/qq_21586317/article/details/107503557