Binder Driver and Data Structures

The binder driver code lives under drivers/android/ in the kernel source tree.

binder_open

static int binder_open(struct inode *nodp, struct file *filp)
{
  struct binder_proc *proc, *itr; // proc is the binder_proc to be created
  struct binder_device *binder_dev;
  struct binderfs_info *info;
  struct dentry *binder_binderfs_dir_entry_proc = NULL;
  bool existing_pid = false;

  binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
         current->group_leader->pid, current->pid);

  proc = kzalloc(sizeof(*proc), GFP_KERNEL); // allocate the binder_proc in kernel memory
  if (proc == NULL)
    return -ENOMEM;
  spin_lock_init(&proc->inner_lock);
  spin_lock_init(&proc->outer_lock);
  get_task_struct(current->group_leader);
  // record the current process (thread group leader)
  proc->tsk = current->group_leader;
  proc->cred = get_cred(filp->f_cred);
  INIT_LIST_HEAD(&proc->todo); // initialize the proc->todo work list
  init_waitqueue_head(&proc->freeze_wait);
  proc->default_priority = task_nice(current);
  /* binderfs stashes devices in i_private */
  if (is_binderfs_device(nodp)) {
    binder_dev = nodp->i_private;
    info = nodp->i_sb->s_fs_info;
    binder_binderfs_dir_entry_proc = info->proc_log_dir;
  } else {
    binder_dev = container_of(filp->private_data,
            struct binder_device, miscdev);
  }
  refcount_inc(&binder_dev->ref);
  proc->context = &binder_dev->context;
  binder_alloc_init(&proc->alloc); // initialize proc->alloc, which manages the binder_buffers

  binder_stats_created(BINDER_STAT_PROC);
  proc->pid = current->group_leader->pid;
  INIT_LIST_HEAD(&proc->delivered_death);
  INIT_LIST_HEAD(&proc->waiting_threads);
  filp->private_data = proc; // stash proc in the file's private_data

  // take the lock: binder_procs is accessed from multiple threads
  mutex_lock(&binder_procs_lock);
  hlist_for_each_entry(itr, &binder_procs, proc_node) {
    if (itr->pid == proc->pid) {
      existing_pid = true;
      break;
    }
  }
  // insert proc_node at the head of the global binder_procs list
  hlist_add_head(&proc->proc_node, &binder_procs);
  // release the lock
  mutex_unlock(&binder_procs_lock);

  if (binder_debugfs_dir_entry_proc && !existing_pid) {
    char strbuf[11];

    snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
    /*
     * proc debug entries are shared between contexts.
     * Only create for the first PID to avoid debugfs log spamming
     * The printing code will anyway print all contexts for a given
     * PID so this is not a problem.
     */
    proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
      binder_debugfs_dir_entry_proc,
      (void *)(unsigned long)proc->pid,
      &proc_fops);
  }

  if (binder_binderfs_dir_entry_proc && !existing_pid) {
    char strbuf[11];
    struct dentry *binderfs_entry;

    snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
    /*
     * Similar to debugfs, the process specific log file is shared
     * between contexts. Only create for the first PID.
     * This is ok since same as debugfs, the log file will contain
     * information on all contexts of a given PID.
     */
    binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
      strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
    if (!IS_ERR(binderfs_entry)) {
      proc->binderfs_entry = binderfs_entry;
    } else {
      int error;

      error = PTR_ERR(binderfs_entry);
      pr_warn("Unable to create file %s in binderfs (error %d)\n",
        strbuf, error);
    }
  }

  return 0;
}
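
From user space, binder_open runs as a side effect of opening the binder device node. A minimal sketch of that step (the O_RDWR | O_CLOEXEC flags follow what libbinder's ProcessState uses; the rest is illustrative):

#include <fcntl.h>
#include <stdio.h>

int main(void)
{
  /* open() on the device node invokes binder_open(), which allocates
   * a binder_proc for this process, stores it in the struct file's
   * private_data, and links it into the global binder_procs list */
  int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
  if (fd < 0) {
    perror("open /dev/binder");
    return 1;
  }
  /* fd is now ready for mmap() and the BINDER_* ioctls */
  return 0;
}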

binder_mmap

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
  // fetch the binder_proc saved in binder_open
  struct binder_proc *proc = filp->private_data;

  if (proc->tsk != current->group_leader)
    return -EINVAL;

  binder_debug(BINDER_DEBUG_OPEN_CLOSE,
         "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
         __func__, proc->pid, vma->vm_start, vma->vm_end,
         (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
         (unsigned long)pgprot_val(vma->vm_page_prot));

  if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
    pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
           proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
    return -EPERM;
  }
  vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);

  // install the vma operations
  vma->vm_ops = &binder_vm_ops;
  // save the binder_proc in the vma
  vma->vm_private_data = proc;

  return binder_alloc_mmap_handler(&proc->alloc, vma);
}
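
binder_mmap only accepts mappings that pass the FORBIDDEN_MMAP_FLAGS check (in particular, writable mappings are refused), and binder_alloc_mmap_handler below caps the size at SZ_4M. A sketch of the matching user-space call, modeled on libbinder's ProcessState; the BINDER_VM_SIZE value (about 1 MB minus two pages) is an assumption borrowed from libbinder:

#include <sys/mman.h>
#include <unistd.h>

/* size used by libbinder (assumed here): ~1 MB minus two pages */
#define BINDER_VM_SIZE ((1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2)

/* fd is the descriptor returned by open("/dev/binder", ...) */
static void *map_binder_buffer(int fd)
{
  /* PROT_READ only: the driver rejects writable mappings, since the
   * kernel copies transaction data into these pages and user space
   * merely reads them */
  void *base = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
                    MAP_PRIVATE | MAP_NORESERVE, fd, 0);
  return base == MAP_FAILED ? NULL : base;
}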

int binder_alloc_mmap_handler(struct binder_alloc *alloc,
            struct vm_area_struct *vma)
{
  int ret;
  const char *failure_string;
  struct binder_buffer *buffer;

  if (unlikely(vma->vm_mm != alloc->mm)) {
    ret = -EINVAL;
    failure_string = "invalid vma->vm_mm";
    goto err_invalid_mm;
  }

  mutex_lock(&binder_alloc_mmap_lock);
  if (alloc->buffer_size) {
    ret = -EBUSY;
    failure_string = "already mapped";
    goto err_already_mapped;
  }
  // cap the mapping at SZ_4M of virtual address space
  alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
           SZ_4M);
  mutex_unlock(&binder_alloc_mmap_lock);

  // alloc->buffer points at the user-space vm_start
  alloc->buffer = (void __user *)vma->vm_start;

  // alloc->pages holds one entry per page of the mapping; only this
  // bookkeeping array is allocated here, the physical pages backing
  // the buffer are allocated lazily on demand
  alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
             sizeof(alloc->pages[0]),
             GFP_KERNEL);
  if (alloc->pages == NULL) {
    ret = -ENOMEM;
    failure_string = "alloc page array";
    goto err_alloc_pages_failed;
  }

  // allocate the first binder_buffer, which initially spans the whole mapping
  buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
  if (!buffer) {
    ret = -ENOMEM;
    failure_string = "alloc buffer struct";
    goto err_alloc_buf_struct_failed;
  }

  // the buffer's user_data points at alloc->buffer (a user-space virtual address)
  buffer->user_data = alloc->buffer;
  list_add(&buffer->entry, &alloc->buffers);
  buffer->free = 1;
  // insert the binder_buffer into alloc->free_buffers
  binder_insert_free_buffer(alloc, buffer);
  alloc->free_async_space = alloc->buffer_size / 2;

  /* Signal binder_alloc is fully initialized */
  binder_alloc_set_vma(alloc, vma);

  return 0;

err_alloc_buf_struct_failed:
  kfree(alloc->pages);
  alloc->pages = NULL;
err_alloc_pages_failed:
  alloc->buffer = NULL;
  mutex_lock(&binder_alloc_mmap_lock);
  alloc->buffer_size = 0;
err_already_mapped:
  mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
  binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
         "%s: %d %lx-%lx %s failed %d\n", __func__,
         alloc->pid, vma->vm_start, vma->vm_end,
         failure_string, ret);
  return ret;
}
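
binder_insert_free_buffer keeps free buffers in an rb-tree ordered by size, which is what later lets allocations do a best-fit search. A simplified sketch of that insertion (debug output and sanity checks stripped; binder_alloc_buffer_size derives a buffer's extent from its successor in the buffers list):

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
  struct rb_node **p = &alloc->free_buffers.rb_node;
  struct rb_node *parent = NULL;
  struct binder_buffer *buffer;
  size_t buffer_size;
  size_t new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

  /* walk down the size-ordered tree to find the insertion point */
  while (*p) {
    parent = *p;
    buffer = rb_entry(parent, struct binder_buffer, rb_node);
    buffer_size = binder_alloc_buffer_size(alloc, buffer);

    if (new_buffer_size < buffer_size)
      p = &parent->rb_left;
    else
      p = &parent->rb_right;
  }
  rb_link_node(&new_buffer->rb_node, parent, p);
  rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}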

binder_proc

// see drivers/android/binder_internal.h for the related data structures
// per-process state: one binder_proc exists for each process that has opened the driver
struct binder_proc {
  struct hlist_node proc_node;
  struct rb_root threads;
  struct rb_root nodes;
  struct rb_root refs_by_desc; // rb-tree of binder_refs, looked up by handle (desc)
  struct rb_root refs_by_node; // rb-tree of binder_refs, looked up by binder_node
  struct list_head waiting_threads;
  int pid;
  struct task_struct *tsk;
  const struct cred *cred;
  struct hlist_node deferred_work_node;
  int deferred_work;
  int outstanding_txns;
  bool is_dead;
  bool is_frozen;
  bool sync_recv;
  bool async_recv;
  wait_queue_head_t freeze_wait;

  struct list_head todo;
  struct binder_stats stats;
  struct list_head delivered_death;
  int max_threads;
  int requested_threads;
  int requested_threads_started;
  int tmp_ref;
  long default_priority;
  struct dentry *debugfs_entry;
  struct binder_alloc alloc; // allocator managing the memory used by transactions
  struct binder_context *context;
  spinlock_t inner_lock;
  spinlock_t outer_lock;
  struct dentry *binderfs_entry;
  bool oneway_spam_detection_enabled;
};
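
refs_by_desc and refs_by_node index the same binder_ref objects by two different keys, so a reference can be found either from a user-space handle or from its target node. A simplified sketch of the handle lookup, modeled on the kernel's binder_get_ref_olocked (strong/weak checks and locking omitted; the function name here is illustrative):

static struct binder_ref *lookup_ref_by_desc(struct binder_proc *proc,
                                             u32 desc)
{
  struct rb_node *n = proc->refs_by_desc.rb_node;
  struct binder_ref *ref;

  /* plain rb-tree walk keyed on the user-space handle (desc) */
  while (n) {
    ref = rb_entry(n, struct binder_ref, rb_node_desc);

    if (desc < ref->data.desc)
      n = n->rb_left;
    else if (desc > ref->data.desc)
      n = n->rb_right;
    else
      return ref;
  }
  return NULL;
}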


binder_node

// represents a service: each service registered with binder has one binder_node in the driver
struct binder_node {
  int debug_id;
  spinlock_t lock;
  struct binder_work work;
  union {
    struct rb_node rb_node;
    struct hlist_node dead_node;
  };
  struct binder_proc *proc;
  struct hlist_head refs;
  int internal_strong_refs;
  int local_weak_refs;
  int local_strong_refs;
  int tmp_refs;
  binder_uintptr_t ptr;
  binder_uintptr_t cookie;
  struct {
    /*
     * bitfield elements protected by
     * proc inner_lock
     */
    u8 has_strong_ref:1;
    u8 pending_strong_ref:1;
    u8 has_weak_ref:1;
    u8 pending_weak_ref:1;
  };
  struct {
    /*
     * invariant after initialization
     */
    u8 accept_fds:1;
    u8 txn_security_ctx:1;
    u8 min_priority;
  };
  bool has_async_transaction;
  struct list_head async_todo;
};
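
A process's binder_nodes live in its proc->nodes rb-tree, keyed by ptr, the address of the object in the owning process. A simplified sketch of the lookup, modeled on the kernel's binder_get_node (locking and temporary refcounting omitted; the function name is illustrative):

static struct binder_node *lookup_node_by_ptr(struct binder_proc *proc,
                                              binder_uintptr_t ptr)
{
  struct rb_node *n = proc->nodes.rb_node;
  struct binder_node *node;

  /* proc->nodes is keyed by ptr, the object's user-space address */
  while (n) {
    node = rb_entry(n, struct binder_node, rb_node);

    if (ptr < node->ptr)
      n = n->rb_left;
    else if (ptr > node->ptr)
      n = n->rb_right;
    else
      return node;
  }
  return NULL;
}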
