Linux dma_buf (8): A Simplified ION Driver

1. Preface

In the previous article we learned how to allocate memory with alloc_page(), but that driver could only hand out a single PAGE_SIZE buffer. In this article we build on it and implement a simplified ION driver, so that buffers of arbitrary size can be allocated.

2. Preparation

To stay compatible with the kernel's standard ION driver, this article reuses definitions from the driver/staging/android/uapi/ion.h header, so that userspace can directly use struct ion_allocation_data and the ION_IOC_ALLOC macro:

struct ion_allocation_data {
	__u64 len;
	__u32 heap_id_mask;
	__u32 flags;
	__u32 fd;
	__u32 unused;
};

#define ION_IOC_MAGIC		'I'
#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
									struct ion_allocation_data)

This ION driver only uses the len and fd members of ion_allocation_data; the remaining members are ignored.

3. Example

exporter-ion.c

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>

struct ion_allocation_data {
        __u64 len;
        __u32 heap_id_mask;
        __u32 flags;
        __u32 fd;
        __u32 unused;
};

#define ION_IOC_MAGIC           'I'
#define ION_IOC_ALLOC           _IOWR(ION_IOC_MAGIC, 0, \
                                        struct ion_allocation_data)

struct ion_data {
        int npages;
        struct page *pages[];
};

static int ion_attach(struct dma_buf *dmabuf, struct device *dev,
                        struct dma_buf_attachment *attachment)
{
        pr_info("dmabuf attach device: %s\n", dev_name(dev));
        return 0;
}

static void ion_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
{
        pr_info("dmabuf detach device: %s\n", dev_name(attachment->dev));
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                         enum dma_data_direction dir)
{
        struct ion_data *data = attachment->dmabuf->priv;
        struct sg_table *table;
        struct scatterlist *sg;
        int i;

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);

        if (sg_alloc_table(table, data->npages, GFP_KERNEL)) {
                kfree(table);
                return ERR_PTR(-ENOMEM);
        }

        /* one sg entry per page: the pages were allocated individually
         * and are not guaranteed to be physically contiguous
         */
        sg = table->sgl;
        for (i = 0; i < data->npages; i++) {
                sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
                sg = sg_next(sg);
        }

        /* no real DMA device in this demo, hence the NULL dev argument */
        dma_map_sg(NULL, table->sgl, table->nents, dir);

        return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                               struct sg_table *table,
                               enum dma_data_direction dir)
{
        dma_unmap_sg(NULL, table->sgl, table->nents, dir);
        sg_free_table(table);
        kfree(table);
}

static void ion_release(struct dma_buf *dma_buf)
{
        struct ion_data *data = dma_buf->priv;
        int i;

        pr_info("dmabuf release\n");

        for (i = 0; i < data->npages; i++)
                put_page(data->pages[i]);

        kfree(data);
}

static void *ion_vmap(struct dma_buf *dma_buf)
{
        struct ion_data *data = dma_buf->priv;

        return vm_map_ram(data->pages, data->npages, 0, PAGE_KERNEL);
}

static void ion_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct ion_data *data = dma_buf->priv;

        vm_unmap_ram(vaddr, data->npages);
}

static int ion_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct ion_data *data = dma_buf->priv;
        unsigned long vm_start = vma->vm_start;
        int i, ret;

        /* map each (possibly discontiguous) page into the
         * contiguous userspace VMA, one page at a time
         */
        for (i = 0; i < data->npages; i++) {
                ret = remap_pfn_range(vma, vm_start, page_to_pfn(data->pages[i]),
                                        PAGE_SIZE, vma->vm_page_prot);
                if (ret)
                        return ret;
                vm_start += PAGE_SIZE;
        }

        return 0;
}

static int ion_begin_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction dir)
{
        struct dma_buf_attachment *attachment;
        struct sg_table *table;

        /* for simplicity, sync via the first (and, in this demo, the
         * only) attachment's cached sg_table
         */
        attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
        table = attachment->sgt;
        dma_sync_sg_for_cpu(NULL, table->sgl, table->nents, dir);

        return 0;
}

static int ion_end_cpu_access(struct dma_buf *dmabuf,
                                enum dma_data_direction dir)
{
        struct dma_buf_attachment *attachment;
        struct sg_table *table;

        attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
        table = attachment->sgt;
        dma_sync_sg_for_device(NULL, table->sgl, table->nents, dir);

        return 0;
}

static const struct dma_buf_ops exp_dmabuf_ops = {
        .attach = ion_attach,
        .detach = ion_detach,
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .release = ion_release,
        .mmap = ion_mmap,
        .vmap = ion_vmap,
        .vunmap = ion_vunmap,
        .begin_cpu_access = ion_begin_cpu_access,
        .end_cpu_access = ion_end_cpu_access,
};

static struct dma_buf *ion_alloc(size_t size)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *dmabuf;
        struct ion_data *data;
        int i, npages;

        /* round the requested size up to a whole number of pages */
        npages = PAGE_ALIGN(size) / PAGE_SIZE;

        data = kmalloc(sizeof(*data) + npages * sizeof(struct page *),
                                        GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        /* pages are allocated one by one, so they need not be
         * physically contiguous
         */
        data->npages = npages;
        for (i = 0; i < npages; i++) {
                data->pages[i] = alloc_page(GFP_KERNEL);
                if (!data->pages[i])
                        goto err_pages;
        }

        exp_info.ops = &exp_dmabuf_ops;
        exp_info.size = npages * PAGE_SIZE;
        exp_info.flags = O_CLOEXEC;
        exp_info.priv = data;

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf))
                goto err_pages;

        return dmabuf;

err_pages:
        while (i--)
                put_page(data->pages[i]);
        kfree(data);
        return ERR_PTR(-ENOMEM);
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct dma_buf *dmabuf;
        struct ion_allocation_data alloc_data;

        /* currently only the ION_IOC_ALLOC ioctl is supported */
        if (cmd != ION_IOC_ALLOC)
                return -EINVAL;

        if (copy_from_user(&alloc_data, (void __user *)arg, sizeof(alloc_data)))
                return -EFAULT;

        dmabuf = ion_alloc(alloc_data.len);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        /* install an fd for the new dma-buf and hand it back to userspace */
        alloc_data.fd = dma_buf_fd(dmabuf, O_CLOEXEC);

        if (copy_to_user((void __user *)arg, &alloc_data, sizeof(alloc_data)))
                return -EFAULT;

        return 0;
}

static struct file_operations ion_fops = {
        .owner   = THIS_MODULE,
        .unlocked_ioctl   = ion_ioctl,
};

static struct miscdevice mdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ion",
        .fops = &ion_fops,
};

static int __init ion_init(void)
{
        return misc_register(&mdev);
}

static void __exit ion_exit(void)
{
        misc_deregister(&mdev);
}

module_init(ion_init);
module_exit(ion_exit);

MODULE_LICENSE("GPL");  /* dma_buf_export() is EXPORT_SYMBOL_GPL */


As the code above shows, an allocation request of arbitrary size is handled in the driver by a for loop that allocates pages one at a time. Because those pages are not necessarily physically contiguous, an sg_table is used to describe the resulting scatter-gather list.
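To show what a consumer does with that table, here is a minimal importer-side sketch, not part of the driver above: it walks a DMA-mapped sg_table with the kernel's for_each_sg() helper and prints each segment. walk_ion_sgt() is a hypothetical name used only for illustration.

#include <linux/scatterlist.h>
#include <linux/printk.h>

/* hypothetical helper: walk a DMA-mapped sg_table and print each
 * segment's bus address and length
 */
static void walk_ion_sgt(struct sg_table *table)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(table->sgl, sg, table->nents, i)
                pr_info("seg %d: dma_addr=%pad len=%u\n",
                        i, &sg_dma_address(sg), sg_dma_length(sg));
}

Each loop iteration sees one page-sized segment here, since ion_map_dma_buf() fills in one sg entry per page.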

The userspace application:

ion_test.c

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/types.h>	/* __u64, __u32 */

struct ion_allocation_data {
        __u64 len;
        __u32 heap_id_mask;
        __u32 flags;
        __u32 fd;
        __u32 unused;
};

#define ION_IOC_MAGIC           'I'
#define ION_IOC_ALLOC           _IOWR(ION_IOC_MAGIC, 0, \
                                        struct ion_allocation_data)

#define PAGE_SIZE 4096

int main(int argc, char *argv[])
{
	int fd;
	struct ion_allocation_data alloc_data;

	fd = open("/dev/ion", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ion");
		return -1;
	}

	memset(&alloc_data, 0, sizeof(alloc_data));
	alloc_data.len = 3 * PAGE_SIZE;
	if (ioctl(fd, ION_IOC_ALLOC, &alloc_data) < 0) {
		perror("ION_IOC_ALLOC");
		close(fd);
		return -1;
	}

	printf("ion alloc success: size = %llu, dmabuf_fd = %u\n",
			(unsigned long long)alloc_data.len, alloc_data.fd);

	close(fd);

	return 0;
}

This application requests a 3-page physical buffer via the ION_IOC_ALLOC ioctl. If the underlying driver allocates it successfully, the fd corresponding to that dma-buf is returned to the application, which can then mmap() it or pass the fd on to other modules.
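As a sketch of that follow-up mmap step (this helper is not part of ion_test.c above; the name fill_dmabuf() is made up for illustration), the returned dma-buf fd can be mapped like any other fd:

#include <string.h>
#include <sys/mman.h>

/* hypothetical helper: map the dma-buf fd returned by ION_IOC_ALLOC
 * into this process and write a test pattern into the buffer
 */
static int fill_dmabuf(int dmabuf_fd, size_t len)
{
	void *vaddr;

	vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_SHARED, dmabuf_fd, 0);
	if (vaddr == MAP_FAILED)
		return -1;

	memset(vaddr, 0x55, len);
	munmap(vaddr, len);

	return 0;
}

It could be called as fill_dmabuf(alloc_data.fd, 3 * PAGE_SIZE) right after the ioctl; the writes land in the pages set up by the driver's ion_mmap() via remap_pfn_range().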

Note that the 3 pages here are obtained through 3 separate alloc_page() calls, so the pages may be discontiguous; in that sense the buffers this ION driver hands out can loosely be regarded as ION_HEAP_TYPE_SYSTEM allocations. To allocate physically contiguous pages, use alloc_pages() instead.
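For comparison, here is a minimal sketch of that contiguous variant; ion_alloc_contig() is a hypothetical name, and it assumes the rounded-up order stays within the page allocator's MAX_ORDER limit:

#include <linux/gfp.h>
#include <linux/mm.h>

/* hypothetical contiguous variant: one higher-order allocation
 * instead of npages individual alloc_page() calls
 */
static struct page *ion_alloc_contig(size_t size)
{
        unsigned int order = get_order(PAGE_ALIGN(size));

        /* the PAGE_SIZE << order bytes behind the returned page are
         * physically contiguous, so a single sg entry could describe
         * the whole buffer
         */
        return alloc_pages(GFP_KERNEL, order);
}

Freeing would then use __free_pages(page, order) instead of per-page put_page() calls.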

The driver above uses a flexible array member (the pages[] array at the end of struct ion_data) to manage a buffer that is virtually contiguous once mapped, even though its physical pages need not be contiguous.
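The size computation behind that flexible array member is the one subtle part. A minimal sketch, using the struct_size() helper from <linux/overflow.h> that recent kernels provide (the open-coded sizeof arithmetic in ion_alloc() above is equivalent); ion_data_alloc() is a hypothetical name:

#include <linux/overflow.h>
#include <linux/slab.h>

/* struct ion_data ends in a flexible array member, so the allocation
 * must cover the header plus npages page pointers; struct_size()
 * computes sizeof(*data) + npages * sizeof(data->pages[0]) with
 * overflow checking
 */
static struct ion_data *ion_data_alloc(int npages)
{
        struct ion_data *data;

        data = kmalloc(struct_size(data, pages, npages), GFP_KERNEL);
        if (data)
                data->npages = npages;

        return data;
}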
