ION is a shared-memory management mechanism in the Linux kernel, originally developed by Google for graphics rendering and multimedia applications on Android.
This article implements a small mechanism modeled after ION; processes can share memory and communicate through it. It is only a demo, not production code.
Principle diagram: the exporter process hands out an FD, which the other two processes map into their own address spaces.
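Before the kernel code, a minimal user-space sketch of the exporter side may help make this flow concrete. It is not part of the original demo: the file name test_export.c is invented here, it assumes the module below is loaded and exposed as /dev/ion, and it uses the ION_IOC_ALLOC ioctl from the ion.h listed at the end of the article:
test_export.c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "uapi/ion.h"
int main(void)
{
    struct ion_allocation_data alloc = { .len = 4096 };
    int devfd;
    char *p;
    devfd = open("/dev/ion", O_RDWR);
    if (devfd < 0)
        return 1;
    /* ask the driver for one page; it returns a dma-buf fd in alloc.fd */
    if (ioctl(devfd, ION_IOC_ALLOC, &alloc) < 0)
        return 1;
    /* ion_mmap() in the driver backs this mapping with the allocated pages */
    p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, alloc.fd, 0);
    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "hello from exporter");
    /* importers need this pid and fd pair to reach the buffer */
    printf("pid=%d fd=%d\n", getpid(), alloc.fd);
    pause(); /* keep the fd table entry alive while importers attach */
    return 0;
}
The pid/fd pair printed here is exactly what an importer later hands back to the driver.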
code:
ion.c
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/task.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/pid.h>
#include "./uapi/ion.h"
struct ion_data {
    int npages;           /* number of pages backing the buffer */
    int counter;          /* naive refcount: one per exported dma-buf */
    struct page *pages[]; /* the backing pages themselves */
};
struct ion_import_fd {
    int src_pid; /* pid of the exporting process */
    int fd;      /* dma-buf fd as numbered inside the exporter */
    int newfd;   /* out: new dma-buf fd installed in the caller */
};
static int ion_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
{
    pr_info("ion attach called.\n");
    return 0;
}
static void ion_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
{
    pr_info("dmabuf detach device: %s\n", dev_name(attachment->dev));
}
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction dir)
{
    struct ion_data *data = attachment->dmabuf->priv;
    struct sg_table *table;
    struct scatterlist *sg;
    int i;
    table = kmalloc(sizeof(*table), GFP_KERNEL);
    if (!table)
        return ERR_PTR(-ENOMEM);
    if (sg_alloc_table(table, data->npages, GFP_KERNEL)) {
        kfree(table);
        return ERR_PTR(-ENOMEM);
    }
    sg = table->sgl;
    for (i = 0; i < data->npages; i++) {
        sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
        sg = sg_next(sg);
    }
    /* map for the attaching device rather than a NULL device */
    dma_map_sg(attachment->dev, table->sgl, table->nents, dir);
    /* stash the table so the cpu_access callbacks can find it */
    attachment->priv = table;
    return table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction dir)
{
    dma_unmap_sg(attachment->dev, table->sgl, table->nents, dir);
    sg_free_table(table);
    kfree(table);
    attachment->priv = NULL; /* don't leave a dangling table behind */
}
static void ion_release(struct dma_buf *dma_buf)
{
    struct ion_data *data = dma_buf->priv;
    int i;
    pr_info("dmabuf release\n");
    /* naive, non-atomic refcount: each exported dma-buf holds one reference */
    data->counter--;
    if (data->counter)
        return;
    for (i = 0; i < data->npages; i++)
        put_page(data->pages[i]);
    kfree(data);
}
static void *ion_vmap(struct dma_buf *dma_buf)
{
    struct ion_data *data = dma_buf->priv;
    return vm_map_ram(data->pages, data->npages, 0, PAGE_KERNEL);
}
static void ion_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
    struct ion_data *data = dma_buf->priv;
    vm_unmap_ram(vaddr, data->npages);
}
static void *ion_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
    struct ion_data *data = dma_buf->priv;
    return kmap(data->pages[page_num]);
}
static void ion_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
    struct ion_data *data = dma_buf->priv;
    kunmap(data->pages[page_num]);
}
static int ion_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
    struct ion_data *data = dma_buf->priv;
    unsigned long vm_start = vma->vm_start;
    int i;
    /* map the pages one by one; they are not physically contiguous */
    for (i = 0; i < data->npages; i++) {
        if (remap_pfn_range(vma, vm_start, page_to_pfn(data->pages[i]),
                            PAGE_SIZE, vma->vm_page_prot))
            return -EAGAIN;
        vm_start += PAGE_SIZE;
    }
    return 0;
}
static int ion_begin_cpu_access(struct dma_buf *dmabuf,
                                enum dma_data_direction dir)
{
    struct dma_buf_attachment *attachment;
    struct sg_table *table;
    if (list_empty(&dmabuf->attachments))
        return 0;
    attachment = list_first_entry(&dmabuf->attachments,
                                  struct dma_buf_attachment, node);
    table = attachment->priv;
    if (!table)
        return 0;
    dma_sync_sg_for_cpu(attachment->dev, table->sgl, table->nents, dir);
    return 0;
}
static int ion_end_cpu_access(struct dma_buf *dmabuf,
                              enum dma_data_direction dir)
{
    struct dma_buf_attachment *attachment;
    struct sg_table *table;
    if (list_empty(&dmabuf->attachments))
        return 0;
    attachment = list_first_entry(&dmabuf->attachments,
                                  struct dma_buf_attachment, node);
    table = attachment->priv;
    if (!table)
        return 0;
    dma_sync_sg_for_device(attachment->dev, table->sgl, table->nents, dir);
    return 0;
}
/*
 * Note: the .map/.unmap hooks below match the dma_buf_ops layout of the
 * kernel this demo was written against; newer kernels have reworked parts
 * of this interface.
 */
static const struct dma_buf_ops exp_dmabuf_ops = {
    .attach = ion_attach,
    .detach = ion_detach,
    .map_dma_buf = ion_map_dma_buf,
    .unmap_dma_buf = ion_unmap_dma_buf,
    .release = ion_release,
    .map = ion_kmap,
    .unmap = ion_kunmap,
    .mmap = ion_mmap,
    .vmap = ion_vmap,
    .vunmap = ion_vunmap,
    .begin_cpu_access = ion_begin_cpu_access,
    .end_cpu_access = ion_end_cpu_access,
};
static struct dma_buf *ion_alloc(size_t size)
{
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
    struct dma_buf *dmabuf;
    struct ion_data *data;
    int i, npages;
    npages = PAGE_ALIGN(size) / PAGE_SIZE;
    data = kmalloc(sizeof(*data) + npages * sizeof(struct page *),
                   GFP_KERNEL);
    if (!data)
        return ERR_PTR(-ENOMEM);
    data->npages = npages;
    for (i = 0; i < npages; i++)
        data->pages[i] = alloc_page(GFP_KERNEL);
    exp_info.ops = &exp_dmabuf_ops;
    exp_info.size = npages * PAGE_SIZE;
    /* exp_info.flags = O_CLOEXEC; */
    exp_info.flags = O_RDWR;
    exp_info.priv = data;
    dmabuf = dma_buf_export(&exp_info);
    if (IS_ERR(dmabuf)) {
        while (--i >= 0)
            put_page(data->pages[i]);
        kfree(data);
        return dmabuf;
    }
    data->counter = 1;
    return dmabuf;
}
static int _is_dma_buf(struct dma_buf *dmabuf)
{
    /* buffers exported by this module are identified by their ops pointer */
    return dmabuf->ops == &exp_dmabuf_ops;
}
/*
 * Local copies of __fget_files()/fget_task() from fs/file.c: these helpers
 * are not exported to modules, but we need them to look up a file by fd in
 * another task's fd table.
 */
static struct file *__fget_files(struct files_struct *files, unsigned int fd,
                                 fmode_t mask, unsigned int refs)
{
    struct file *file;
    rcu_read_lock();
loop:
    file = fcheck_files(files, fd);
    if (file) {
        /* the file can be racing with the last close(); retry on failure */
        if (file->f_mode & mask)
            file = NULL;
        else if (!get_file_rcu_many(file, refs))
            goto loop;
    }
    rcu_read_unlock();
    return file;
}
static struct file *fget_task(struct task_struct *task, unsigned int fd)
{
    struct file *file = NULL;
    task_lock(task);
    if (task->files)
        file = __fget_files(task->files, fd, 0, 1);
    task_unlock(task);
    return file;
}
int my_dma_buf_export(void *buffer)
{
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
    struct ion_data *data = buffer;
    struct dma_buf *dmabuf;
    int fd;
    exp_info.ops = &exp_dmabuf_ops;
    exp_info.size = data->npages * PAGE_SIZE;
    /* exp_info.flags = O_CLOEXEC; */
    exp_info.flags = O_RDWR;
    exp_info.priv = data;
    /* dma_buf_export() returns ERR_PTR on failure, never NULL */
    dmabuf = dma_buf_export(&exp_info);
    if (IS_ERR(dmabuf)) {
        printk("%s line %d, export dmabuf failure.\n", __func__, __LINE__);
        return PTR_ERR(dmabuf);
    }
    fd = dma_buf_fd(dmabuf, O_CLOEXEC);
    if (fd < 0) {
        printk("%s line %d, reget the fd failure.\n", __func__, __LINE__);
        return fd;
    }
    data->counter++;
    return fd;
}
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct dma_buf *dmabuf;
    struct ion_allocation_data alloc_data;
    struct ion_import_fd import_fd;
    struct ion_data *data;
    /*
     * ION_IOC_ALLOC allocates a new buffer; any other cmd is treated as an
     * import request in this demo.
     */
    if (cmd == ION_IOC_ALLOC) {
        if (copy_from_user(&alloc_data, (void __user *)arg, sizeof(alloc_data)))
            return -EFAULT;
        dmabuf = ion_alloc(alloc_data.len);
        if (IS_ERR(dmabuf))
            return PTR_ERR(dmabuf);
        alloc_data.fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (copy_to_user((void __user *)arg, &alloc_data, sizeof(alloc_data)))
            return -EFAULT;
    } else {
        struct task_struct *task;
        struct file *src_file;
        struct dma_buf *dmabuf;
        int newfd;
        if (copy_from_user(&import_fd, (void __user *)arg, sizeof(struct ion_import_fd)))
            return -EFAULT;
        /* resolve the exporting task from its pid */
        task = get_pid_task(find_vpid(import_fd.src_pid), PIDTYPE_PID);
        if (!task) {
            printk("%s line %d, get task failure.\n", __func__, __LINE__);
            return -ESRCH;
        }
        /* grab a reference to the fd as seen in the exporter's fd table */
        src_file = fget_task(task, import_fd.fd);
        put_task_struct(task);
        if (!src_file) {
            printk("%s line %d, get src file failure.\n", __func__, __LINE__);
            return -EBADF;
        }
        /* sanity-check that the fd really refers to one of our dma-bufs */
        dmabuf = src_file->private_data;
        if (!dmabuf || !_is_dma_buf(dmabuf)) {
            fput(src_file);
            printk("%s line %d not dma buf.\n", __func__, __LINE__);
            return -EINVAL;
        }
        data = (struct ion_data *)dmabuf->priv;
        /* re-export the same backing pages as a new dma-buf fd in the caller */
        newfd = my_dma_buf_export(data);
        if (newfd < 0) {
            fput(src_file);
            printk("%s line %d newfd is invalid.\n", __func__, __LINE__);
            return newfd;
        }
        fput(src_file);
        import_fd.newfd = newfd;
        if (copy_to_user((void __user *)arg, &import_fd, sizeof(struct ion_import_fd)))
            return -EFAULT;
    }
    return 0;
}
static const struct file_operations ion_fops = {
    .owner = THIS_MODULE,
    .unlocked_ioctl = ion_ioctl,
};
static struct miscdevice mdev = {
    .minor = MISC_DYNAMIC_MINOR,
    .name = "ion",
    .fops = &ion_fops,
};
static int __init ion_init(void)
{
    return misc_register(&mdev);
}
static void __exit ion_exit(void)
{
    misc_deregister(&mdev);
}
module_init(ion_init);
module_exit(ion_exit);
MODULE_AUTHOR("zlcao");
MODULE_LICENSE("GPL");
makefile
ifneq ($(KERNELRELEASE),)
obj-m := ion.o
else
KERNELDIR := /lib/modules/$(shell uname -r)/build
PWD := $(shell pwd)
all:
	$(MAKE) -C $(KERNELDIR) M=$(PWD) modules
clean:
	rm -rf *.o *.mod.c *.mod.o *.ko *.symvers *.mod .*.cmd *.order
endif
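With the module built via make and loaded with insmod ion.ko, the miscdevice registers as /dev/ion (assuming udev creates the device node), which is the path the user-space sketches in this article open.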
uapi/ion.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* drivers/staging/android/uapi/ion.h
*
* Copyright (C) 2011 Google, Inc.
*/
#ifndef _UAPI_LINUX_ION_H
#define _UAPI_LINUX_ION_H
#include <linux/ioctl.h>
#include <linux/types.h>
/**
* enum ion_heap_types - list of all possible types of heaps
* @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
* @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
* @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
* carveout heap, allocations are physically
* contiguous
* @ION_HEAP_TYPE_DMA: memory allocated via DMA API
* @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
* is used to identify the heaps, so only 32
* total heap types are supported
*/
enum ion_heap_type {
    ION_HEAP_TYPE_SYSTEM,
    ION_HEAP_TYPE_SYSTEM_CONTIG,
    ION_HEAP_TYPE_CARVEOUT,
    ION_HEAP_TYPE_CHUNK,
    ION_HEAP_TYPE_DMA,
    ION_HEAP_TYPE_CUSTOM, /*
                           * must be last so device specific heaps always
                           * are at the end of this enum
                           */
};
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
/**
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
/*
* mappings of this buffer should be cached, ion will do cache maintenance
* when the buffer is mapped for dma
*/
#define ION_FLAG_CACHED 1
/**
* DOC: Ion Userspace API
*
* create a client by opening /dev/ion
* most operations handled via following ioctls
*
*/
/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len: size of the allocation
 * @heap_id_mask: mask of heap ids to allocate from
 * @flags: flags passed to heap
 * @fd: will be populated with a dma-buf fd referring to
 * this allocation
 *
 * Provided by userspace as an argument to the ioctl
 */
struct ion_allocation_data {
    __u64 len;
    __u32 heap_id_mask;
    __u32 flags;
    __u32 fd;
    __u32 unused;
};
#define MAX_HEAP_NAME 32
/**
* struct ion_heap_data - data about a heap
* @name - first 32 characters of the heap name
* @type - heap type
* @heap_id - heap id for the heap
*/
struct ion_heap_data {
    char name[MAX_HEAP_NAME];
    __u32 type;
    __u32 heap_id;
    __u32 reserved0;
    __u32 reserved1;
    __u32 reserved2;
};
/**
* struct ion_heap_query - collection of data about all heaps
* @cnt - total number of heaps to be copied
* @heaps - buffer to copy heap data
*/
struct ion_heap_query {
    __u32 cnt;       /* Total number of heaps to be copied */
    __u32 reserved0; /* align to 64bits */
    __u64 heaps;     /* buffer to be populated */
    __u32 reserved1;
    __u32 reserved2;
};
#define ION_IOC_MAGIC 'I'
/**
* DOC: ION_IOC_ALLOC - allocate memory
*
* Takes an ion_allocation_data struct and returns it with the handle field
* populated with the opaque handle for the allocation.
*/
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
                            struct ion_allocation_data)
/**
* DOC: ION_IOC_HEAP_QUERY - information about available heaps
*
* Takes an ion_heap_query structure and populates information about
* available Ion heaps.
*/
#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, \
                                 struct ion_heap_query)
#endif /* _UAPI_LINUX_ION_H */
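Finally, a matching sketch of the importer side, again not part of the original code: struct ion_import_fd is duplicated from ion.c (the uapi header above does not define it), and ION_IOC_IMPORT is a made-up request code; it works only because the driver treats every cmd other than ION_IOC_ALLOC as an import request:
test_import.c
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "uapi/ion.h"
/* must match struct ion_import_fd in ion.c */
struct ion_import_fd {
    int src_pid;
    int fd;
    int newfd;
};
/* hypothetical request code; any value other than ION_IOC_ALLOC would do */
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 1, struct ion_import_fd)
int main(int argc, char *argv[])
{
    struct ion_import_fd imp;
    int devfd;
    char *p;
    if (argc < 3)
        return 1;
    imp.src_pid = atoi(argv[1]); /* exporter's pid */
    imp.fd = atoi(argv[2]);      /* dma-buf fd as numbered inside the exporter */
    devfd = open("/dev/ion", O_RDWR);
    if (devfd < 0 || ioctl(devfd, ION_IOC_IMPORT, &imp) < 0)
        return 1;
    /* imp.newfd is a fresh dma-buf fd in this process; map it like any dma-buf */
    p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, imp.newfd, 0);
    if (p == MAP_FAILED)
        return 1;
    printf("importer read: %s\n", p);
    return 0;
}
Run the exporter first, then start the importer with the printed pid and fd; both processes end up with mappings backed by the same physical pages.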