int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
        struct ion_device *dev = internal_dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        int fd;
        struct dma_buf *dmabuf;

        pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
                 len, heap_id_mask, flags);
        /*
         * Traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client and matches
         * the request of the caller, allocate from it.  Repeat until the
         * allocation has succeeded or all heaps have been tried.
         */
        len = PAGE_ALIGN(len);
        if (!len)
                return -EINVAL;
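
        /*
         * hold dev->lock for read so the set of heaps cannot change while
         * the priority-ordered plist is walked below
         */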
        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;

                buffer = ion_buffer_create(heap, dev, len, flags);
                if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);
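
        /*
         * buffer is still NULL when no heap matched heap_id_mask; an
         * ERR_PTR() value means the last matching heap failed to allocate
         */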
        if (!buffer)
                return -ENODEV;

        if (IS_ERR(buffer))
                return PTR_ERR(buffer);
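
        /* wrap the buffer in a dma-buf so it can be shared via a file descriptor */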
        exp_info.ops = &dma_buf_ops;
        exp_info.size = buffer->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buffer;

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                _ion_buffer_destroy(buffer);
                return PTR_ERR(dmabuf);
        }
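
        /*
         * install the dma-buf in an unused fd; on failure, dropping the
         * reference releases the dma-buf and, through its release callback,
         * the underlying ION buffer
         */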
        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}

static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;

        ret = heap->ops->allocate(heap, buffer, len, flags);
        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                /*
                 * the heap defers frees: drain its freelist to reclaim any
                 * pending buffers, then retry the allocation once
                 */
                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, flags);
                if (ret)
                        goto err2;
        }

        if (!buffer->sg_table) {
                WARN_ONCE(1, "This heap needs to set the sgtable");
                ret = -EINVAL;
                goto err1;
        }

        table = buffer->sg_table;
        buffer->dev = dev;
        buffer->size = len;

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);

        /* track the new buffer on the device, under dev->buffer_lock */
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);

        return buffer;

err1:
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                            unsigned long len,
                            unsigned long flags)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);
        struct sg_table *table;
        struct page *pages;
        unsigned long size = PAGE_ALIGN(len);
        unsigned long nr_pages = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        int ret;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;
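
        /* ask CMA for nr_pages physically contiguous pages */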
        pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
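
        /*
         * zero the allocation before exposing it; highmem pages have to be
         * mapped and cleared one page at a time, lowmem can be cleared
         * through the kernel linear mapping
         */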
        if (PageHighMem(pages)) {
                unsigned long nr_clear_pages = nr_pages;
                struct page *page = pages;

                while (nr_clear_pages > 0) {
                        void *vaddr = kmap_atomic(page);

                        memset(vaddr, 0, PAGE_SIZE);
                        kunmap_atomic(vaddr);
                        page++;
                        nr_clear_pages--;
                }
        } else {
                memset(page_address(pages), 0, size);
        }
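
        /*
         * a CMA allocation is one contiguous range, so a single-entry
         * scatterlist is enough to describe it
         */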
        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                goto err;

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto free_mem;

        sg_set_page(table->sgl, pages, size, 0);

        buffer->priv_virt = pages;
        buffer->sg_table = table;
        return 0;

free_mem:
        kfree(table);
err:
        cma_release(cma_heap->cma, pages, nr_pages);
        return -ENOMEM;
}