dma_buf_fops

static const struct file_operations dma_buf_fops = {
    .release    = dma_buf_file_release,
    .mmap        = dma_buf_mmap_internal,
    .llseek        = dma_buf_llseek,
    .poll        = dma_buf_poll,
    .unlocked_ioctl    = dma_buf_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl    = dma_buf_ioctl,
#endif
    .show_fdinfo    = dma_buf_show_fdinfo,
};
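
These file operations are installed by dma_buf_export() on the anonymous inode file backing every dma-buf, so any fd an exporter hands to userspace dispatches into the handlers above. A minimal exporter-side sketch of how such an fd is created (my_dma_buf_ops, struct my_buffer and my_export_buffer() are hypothetical placeholders, not part of the code in this post):

#include <linux/dma-buf.h>

/* Hypothetical exporter: wire its dma_buf_ops into a new dma-buf fd. */
static int my_export_buffer(struct my_buffer *buf, size_t size)
{
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
    struct dma_buf *dmabuf;
    int fd;

    exp_info.ops   = &my_dma_buf_ops;   /* exporter's dma_buf_ops        */
    exp_info.size  = size;              /* size of the backing storage   */
    exp_info.flags = O_RDWR;            /* becomes f_flags of the file   */
    exp_info.priv  = buf;               /* later visible as dmabuf->priv */

    dmabuf = dma_buf_export(&exp_info); /* file gets dma_buf_fops        */
    if (IS_ERR(dmabuf))
        return PTR_ERR(dmabuf);

    fd = dma_buf_fd(dmabuf, O_CLOEXEC); /* fd that userspace will ioctl() */
    if (fd < 0)
        dma_buf_put(dmabuf);

    return fd;
}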

static long dma_buf_ioctl(struct file *file,
              unsigned int cmd, unsigned long arg)
{
    struct dma_buf *dmabuf;
    struct dma_buf_sync sync;
    enum dma_data_direction direction;
    int ret;

    dmabuf = file->private_data;

    switch (cmd) {
    case DMA_BUF_IOCTL_SYNC:
        if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
            return -EFAULT;

        if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
            return -EINVAL;

        switch (sync.flags & DMA_BUF_SYNC_RW) {
        case DMA_BUF_SYNC_READ:
            direction = DMA_FROM_DEVICE;
            break;
        case DMA_BUF_SYNC_WRITE:
            direction = DMA_TO_DEVICE;
            break;
        case DMA_BUF_SYNC_RW:
            direction = DMA_BIDIRECTIONAL;
            break;
        default:
            return -EINVAL;
        }

        if (sync.flags & DMA_BUF_SYNC_END)
            ret = dma_buf_end_cpu_access(dmabuf, direction);
        else
            ret = dma_buf_begin_cpu_access(dmabuf, direction);

        return ret;

    case DMA_BUF_SET_NAME_A:
    case DMA_BUF_SET_NAME_B:
        return dma_buf_set_name(dmabuf, (const char __user *)arg);

    default:
        return -ENOTTY;
    }
}
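
From userspace, the DMA_BUF_IOCTL_SYNC case above is driven by bracketing every CPU access to the mmap()ed buffer between a SYNC_START and a SYNC_END ioctl carrying the same direction flags. A hedged sketch (dmabuf_fd and map are assumed to come from an exporter such as ion and a prior mmap(); they are not part of the code in this post):

#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#include <string.h>

/* Bracket a CPU write to an mmap()ed dma-buf with the sync ioctl. */
static int cpu_fill_dmabuf(int dmabuf_fd, void *map, size_t len)
{
    struct dma_buf_sync sync = { 0 };

    /* SYNC_START + WRITE -> dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE) */
    sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
    if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
        return -1;

    memset(map, 0xaa, len);     /* CPU access is only coherent inside the bracket */

    /* SYNC_END + same direction -> dma_buf_end_cpu_access() */
    sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
    return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}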

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:    [in]    buffer to prepare cpu access for.
 * @direction:    [in]    direction of CPU access (read, write, or both).
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                 enum dma_data_direction direction)
{
    int ret = 0;

    if (WARN_ON(!dmabuf))
        return -EINVAL;

    if (dmabuf->ops->begin_cpu_access)
        ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

    /* Ensure that all fences are waited upon - but we first allow
     * the native handler the chance to do so more efficiently if it
     * chooses. A double invocation here will be reasonably cheap no-op.
     */
    if (ret == 0)
        ret = __dma_buf_begin_cpu_access(dmabuf, direction);

    return ret;
}
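
In-kernel importers follow the same bracketing rule directly. A minimal sketch, assuming a kernel of the same vintage as the code above where dma_buf_vmap() still returns a bare void * (newer kernels pass a struct iosys_map instead); importer_cpu_read() is a hypothetical helper:

#include <linux/dma-buf.h>
#include <linux/string.h>

/* CPU read of an imported dma-buf, bracketed by begin/end_cpu_access. */
static int importer_cpu_read(struct dma_buf *dmabuf, void *dst, size_t len)
{
    void *vaddr;
    int ret;

    ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
    if (ret)
        return ret;

    vaddr = dma_buf_vmap(dmabuf);       /* exporter-provided kernel mapping */
    if (vaddr) {
        memcpy(dst, vaddr, len);
        dma_buf_vunmap(dmabuf, vaddr);
    }

    /* Only the begin/end pair guarantees coherency with device DMA. */
    return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}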

static const struct dma_buf_ops dma_buf_ops = {
    .map_dma_buf = ion_map_dma_buf,
    .unmap_dma_buf = ion_unmap_dma_buf,
    .mmap = ion_mmap,
    .release = ion_dma_buf_release,
    .attach = ion_dma_buf_attach,
    .detach = ion_dma_buf_detatch,
    .begin_cpu_access = ion_dma_buf_begin_cpu_access,
    .end_cpu_access = ion_dma_buf_end_cpu_access,
    .map = ion_dma_buf_kmap,
    .unmap = ion_dma_buf_kunmap,
};

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                    enum dma_data_direction direction)
{
    struct ion_buffer *buffer = dmabuf->priv;
    void *vaddr;
    struct ion_dma_buf_attachment *a;
    int ret = 0;

    /*
     * TODO: Move this elsewhere because we don't always need a vaddr
     */
    if (buffer->heap->ops->map_kernel) {
        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR(vaddr)) {
            ret = PTR_ERR(vaddr);
            goto unlock;
        }
        mutex_unlock(&buffer->lock);
    }

    mutex_lock(&buffer->lock);
    list_for_each_entry(a, &buffer->attachments, list) {
        dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
                    direction);
    }

unlock:
    mutex_unlock(&buffer->lock);
    return ret;
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
            int nelems, enum dma_data_direction dir)
{
    const struct dma_map_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(dir));
    if (dma_is_direct(ops))
        dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
    else if (ops->sync_sg_for_cpu)
        ops->sync_sg_for_cpu(dev, sg, nelems, dir);
    debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

const struct dma_map_ops arm_dma_ops = {
    .alloc            = arm_dma_alloc,
    .free            = arm_dma_free,
    .mmap            = arm_dma_mmap,
    .get_sgtable        = arm_dma_get_sgtable,
    .map_page        = arm_dma_map_page,
    .unmap_page        = arm_dma_unmap_page,
    .map_sg            = arm_dma_map_sg,
    .unmap_sg        = arm_dma_unmap_sg,
    .map_resource        = dma_direct_map_resource,
    .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
    .sync_single_for_device    = arm_dma_sync_single_for_device,
    .sync_sg_for_cpu    = arm_dma_sync_sg_for_cpu,
    .sync_sg_for_device    = arm_dma_sync_sg_for_device,
    .dma_supported        = arm_dma_supported,
    .get_required_mask    = dma_direct_get_required_mask,
};

static void arm_dma_sync_single_for_cpu(struct device *dev,
        dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
    unsigned int offset = handle & (PAGE_SIZE - 1);
    struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
    __dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
        dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
    unsigned int offset = handle & (PAGE_SIZE - 1);
    struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
    __dma_page_cpu_to_dev(page, offset, size, dir);
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
    size_t size, enum dma_data_direction dir)
{
    phys_addr_t paddr = page_to_phys(page) + off;

    /* FIXME: non-speculating: not required */
    /* in any case, don't bother invalidating if DMA to device */
    if (dir != DMA_TO_DEVICE) {
        outer_inv_range(paddr, paddr + size);

        dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
    }

    /*
     * Mark the D-cache clean for these pages to avoid extra flushing.
     */
    if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
        unsigned long pfn;
        size_t left = size;

        pfn = page_to_pfn(page) + off / PAGE_SIZE;
        off %= PAGE_SIZE;
        if (off) {
            pfn++;
            left -= PAGE_SIZE - off;
        }
        while (left >= PAGE_SIZE) {
            page = pfn_to_page(pfn++);
            set_bit(PG_dcache_clean, &page->flags);
            left -= PAGE_SIZE;
        }
    }
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
    size_t size, enum dma_data_direction dir)
{
    phys_addr_t paddr;

    dma_cache_maint_page(page, off, size, dir, dmac_map_area);

    paddr = page_to_phys(page) + off;
    if (dir == DMA_FROM_DEVICE) {
        outer_inv_range(paddr, paddr + size);
    } else {
        outer_clean_range(paddr, paddr + size);
    }
    /* FIXME: non-speculating: flush on bidirectional mappings? */
}
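
As the comment says, drivers should stay at the dma-mapping.h level and let dma_sync_single_for_cpu()/dma_sync_single_for_device() funnel into the arm_dma_* handlers above. A hedged sketch of that intended driver-side pattern for a reused streaming mapping (dev, cpu_buf and reuse_streaming_buffer() are placeholders, not part of the code in this post):

#include <linux/dma-mapping.h>

/* Reuse one streaming mapping across transfers by syncing instead of remapping. */
static int reuse_streaming_buffer(struct device *dev, void *cpu_buf, size_t size)
{
    dma_addr_t dma = dma_map_single(dev, cpu_buf, size, DMA_FROM_DEVICE);

    if (dma_mapping_error(dev, dma))
        return -ENOMEM;

    /* ... first device-to-memory DMA completes ... */

    /* Hand the buffer to the CPU: ends up in arm_dma_sync_single_for_cpu(). */
    dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
    /* The CPU may now read cpu_buf coherently. */

    /* Hand it back to the device: ends up in arm_dma_sync_single_for_device(). */
    dma_sync_single_for_device(dev, dma, size, DMA_FROM_DEVICE);

    /* ... next device-to-memory DMA ... */

    dma_unmap_single(dev, dma, size, DMA_FROM_DEVICE);
    return 0;
}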
