dma_alloc_wc

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                 dma_addr_t *dma_addr, gfp_t gfp)
{
    unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

    if (gfp & __GFP_NOWARN)
        attrs |= DMA_ATTR_NO_WARN;

    return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
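
dma_alloc_wc() is only a thin wrapper: it adds DMA_ATTR_WRITE_COMBINE (plus DMA_ATTR_NO_WARN when the caller passed __GFP_NOWARN) and forwards everything to dma_alloc_attrs(), shown next. As a reminder of the driver-side view, here is a minimal, hypothetical usage sketch; only dma_alloc_wc(), dma_free_wc() and GFP_KERNEL are real kernel names, the my_* identifiers are made up for illustration.

#include <linux/dma-mapping.h>

/* Hypothetical driver state: kernel virtual address and bus address of a
 * write-combined buffer (e.g. a scan-out buffer). */
static void *my_buf_cpu;
static dma_addr_t my_buf_dma;

static int my_alloc_wc_buffer(struct device *my_dev, size_t my_size)
{
    my_buf_cpu = dma_alloc_wc(my_dev, my_size, &my_buf_dma, GFP_KERNEL);
    if (!my_buf_cpu)
        return -ENOMEM;
    /* program my_buf_dma into the device, write data through my_buf_cpu */
    return 0;
}

static void my_free_wc_buffer(struct device *my_dev, size_t my_size)
{
    dma_free_wc(my_dev, my_size, my_buf_cpu, my_buf_dma);
}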

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
        gfp_t flag, unsigned long attrs)
{
    const struct dma_map_ops *ops = get_dma_ops(dev);
    void *cpu_addr;

    WARN_ON_ONCE(!dev->coherent_dma_mask);

    if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
        return cpu_addr;

    /* let the implementation decide on the zone to allocate from: */
    flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

    if (dma_is_direct(ops))
        cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
    else if (ops->alloc)
        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
    else
        return NULL;

    debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
    return cpu_addr;
}
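
For comparison, the far more common dma_alloc_coherent() funnels into exactly the same function. In this kernel generation it is, from memory, roughly the following one-liner (check include/linux/dma-mapping.h of your tree for the exact text):

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
    /* no attributes except an optional DMA_ATTR_NO_WARN */
    return dma_alloc_attrs(dev, size, dma_handle, gfp,
            (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

So the only difference between dma_alloc_coherent() and dma_alloc_wc() is the attribute set that reaches the implementation below.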

void *dma_direct_alloc(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
    if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
        dma_alloc_need_uncached(dev, attrs))
        return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
    return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}
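
Whether the arch-specific arch_dma_alloc() path is taken depends on dma_alloc_need_uncached(). Paraphrased from the same kernel generation (see include/linux/dma-noncoherent.h for the authoritative version), its logic is approximately:

/* Approximate paraphrase, not verbatim kernel source. */
static inline bool dma_alloc_need_uncached(struct device *dev,
        unsigned long attrs)
{
    /* coherent devices never need an uncached mapping */
    if (dev_is_dma_coherent(dev))
        return false;
    /* no kernel mapping at all, so nothing to mark uncached */
    if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
        return false;
    /* DMA_ATTR_NON_CONSISTENT callers manage caching themselves */
    if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
        (attrs & DMA_ATTR_NON_CONSISTENT))
        return false;
    return true;
}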

void *dma_direct_alloc_pages(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
    struct page *page;
    void *ret;

    page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
    if (!page)
        return NULL;

    if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
        !force_dma_unencrypted(dev)) {
        /* remove any dirty cache lines on the kernel alias */
        if (!PageHighMem(page))
            arch_dma_prep_coherent(page, size);
        *dma_handle = phys_to_dma(dev, page_to_phys(page));
        /* return the page pointer as the opaque cookie */
        return page;
    }

    if (PageHighMem(page)) {
        /*
         * Depending on the cma= arguments and per-arch setup
         * dma_alloc_contiguous could return highmem pages.
         * Without remapping there is no way to return them here,
         * so log an error and fail.
         */
        dev_info(dev, "Rejecting highmem page from CMA.\n");
        __dma_direct_free_pages(dev, size, page);
        return NULL;
    }

    ret = page_address(page);
    if (force_dma_unencrypted(dev)) {
        set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
        *dma_handle = __phys_to_dma(dev, page_to_phys(page));
    } else {
        *dma_handle = phys_to_dma(dev, page_to_phys(page));
    }
    memset(ret, 0, size);

    if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
        dma_alloc_need_uncached(dev, attrs)) {
        arch_dma_prep_coherent(page, size);
        ret = uncached_kernel_address(ret);
    }

    return ret;
}
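
The DMA_ATTR_NO_KERNEL_MAPPING branch above is worth calling out: in the direct-mapping case (when memory encryption is not forced) the returned "address" is the struct page pointer itself, used as an opaque cookie. A caller therefore must not dereference it and has to pass the same attribute back on free. The sketch below is hypothetical; only dma_alloc_attrs(), dma_free_attrs() and the attribute flag are real kernel names.

/* Hypothetical caller of the attribute variant; my_* names are made up. */
static void *my_cookie;
static dma_addr_t my_dma;

static int my_alloc_unmapped(struct device *my_dev, size_t my_size)
{
    my_cookie = dma_alloc_attrs(my_dev, my_size, &my_dma, GFP_KERNEL,
            DMA_ATTR_NO_KERNEL_MAPPING);
    if (!my_cookie)
        return -ENOMEM;
    /* my_cookie is an opaque cookie here, never a dereferenceable pointer */
    return 0;
}

static void my_free_unmapped(struct device *my_dev, size_t my_size)
{
    dma_free_attrs(my_dev, my_size, my_cookie, my_dma,
            DMA_ATTR_NO_KERNEL_MAPPING);
}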

struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
    size_t alloc_size = PAGE_ALIGN(size);
    int node = dev_to_node(dev);
    struct page *page = NULL;
    u64 phys_mask;

    if (attrs & DMA_ATTR_NO_WARN)
        gfp |= __GFP_NOWARN;

    /* we always manually zero the memory once we are done: */
    gfp &= ~__GFP_ZERO;
    gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
            &phys_mask);
    page = dma_alloc_contiguous(dev, alloc_size, gfp);
    if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
        dma_free_contiguous(dev, page, alloc_size);
        page = NULL;
    }
again:
    if (!page)
        page = alloc_pages_node(node, gfp, get_order(alloc_size));
    if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
        dma_free_contiguous(dev, page, size);
        page = NULL;

        if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
            phys_mask < DMA_BIT_MASK(64) &&
            !(gfp & (GFP_DMA32 | GFP_DMA))) {
            gfp |= GFP_DMA32;
            goto again;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
            gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
            goto again;
        }
    }

    return page;
}
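
The retry ladder above first tries CMA, then a normal page allocation, and only narrows to GFP_DMA32 and finally GFP_DMA when the returned pages do not satisfy dma_coherent_ok(). That check is, paraphrased from the same file (not verbatim), essentially a comparison of the buffer's highest bus address against the device's coherent and bus masks:

/* Approximate paraphrase of the addressability check, not verbatim source. */
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
    return phys_to_dma_direct(dev, phys) + size - 1 <=
            min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}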

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:   Pointer to device for which the allocation is performed.
 * @size:  Requested allocation size.
 * @gfp:   Allocation flags.
 *
 * This function allocates a contiguous memory buffer for the specified
 * device. It tries the device-specific contiguous memory area if available,
 * or the default global one; if neither yields pages, the caller falls back
 * to a normal page allocation.
 *
 * Note that single-page allocations bypass the global area, as the addresses
 * within one page are always contiguous, so there is no need to waste CMA
 * pages on them; this also helps reduce fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
    size_t count = size >> PAGE_SHIFT;
    struct page *page = NULL;
    struct cma *cma = NULL;

    if (dev && dev->cma_area)
        cma = dev->cma_area;
    else if (count > 1)
        cma = dma_contiguous_default_area;

    /* CMA can be used only in the context which permits sleeping */
    if (cma && gfpflags_allow_blocking(gfp)) {
        size_t align = get_order(size);
        size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);

        page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
    }

    return page;
}
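
Note that in the version quoted here the fallback to plain pages no longer lives in dma_alloc_contiguous() itself; as seen above, __dma_direct_alloc_pages() performs it after this call returns NULL. The release side is symmetric: dma_free_contiguous() first attempts a CMA release and only then falls back to __free_pages(). Paraphrased (not verbatim):

/* Approximate paraphrase of the matching release path, not verbatim source. */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
    /* cma_release() returns false when the pages are not from the CMA area */
    if (!cma_release(dev_get_cma_area(dev), page,
             PAGE_ALIGN(size) >> PAGE_SHIFT))
        __free_pages(page, get_order(size));
}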
