Adreno Source Code Series (10): Private Memory Allocation

static const struct kgsl_ioctl kgsl_ioctl_funcs[] = {
    ...
    // ioctl command: IOCTL_KGSL_GPUMEM_ALLOC
    // ioctl handler: kgsl_ioctl_gpumem_alloc
	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
			kgsl_ioctl_gpumem_alloc),
    ...
};
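For reference, each table entry is built with the KGSL_IOCTL_FUNC macro, which places the handler at the index given by the ioctl command number, so dispatch is a plain array lookup on _IOC_NR(cmd). A sketch of the entry type and macro, quoted from memory from the kgsl driver:

struct kgsl_ioctl {
	unsigned int cmd;
	long (*func)(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
};

#define KGSL_IOCTL_FUNC(_cmd, _func) \
	[_IOC_NR((_cmd))] = \
		{ .cmd = (_cmd), .func = (_func) }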

1. kgsl_gpumem_alloc

struct kgsl_gpumem_alloc {
    // output: the assigned GPU virtual address
	unsigned long gpuaddr; /* output param */
    // requested size of the physical memory
	__kernel_size_t size;
    // allocation flags
	unsigned int flags;
};

// ioctl argument struct: kgsl_gpumem_alloc
#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
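To make the entry point concrete, here is a minimal userspace sketch (not from the kernel source) that issues this ioctl. It assumes the standard Adreno device node /dev/kgsl-3d0 and the UAPI header <linux/msm_kgsl.h>:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_kgsl.h>

int main(void)
{
	struct kgsl_gpumem_alloc param;
	int fd = open("/dev/kgsl-3d0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&param, 0, sizeof(param));
	param.size = 1 << 20;	/* 1 MiB; the driver rounds up to pages */
	param.flags = 0;	/* default caching and permissions */

	if (ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC, &param) == 0)
		printf("gpuaddr=0x%lx size=%lu\n",
			param.gpuaddr, (unsigned long) param.size);

	close(fd);
	return 0;
}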

2. kgsl_ioctl_gpumem_alloc

long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
    // ioctl parameters
	struct kgsl_gpumem_alloc *param = data;
	struct kgsl_device *device = dev_priv->device;

    // kgsl_mem_entry describes a userspace memory allocation [see 2.1]
	struct kgsl_mem_entry *entry;
    // flags requested by userspace
	uint64_t flags = param->flags;

	/*
	 * On 64 bit kernel, secure memory region is expanded and
	 * moved to 64 bit address, 32 bit apps can not access it from
	 * this IOCTL.
	 */
	if ((param->flags & KGSL_MEMFLAGS_SECURE) && is_compat_task()
			&& test_bit(KGSL_MMU_64BIT, &device->mmu.features))
		return -EOPNOTSUPP;

	/* Legacy functions don't support these advanced features */
	flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);

	if (is_compat_task())
		flags |= KGSL_MEMFLAGS_FORCE_32BIT;

    // create the kgsl_mem_entry [see 2.2]
	entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size, flags);

	if (IS_ERR(entry))
		return PTR_ERR(entry);

    // update the output parameters
	param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
	param->size = (size_t) entry->memdesc.size;
	param->flags = (unsigned int) entry->memdesc.flags;

	/* Put the extra ref from kgsl_mem_entry_create() */
    // drop the reference; if it reaches zero the entry is freed via kgsl_mem_entry_destroy
	kgsl_mem_entry_put(entry);

	return 0;
}
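kgsl_mem_entry_put() is essentially a NULL-safe kref_put() wrapper; a sketch based on the kgsl source:

static inline void
kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
{
	if (entry)
		kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}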

2.1 kgsl_mem_entry

/*
 * struct kgsl_mem_entry - a userspace memory allocation
 */
struct kgsl_mem_entry {
    // Currently userspace can only hold a single reference count but the kernel may hold more
	struct kref refcount;
    // description of the memory [see 2.1.1]
	struct kgsl_memdesc memdesc;
    // type-specific data, such as the dma-buf attachment pointer
	void *priv_data;
    // rb_node for the gpu address lookup rb tree
	struct rb_node node;
    // idr index for this entry, can be used to find memory that does not have a valid GPU address
	unsigned int id;
    // the process that owns this allocation
	struct kgsl_process_private *priv;
    // if !0, userspace requested that this memory be freed, but there are still references to it
	int pending_free;
    // String containing user specified metadata for the entry
	char metadata[KGSL_GPUOBJ_ALLOC_METADATA_MAX + 1];
    // used to schedule a kgsl_mem_entry_put in atomic contexts
	struct work_struct work;
	/**
	 * @map_count: Count how many vmas this object is mapped in - used for
	 * debugfs accounting
	 */
    // number of VMAs currently mapping this entry
	atomic_t map_count;
};

2.1.1 kgsl_memdesc

/**
 * struct kgsl_memdesc - GPU memory object descriptor
 */
struct kgsl_memdesc {
    // pagetable this object is mapped into
	struct kgsl_pagetable *pagetable;
    // kernel virtual address (set when the object is mapped into the kernel)
	void *hostptr;
    // number of threads using hostptr
	unsigned int hostptr_count;
    // GPU virtual address
	uint64_t gpuaddr;
    // physical address of the memory object
	phys_addr_t physaddr;
    // size of the backing physical memory
	uint64_t size;
    // Internal flags and settings
	unsigned int priv;
	struct sg_table *sgt;
    // function table for operating on this memory [see 2.1.2]
	const struct kgsl_memdesc_ops *ops;
    // flags set from userspace at allocation time
	uint64_t flags;
	struct device *dev;
    // dma attributes for this memory
	unsigned long attrs;
    // array of pointers to the allocated pages
	struct page **pages;
    // total number of pages allocated
	unsigned int page_count;
	/*
	 * @lock: Spinlock to protect the gpuaddr from being accessed by
	 * multiple entities trying to map the same SVM region at once
	 */
	spinlock_t lock;
};

2.1.2 kgsl_memdesc_ops

// concrete implementation: see kgsl_page_ops in 2.2.5
struct kgsl_memdesc_ops {
	unsigned int vmflags;
	vm_fault_t (*vmfault)(struct kgsl_memdesc *memdesc,
		struct vm_area_struct *vma, struct vm_fault *vmf);
    // free the memory
	void (*free)(struct kgsl_memdesc *memdesc);
    // map into the kernel virtual address space
	int (*map_kernel)(struct kgsl_memdesc *memdesc);
    // unmap from the kernel virtual address space
	void (*unmap_kernel)(struct kgsl_memdesc *memdesc);
	/**
	 * @put_gpuaddr: Put away the GPU address and unmap the memory
	 * descriptor
	 */
	void (*put_gpuaddr)(struct kgsl_memdesc *memdesc);
};

2.2 gpumem_alloc_entry

struct kgsl_mem_entry *gpumem_alloc_entry(
		struct kgsl_device_private *dev_priv,
		uint64_t size, uint64_t flags)
{
	int ret;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;
	struct kgsl_mmu *mmu = &dev_priv->device->mmu;
	unsigned int align;

	flags &= KGSL_MEMFLAGS_GPUREADONLY
		| KGSL_CACHEMODE_MASK
		| KGSL_MEMTYPE_MASK
		| KGSL_MEMALIGN_MASK
		| KGSL_MEMFLAGS_USE_CPU_MAP
		| KGSL_MEMFLAGS_SECURE
		| KGSL_MEMFLAGS_FORCE_32BIT
		| KGSL_MEMFLAGS_IOCOHERENT
		| KGSL_MEMFLAGS_GUARD_PAGE;

	/* Return not supported error if secure memory isn't enabled */
	if (!kgsl_mmu_is_secured(mmu) &&
			(flags & KGSL_MEMFLAGS_SECURE)) {
		dev_WARN_ONCE(dev_priv->device->dev, 1,
				"Secure memory not supported");
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Cap the alignment bits to the highest number we can handle */
	align = MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
	if (align >= ilog2(KGSL_MAX_ALIGN)) {
		dev_err(dev_priv->device->dev,
			"Alignment too large; restricting to %dK\n",
			KGSL_MAX_ALIGN >> 10);

		flags &= ~((uint64_t) KGSL_MEMALIGN_MASK);
		flags |= (uint64_t)((ilog2(KGSL_MAX_ALIGN) <<
						KGSL_MEMALIGN_SHIFT) &
					KGSL_MEMALIGN_MASK);
	}

	/* For now only allow allocations up to 4G */
	if (size == 0 || size > UINT_MAX)
		return ERR_PTR(-EINVAL);

    // filter/adjust the cache mode
	flags = kgsl_filter_cachemode(flags);

    // the code above validates and adjusts the flags;
    // now create the kgsl_mem_entry [see 2.2.1]
	entry = kgsl_mem_entry_create();
	if (entry == NULL)
		return ERR_PTR(-ENOMEM);

    // check from the flags whether this is a cached buffer
	if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) &&
		kgsl_cachemode_is_cached(flags))
		flags |= KGSL_MEMFLAGS_IOCOHERENT;

    // allocate the private memory [see 2.2.2]
	ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
		size, flags, 0);
	if (ret != 0)
		goto err;

    // attach the memory to the kgsl process [see 2.2.7]
	ret = kgsl_mem_entry_attach_process(dev_priv->device, private, entry);
	if (ret != 0) {
		kgsl_sharedmem_free(&entry->memdesc);
		goto err;
	}

	kgsl_process_add_stats(private,
			kgsl_memdesc_usermem_type(&entry->memdesc),
			entry->memdesc.size);
	trace_kgsl_mem_alloc(entry);

    // publish the kgsl_mem_entry to kgsl_process_private so other paths can access it
	kgsl_mem_entry_commit_process(entry);
	return entry;
err:
	kfree(entry);
	return ERR_PTR(ret);
}
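The MEMFLAGS() helper used in the alignment check above is a plain bit-field extractor. A sketch of it, plus a worked example of the log2-encoded alignment field (the mask/shift values are those in the msm_kgsl.h UAPI header, quoted from memory):

#define MEMFLAGS(_flags, _mask, _shift) \
	((unsigned int) (((_flags) & (_mask)) >> (_shift)))

/*
 * The alignment field holds ilog2() of the requested alignment. With
 * KGSL_MEMALIGN_SHIFT == 16 and KGSL_MEMALIGN_MASK == 0x00FF0000,
 * requesting 64 KiB alignment means ilog2(SZ_64K) == 16, so userspace sets
 *
 *	flags |= (16 << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
 *
 * and MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT) reads 16 back.
 */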

2.2.1 kgsl_mem_entry_create

static struct kgsl_mem_entry *kgsl_mem_entry_create(void)
{
    // allocate the kgsl_mem_entry
	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (entry != NULL) {
        // initialize the kgsl_mem_entry refcount to 1
		kref_init(&entry->refcount);
		/* put this ref in userspace memory alloc and map ioctls */
        // take an extra reference (refcount becomes 2)
		kref_get(&entry->refcount);
        // start with zero VMA mappings
		atomic_set(&entry->map_count, 0);
	}

	return entry;
}

2.2.2 kgsl_allocate_user

enum kgsl_mmutype {
    // IOMMU is supported
	KGSL_MMU_TYPE_IOMMU = 0,
	KGSL_MMU_TYPE_NONE
};

int kgsl_allocate_user(struct kgsl_device *device, struct kgsl_memdesc *memdesc,
		u64 size, u64 flags, u32 priv)
{
    // without an IOMMU the allocation must be physically contiguous
	if (device->mmu.type == KGSL_MMU_TYPE_NONE)
		return kgsl_alloc_contiguous(device, memdesc, size, flags,
			priv);
	else if (flags & KGSL_MEMFLAGS_SECURE)
		return kgsl_allocate_secure(device, memdesc, size, flags, priv);

    // page-based allocation [see 2.2.3]
	return kgsl_alloc_pages(device, memdesc, size, flags, priv);
}

2.2.3 kgsl_alloc_pages

static int kgsl_alloc_pages(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	struct page **pages;
	int count;

    // round the size up to a whole number of pages
	size = PAGE_ALIGN(size);

    // validate the size
	if (!size || size > UINT_MAX)
		return -EINVAL;

    // initialize the kgsl_memdesc from the flags [see 2.2.4]
	kgsl_memdesc_init(device, memdesc, flags);
    // priv is 0 on this path
	memdesc->priv |= priv;

    // #define KGSL_MEMDESC_SYSMEM BIT(9)
	if (priv & KGSL_MEMDESC_SYSMEM) {
		memdesc->ops = &kgsl_system_ops;
		count = kgsl_system_alloc_pages(size, &pages, device->dev);
	} else {
        // install the kgsl_memdesc_ops implementation [see 2.2.5]
		memdesc->ops = &kgsl_page_ops;
        // allocate the pages and return them [see 2.2.6]
		count = _kgsl_alloc_pages(memdesc, size, &pages, device->dev);
	}

	if (count < 0)
		return count;

    // array of allocated pages
	memdesc->pages = pages;
    // total size
	memdesc->size = size;
    // number of pages
	memdesc->page_count = count;

    // update the global kgsl memory statistics: add the allocation size to kgsl_driver.stats.page_alloc
	KGSL_STATS_ADD(size, &kgsl_driver.stats.page_alloc,
		&kgsl_driver.stats.page_alloc_max);

	return 0;
}
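KGSL_STATS_ADD() bumps a running counter and tracks its high-water mark; roughly (a sketch quoted from memory from kgsl_sharedmem.h):

#define KGSL_STATS_ADD(_size, _stat, _max) \
	do { \
		uint64_t ret = atomic_long_add_return(_size, _stat); \
		/* remember the peak value for accounting */ \
		if (ret > atomic_long_read(_max)) \
			atomic_long_set(_max, ret); \
	} while (0)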

2.2.4 kgsl_memdesc_init

void kgsl_memdesc_init(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc, uint64_t flags)
{
	struct kgsl_mmu *mmu = &device->mmu;
	unsigned int align;

    // zero-initialize the kgsl_memdesc
	memset(memdesc, 0, sizeof(*memdesc));
	/* Turn off SVM if the system doesn't support it */
    // checks whether KGSL_MMU_IOPGTABLE (per-process pagetables) is supported
	if (!kgsl_mmu_is_perprocess(mmu))
		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);

	/* Secure memory disables advanced addressing modes */
	if (flags & KGSL_MEMFLAGS_SECURE)
		flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);

	/* Disable IO coherence if it is not supported on the chip */
    // check whether the chip supports I/O coherency
	if (!kgsl_mmu_has_feature(device, KGSL_MMU_IO_COHERENT)) {
		flags &= ~((uint64_t) KGSL_MEMFLAGS_IOCOHERENT);

		WARN_ONCE(IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT),
			"I/O coherency is not supported on this target\n");
	} else if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT))
		flags |= KGSL_MEMFLAGS_IOCOHERENT;

	/*
	 * We can't enable I/O coherency on uncached surfaces because of
	 * situations where hardware might snoop the cpu caches which can
	 * have stale data. This happens primarily due to the limitations
	 * of dma caching APIs available on arm64
	 */
	if (!kgsl_cachemode_is_cached(flags))
		flags &= ~((u64) KGSL_MEMFLAGS_IOCOHERENT);

	if (kgsl_mmu_has_feature(device, KGSL_MMU_NEED_GUARD_PAGE) ||
		(flags & KGSL_MEMFLAGS_GUARD_PAGE))
		memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;

	if (flags & KGSL_MEMFLAGS_SECURE)
		memdesc->priv |= KGSL_MEMDESC_SECURE;

    // store the flags
	memdesc->flags = flags;
    // record the device that owns this memory
	memdesc->dev = &device->pdev->dev;

    // alignment: at least one page
	align = max_t(unsigned int,
		kgsl_memdesc_get_align(memdesc), ilog2(PAGE_SIZE));
    // write the alignment back into the memdesc flags
	kgsl_memdesc_set_align(memdesc, align);

	spin_lock_init(&memdesc->lock);
}
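kgsl_memdesc_get_align()/kgsl_memdesc_set_align() read and write the log2 alignment field inside memdesc->flags; sketches based on the kgsl headers:

static inline int
kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
{
	return MEMFLAGS(memdesc->flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
}

static inline int
kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
{
	if (align > 32)
		align = 32;	/* the field cannot express more than 2^32 */

	memdesc->flags &= ~((uint64_t) KGSL_MEMALIGN_MASK);
	memdesc->flags |= (uint64_t)((align << KGSL_MEMALIGN_SHIFT) &
		KGSL_MEMALIGN_MASK);
	return 0;
}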

2.2.5 kgsl_page_ops

static const struct kgsl_memdesc_ops kgsl_page_ops = {
	.free = kgsl_free_pages,
	.vmflags = VM_DONTDUMP | VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP,
	.vmfault = kgsl_paged_vmfault,
	.map_kernel = kgsl_paged_map_kernel,
	.unmap_kernel = kgsl_paged_unmap_kernel,
	.put_gpuaddr = kgsl_unmap_and_put_gpuaddr,
};

2.2.6 _kgsl_alloc_pages

static int _kgsl_alloc_pages(struct kgsl_memdesc *memdesc,
		u64 size, struct page ***pages, struct device *dev)
{
	int count = 0;
    // convert the size into a page count
	int npages = size >> PAGE_SHIFT;
    // kvcalloc tries kmalloc for the page-pointer array and falls back
    // to a vmalloc-based allocation if contiguous kernel memory is unavailable
	struct page **local = kvcalloc(npages, sizeof(*local), GFP_KERNEL);
	u32 page_size, align;
	u64 len = size;

	if (!local)
		return -ENOMEM;

    // returns 0 on success, or when CONFIG_QCOM_KGSL_USE_SHMEM is not configured [see 2.2.6.1]
	count = kgsl_memdesc_file_setup(memdesc, size);
	if (count) {
		kvfree(local);
		return count;
	}

	/* Start with 1MB alignment to get the biggest page we can */
	align = ilog2(SZ_1M);

    // choose the page size from the remaining length and alignment
	page_size = kgsl_get_page_size(len, align);

	while (len) {
        // allocate through kgsl_pool_alloc_page; the pages come back via the local array
		int ret = kgsl_alloc_page(&page_size, &local[count],
			npages, &align, count, memdesc->shmem_filp, dev);

		if (ret == -EAGAIN)
			continue;
		else if (ret <= 0) {
			int i;

			for (i = 0; i < count; ) {
				int n = 1 << compound_order(local[i]);

				kgsl_free_page(local[i]);
				i += n;
			}
			kvfree(local);

			if (!kgsl_sharedmem_noretry_flag)
				pr_err_ratelimited("kgsl: out of memory: only allocated %lldKb of %lldKb requested\n",
					(size - len) >> 10, size >> 10);

			if (memdesc->shmem_filp)
				fput(memdesc->shmem_filp);

			return -ENOMEM;
		}

		count += ret;
		npages -= ret;
		len -= page_size;

		page_size = kgsl_get_page_size(len, align);
	}

    // hand the page array back to the caller
	*pages = local;

	return count;
}
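kgsl_get_page_size() picks the largest pool page size that still fits the remaining length and the requested alignment, so large buffers are assembled from big chunks first; a sketch based on one version of the kgsl headers:

static unsigned int kgsl_get_page_size(u64 size, unsigned int align)
{
	if (align >= ilog2(SZ_1M) && size >= SZ_1M)
		return SZ_1M;
	else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
		return SZ_64K;
	else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
		return SZ_8K;

	return PAGE_SIZE;
}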

2.2.6.1 kgsl_memdesc_file_setup
// configure kgsl to back allocations with shmem
#ifdef CONFIG_QCOM_KGSL_USE_SHMEM
static int kgsl_memdesc_file_setup(struct kgsl_memdesc *memdesc, uint64_t size)
{
	int ret;

    // create an unlinked shmem file named "kgsl-3d0" of the requested size to back this allocation
	memdesc->shmem_filp = shmem_file_setup("kgsl-3d0", size,
			VM_NORESERVE);
	if (IS_ERR(memdesc->shmem_filp)) {
		ret = PTR_ERR(memdesc->shmem_filp);
		pr_err("kgsl: unable to setup shmem file err %d\n",
				ret);
		memdesc->shmem_filp = NULL;
		return ret;
	}

	return 0;
}
#else
static int kgsl_memdesc_file_setup(struct kgsl_memdesc *memdesc, uint64_t size)
{
	return 0;
}
#endif

2.2.7 kgsl_mem_entry_attach_process

/*
 * Attach the memory object to a process by (possibly) getting a GPU address and
 * (possibly) mapping it
 */
static int kgsl_mem_entry_attach_process(struct kgsl_device *device,
		struct kgsl_process_private *process,
		struct kgsl_mem_entry *entry)
{
	int id, ret;

    // take a reference on the kgsl_process_private
	ret = kgsl_process_private_get(process);
	if (!ret)
		return -EBADF;

    // [see 2.2.7.1]
	ret = kgsl_mem_entry_track_gpuaddr(device, process, entry);
	if (ret) {
		kgsl_process_private_put(process);
		return ret;
	}

	idr_preload(GFP_KERNEL);
	spin_lock(&process->mem_lock);
	/* Allocate the ID but don't attach the pointer just yet */
    // allocate an id for the kgsl_mem_entry
	id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&process->mem_lock);
	idr_preload_end();

	if (id < 0) {
		if (!kgsl_memdesc_use_cpu_map(&entry->memdesc))
			kgsl_mmu_put_gpuaddr(&entry->memdesc);
		kgsl_process_private_put(process);
		return id;
	}

	entry->id = id;
	entry->priv = process;

	/*
	 * Map the memory if a GPU address is already assigned, either through
	 * kgsl_mem_entry_track_gpuaddr() or via some other SVM process
	 */
    // a GPU virtual address has already been assigned
	if (entry->memdesc.gpuaddr) {
        // [see 2.2.7.6]
		ret = kgsl_mmu_map(entry->memdesc.pagetable,
				&entry->memdesc);

		if (ret)
			kgsl_mem_entry_detach_process(entry);
	}

	kgsl_memfree_purge(entry->memdesc.pagetable, entry->memdesc.gpuaddr,
		entry->memdesc.size);

	return ret;
}
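The id stored in entry->id is how later ioctls find the allocation again. A simplified sketch of the lookup, modeled on kgsl_sharedmem_find_id (the function name here is illustrative):

static struct kgsl_mem_entry *
find_mem_entry_by_id(struct kgsl_process_private *process, unsigned int id)
{
	struct kgsl_mem_entry *entry;

	spin_lock(&process->mem_lock);
	entry = idr_find(&process->mem_idr, id);
	/* take a reference under the lock so the entry cannot be freed */
	if (entry && !kref_get_unless_zero(&entry->refcount))
		entry = NULL;
	spin_unlock(&process->mem_lock);

	return entry;
}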

2.2.7.1 kgsl_mem_entry_track_gpuaddr
/* Allocate an IOVA for memory objects that don't use SVM */
static int kgsl_mem_entry_track_gpuaddr(struct kgsl_device *device,
		struct kgsl_process_private *process,
		struct kgsl_mem_entry *entry)
{
	struct kgsl_pagetable *pagetable;

	/*
	 * If SVM is enabled for this object then the address needs to be
	 * assigned elsewhere
	 * Also do not proceed further in case of NoMMU.
	 */
    // return early for SVM objects (their address is assigned at mmap time) or when there is no MMU
	if (kgsl_memdesc_use_cpu_map(&entry->memdesc) ||
		(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
		return 0;

    // use the process pagetable (or the secure pagetable for secure buffers)
	pagetable = kgsl_memdesc_is_secured(&entry->memdesc) ?
		device->mmu.securepagetable : process->pagetable;

    // assign a GPU virtual address [see 2.2.7.2]
	return kgsl_mmu_get_gpuaddr(pagetable, &entry->memdesc);
}
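The two predicates used above are simple flag tests; sketches based on the kgsl headers:

static inline bool
kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
{
	/* SVM: the GPU address will mirror the CPU mmap address */
	return memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP;
}

static inline bool
kgsl_memdesc_is_secured(const struct kgsl_memdesc *memdesc)
{
	return memdesc && (memdesc->priv & KGSL_MEMDESC_SECURE);
}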

2.2.7.2 kgsl_mmu_get_gpuaddr
#define PT_OP_VALID(_pt, _field) \
	(((_pt) != NULL) && \
	 ((_pt)->pt_ops != NULL) && \
	 ((_pt)->pt_ops->_field != NULL))

/**
 * kgsl_mmu_get_gpuaddr - Assign a GPU address to the memdesc
 * @pagetable: GPU pagetable to assign the address in
 * @memdesc: mem descriptor to assign the memory to
 *
 * Return: 0 on success or negative on failure
 */
static inline int kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		 struct kgsl_memdesc *memdesc)
{
    // call kgsl_iommu_get_gpuaddr from iommu_pt_ops to assign a GPU virtual address [see 2.2.7.3]
	if (PT_OP_VALID(pagetable, get_gpuaddr))
		return pagetable->pt_ops->get_gpuaddr(pagetable, memdesc);

	return -ENOMEM;
}

2.2.7.3 kgsl_iommu_get_gpuaddr
static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	struct kgsl_iommu_pt *pt = pagetable->priv;
	int ret = 0;
	uint64_t addr, start, end, size;
	unsigned int align;

	if (WARN_ON(kgsl_memdesc_use_cpu_map(memdesc)))
		return -EINVAL;

	if (memdesc->flags & KGSL_MEMFLAGS_SECURE &&
			pagetable->name != KGSL_MMU_SECURE_PT)
		return -EINVAL;

    // size of the mapping for this kgsl_memdesc (including any guard page)
	size = kgsl_memdesc_footprint(memdesc);

	align = max_t(uint64_t, 1 << kgsl_memdesc_get_align(memdesc),
			PAGE_SIZE);

	if (memdesc->flags & KGSL_MEMFLAGS_FORCE_32BIT) {
		start = pagetable->compat_va_start;
		end = pagetable->compat_va_end;
	} else {
        // start of the pagetable's virtual address range
		start = pt->va_start;
        // end of the pagetable's virtual address range
		end = pt->va_end;
	}

	spin_lock(&pagetable->lock);
    // find an unmapped virtual address range [see 2.2.7.4]
	addr = _get_unmapped_area(pagetable, start, end, size, align);

	if (addr == (uint64_t) -ENOMEM) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * This path is only called in a non-SVM path with locks so we can be
	 * sure we aren't racing with anybody so we don't need to worry about
	 * taking the lock
	 */
    // track the allocated range in the pagetable's rbtree [see 2.2.7.5]
	ret = _insert_gpuaddr(pagetable, addr, size);
	if (ret == 0) {
        // record the GPU virtual address
		memdesc->gpuaddr = addr;
        // record the pagetable
		memdesc->pagetable = pagetable;
	}

out:
	spin_unlock(&pagetable->lock);
	return ret;
}
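kgsl_memdesc_footprint(), used above for the size, is the mapped extent including the optional guard page; a sketch based on the kgsl headers:

static inline uint64_t
kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc)
{
	if (!(memdesc->priv & KGSL_MEMDESC_GUARD_PAGE))
		return memdesc->size;

	/* reserve one extra page after the buffer for the guard page */
	return PAGE_ALIGN(memdesc->size + PAGE_SIZE);
}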

2.2.7.4 _get_unmapped_area
/*
 * struct kgsl_iommu_addr_entry - entry in the kgsl_pagetable rbtree.
 * @base: starting virtual address of the entry
 * @size: size of the entry
 * @node: the rbtree node
 */
struct kgsl_iommu_addr_entry {
    // starting virtual address of the entry
	uint64_t base;
	uint64_t size;
	struct rb_node node;
};

static uint64_t _get_unmapped_area(struct kgsl_pagetable *pagetable,
		uint64_t bottom, uint64_t top, uint64_t size,
		uint64_t align)
{
    // lowest-address node in the pagetable's red-black tree
	struct rb_node *node = rb_first(&pagetable->rbtree);
	uint64_t start;

	bottom = ALIGN(bottom, align);
	start = bottom;

	while (node != NULL) {
		uint64_t gap;
        // recover the enclosing kgsl_iommu_addr_entry from the rb_node
		struct kgsl_iommu_addr_entry *entry = rb_entry(node,
			struct kgsl_iommu_addr_entry, node);

		/*
		 * Skip any entries that are outside of the range, but make sure
		 * to account for some that might straddle the lower bound
		 */
		if (entry->base < bottom) {
			if (entry->base + entry->size > bottom)
				start = ALIGN(entry->base + entry->size, align);
			node = rb_next(node);
			continue;
		}

		/* Stop if we went over the top */
		if (entry->base >= top)
			break;

		/* Make sure there is a gap to consider */
		if (start < entry->base) {
			gap = entry->base - start;

			if (gap >= size)
				return start;
		}

		/* Stop if there is no more room in the region */
		if (entry->base + entry->size >= top)
			return (uint64_t) -ENOMEM;

		/* Start the next cycle at the end of the current entry */
		start = ALIGN(entry->base + entry->size, align);
		node = rb_next(node);
	}

    // return the start address if the remaining space up to top still fits
	if (start + size <= top)
		return start;

	return (uint64_t) -ENOMEM;
}
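To trace the search once (values chosen for illustration): with align = 4 KiB, bottom = 0x1000, top = 0x10000, and existing entries [0x1000, 0x3000) and [0x8000, 0x9000), a request of size 0x4000 first advances start past the first entry to 0x3000; the gap before the next entry is 0x8000 - 0x3000 = 0x5000 bytes, which is large enough, so _get_unmapped_area() returns 0x3000.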

2.2.7.5 _insert_gpuaddr
static int _insert_gpuaddr(struct kgsl_pagetable *pagetable,
		uint64_t gpuaddr, uint64_t size)
{
	struct rb_node **node, *parent = NULL;
    // allocate a kgsl_iommu_addr_entry
	struct kgsl_iommu_addr_entry *new =
		kmem_cache_alloc(addr_entry_cache, GFP_ATOMIC);

	if (new == NULL)
		return -ENOMEM;

    // set the entry's starting virtual address
	new->base = gpuaddr;
    // set the entry's size
	new->size = size;

    // root of the pagetable's red-black tree
	node = &pagetable->rbtree.rb_node;

	while (*node != NULL) {
		struct kgsl_iommu_addr_entry *this;

		parent = *node;
		this = rb_entry(parent, struct kgsl_iommu_addr_entry, node);

		if (new->base < this->base)
			node = &parent->rb_left;
		else if (new->base > this->base)
			node = &parent->rb_right;
		else {
			/* Duplicate entry */
			WARN(1, "duplicate gpuaddr: 0x%llx\n", gpuaddr);
			kmem_cache_free(addr_entry_cache, new);
			return -EEXIST;
		}
	}

    // link the rb_node into the pagetable's red-black tree and rebalance
	rb_link_node(&new->node, parent, node);
	rb_insert_color(&new->node, &pagetable->rbtree);

	return 0;
}

2.2.7.6 kgsl_mmu_map
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc)
{
	int size;
	struct kgsl_device *device = KGSL_MMU_DEVICE(pagetable->mmu);

	if (!memdesc->gpuaddr)
		return -EINVAL;
	/* Only global mappings should be mapped multiple times */
    // KGSL_MEMDESC_MAPPED marks a kgsl_memdesc as already mapped: only global buffers may be mapped more than once
	if (!kgsl_memdesc_is_global(memdesc) &&
			(KGSL_MEMDESC_MAPPED & memdesc->priv))
		return -EINVAL;

	size = kgsl_memdesc_footprint(memdesc);

	if (PT_OP_VALID(pagetable, mmu_map)) {
		int ret;

        // call kgsl_iommu_map from iommu_pt_ops [see 2.2.7.7]
		ret = pagetable->pt_ops->mmu_map(pagetable, memdesc);
		if (ret)
			return ret;

		atomic_inc(&pagetable->stats.entries);
        // memory accounting
		KGSL_STATS_ADD(size, &pagetable->stats.mapped,
				&pagetable->stats.max_mapped);
		kgsl_mmu_trace_gpu_mem_pagetable(pagetable);

		if (!kgsl_memdesc_is_global(memdesc)
				&& !(memdesc->flags & KGSL_MEMFLAGS_USERMEM_ION)) {
			kgsl_trace_gpu_mem_total(device, size);
		}

        // mark this memory as mapped
		memdesc->priv |= KGSL_MEMDESC_MAPPED;
	}

	return 0;
}

2.2.7.7 kgsl_iommu_map
static int
kgsl_iommu_map(struct kgsl_pagetable *pt,
			struct kgsl_memdesc *memdesc)
{
	int ret;
	uint64_t addr = memdesc->gpuaddr;
	uint64_t size = memdesc->size;
	unsigned int flags = _get_protection_flags(pt, memdesc);
	struct sg_table *sgt = NULL;

	/*
	 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
	 * Allocate sgt here just for its map operation. Contiguous memory
	 * already has its sgt, so no need to allocate it here.
	 */
	if (memdesc->pages != NULL)
		sgt = kgsl_alloc_sgt_from_pages(memdesc);
	else
		sgt = memdesc->sgt;

	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	ret = _iommu_map_sg(pt, addr, sgt->sgl, sgt->nents, flags);
	if (ret)
		goto done;

	ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
	if (ret)
		_iommu_unmap(pt, addr, size);

done:
	if (memdesc->pages != NULL)
		kgsl_free_sgt(sgt);

	return ret;
}
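As a closing note on the mapping path: _iommu_map_guard_page() maps one extra scratch page immediately after the buffer (the page reserved by kgsl_memdesc_footprint() when KGSL_MEMDESC_GUARD_PAGE is set), so GPU prefetches or small overruns past the end of the allocation land in a harmless page rather than in the next buffer's mapping.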