dpdk版本:dpdk-stable-16.11.11
今天我们来看一下rte_eal_hugepage_init() 函数都干了哪些事。
1、计算大页总数
在调用rte_eal_hugepage_init() 的时候,大页的信息已经被填充在了internal_config.num_hugepage_sizes 和 internal_config.hugepage_info中。num_hugepage_sizes中保存的是系统中大页的尺寸个数,比如系统支持2M和1G的大页,那么该字段就是2;hugepage_info 是类型为 struct hugepage_info 数组,存储的是每种大页尺寸的具体信息,包括大页的尺寸,挂载点,每个socket上配置的个数等,具体如下:
/*
 * Per-page-size hugepage bookkeeping, filled in before
 * rte_eal_hugepage_init() runs (one entry per supported page size).
 */
struct hugepage_info
{
	uint64_t hugepage_size;                 /* size of one hugepage (e.g. 2M or 1G) */
	const char *hugedir;                    /* hugetlbfs mount point for this size */
	uint32_t num_pages[RTE_MAX_NUMA_NODES]; /* pages configured per NUMA socket;
						 * before init the system-wide total
						 * is kept in num_pages[0] */
	int lock_descriptor;                    /* fd used to lock the hugepage dir */
};	/* NOTE: the original snippet was missing this ';' — a struct
	 * definition without it is a syntax error */
需要注意的是系统配置的大页总数被保存在num_pages[0]中。所以想要计算大页总数就相当简单了,一个for循环搞定。
/* Sum the configured hugepage count over every supported page size.
 * Before init, each size's total lives in its num_pages[0] slot. */
int i;
int nr_hugepages = 0;
for (i = 0; i < internal_config.num_hugepage_sizes; i++)
{
	/* BUG in original snippet: it read internal_config.num_pages[0],
	 * but num_pages belongs to the per-size hugepage_info entry, not
	 * to internal_config itself (see upstream eal_memory.c). */
	nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
}
2、映射 nr_hugepages 个大页
这一步会分为好几个步骤,1)first mmap,这次映射完成后,是虚拟地址连续的内存,物理地址并不连续;2)根据虚拟地址找对应的物理地址;3)根据虚拟地址找对应的socket_id;4)根据物理地址排序;5)remap,目的是将物理地址连续的内存映射在一起,从而达到部分页面虚拟地址和物理地址都连续。
首先介绍 struct hugepage_file 结构
struct hugepage_file {
void *orig_va; /**< virtual addr of first mmap() */
void *final_va; /**< virtual addr of 2nd mmap() */
uint64_t physaddr; /**< physical addr */
size_t size; /**< the page size */
int socket_id; /**< NUMA socket ID */
int file_id; /**< the '%d' in HUGEFILE_FMT */
int memseg_id; /**< the memory segment to which page belongs */
char filepath[MAX_HUGEPAGE_PATH]; /**< path to backing file on filesystem */
};
结构体本身的注释已经很清晰了,不再做太多介绍。
1)第一次映射,需要保证虚拟地址是连续的。
/*
 * First mapping pass: create one backing file per hugepage under the
 * hugetlbfs mount and mmap() it, recording the virtual address in
 * orig_va. Returns the number of pages successfully mapped (== i), so
 * the caller can detect a partial mapping on early return.
 */
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl,
	struct hugepage_info *hpi, int orig)
{
	int fd;
	unsigned i;
	void *virtaddr;
	void *vma_addr = NULL;
	size_t vma_len = 0;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		uint64_t hugepage_sz = hpi->hugepage_sz;

		if (orig) {
			/* name the backing file: <hugedir>/<prefix>map_<file_id> */
			hugepg_tbl[i].file_id = i;
			hugepg_tbl[i].size = hugepage_sz;
			eal_get_hugefile_path(hugepg_tbl[i].filepath,
				sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
				hugepg_tbl[i].file_id);
			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
		}

		/* try to create hugepage file */
		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
				strerror(errno));
			return i;
		}

		/* map the segment, and populate page tables,
		 * the kernel fills this segment with zeros */
		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
				strerror(errno));
			close(fd);
			return i;
		}

		hugepg_tbl[i].orig_va = virtaddr;
		/* touch the page so the hugepage is really faulted in */
		*(int *)virtaddr = 0;

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
				__func__, strerror(errno));
			close(fd);
			return i;
		}

		close(fd);

		/* hint the next mmap() right after this page, trying to keep
		 * the virtual addresses contiguous (first hint is NULL, i.e.
		 * the kernel picks the start address) */
		vma_addr = (char *)vma_addr + hugepage_sz;
		vma_len -= hugepage_sz;	/* never read in this pass; kept over
					 * from the combined upstream function */
	}

	/* BUG in original snippet: control fell off the end of a non-void
	 * function (undefined value for callers). Upstream DPDK returns
	 * the number of mapped pages here. */
	return i;
}
怎么样,是不是很简单?我叨叨两句,遍历该尺寸下的每一个大页,逐页进行mmap,只是在mmap的时候指定了期望的起始地址:第一次传入NULL(由内核选择映射地址),以后每成功映射一页,就把该地址 += hugepage_sz 作为下一次mmap的提示地址,这样做是为了尽量保证虚拟地址连续。
2)找物理内存(固定代码)
/*
 * Translate a virtual address of the current process into its physical
 * address by reading the corresponding page-frame entry from
 * /proc/self/pagemap. Returns RTE_BAD_PHYS_ADDR on any failure (file
 * not readable, seek error, short or failed read).
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
	uint64_t entry, physaddr;
	unsigned long vfn;
	off_t seek_pos;
	int pagemap_fd, nread;
	int page_size = getpagesize();	/* standard (non-huge) page size */

	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
	if (pagemap_fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_PHYS_ADDR;
	}

	/* each virtual page has one 64-bit entry in the pagemap file */
	vfn = (unsigned long)virtaddr / page_size;
	seek_pos = sizeof(uint64_t) * vfn;
	if (lseek(pagemap_fd, seek_pos, SEEK_SET) == (off_t) -1) {
		RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		close(pagemap_fd);
		return RTE_BAD_PHYS_ADDR;
	}

	nread = read(pagemap_fd, &entry, PFN_MASK_SIZE);
	close(pagemap_fd);
	if (nread < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_PHYS_ADDR;
	} else if (nread != PFN_MASK_SIZE) {
		RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
			"but expected %d:\n",
			__func__, nread, PFN_MASK_SIZE);
		return RTE_BAD_PHYS_ADDR;
	}

	/*
	 * the pfn (page frame number) are bits 0-54 (see
	 * pagemap.txt in linux Documentation)
	 */
	physaddr = ((entry & 0x7fffffffffffffULL) * page_size)
		+ ((unsigned long)virtaddr % page_size);

	return physaddr;
}
3)找socket_id
/*
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 * page.
 *
 * NOTE(review): lines are matched against orig_va (the first-pass
 * mapping), so this must run after the first map_all_hugepages() pass
 * and before those mappings are torn down.
 *
 * Returns 0 on success (also when numa_maps cannot be opened, in which
 * case all memory is assumed to be on socket 0), -1 on a parse error or
 * when fewer pages than expected were matched.
 */
static int
find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
int socket_id;
char *end, *nodestr;
unsigned i, hp_count = 0;
uint64_t virt_addr;
char buf[BUFSIZ];
char hugedir_str[PATH_MAX];
FILE *f;
f = fopen("/proc/self/numa_maps", "r");
if (f == NULL) {
RTE_LOG(NOTICE, EAL, "cannot open /proc/self/numa_maps,"
" consider that all memory is in socket_id 0\n");
return 0;
}
/* path prefix of our hugepage backing files inside the mount point,
 * used to recognize our own mappings in numa_maps */
snprintf(hugedir_str, sizeof(hugedir_str),
"%s/%s", hpi->hugedir, internal_config.hugefile_prefix);
/* parse numa map */
while (fgets(buf, sizeof(buf), f) != NULL) {
/* ignore non huge page */
if (strstr(buf, " huge ") == NULL &&
strstr(buf, hugedir_str) == NULL)
continue;
/* get zone addr */
/* each numa_maps line starts with the mapping's virtual address in hex */
virt_addr = strtoull(buf, &end, 16);
if (virt_addr == 0 || end == buf) {
RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
goto error;
}
/* get node id (socket id) */
/* the " N<node>=<nr_pages>" token carries the NUMA node id */
nodestr = strstr(buf, " N");
if (nodestr == NULL) {
RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
goto error;
}
nodestr += 2;
end = strstr(nodestr, "=");
if (end == NULL) {
RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
goto error;
}
/* NUL-terminate the node-id digits in place, then parse them */
end[0] = '\0';
end = NULL;
socket_id = strtoul(nodestr, &end, 0);
if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
goto error;
}
/* if we find this page in our mappings, set socket_id */
for (i = 0; i < hpi->num_pages[0]; i++) {
void *va = (void *)(unsigned long)virt_addr;
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
}
}
}
/* every expected page must have been matched, otherwise fail */
if (hp_count < hpi->num_pages[0])
goto error;
fclose(f);
return 0;
error:
fclose(f);
return -1;
}
4)根据物理内存排序(升序)
5)重新映射
重新映射的目的是将第一个映射的物理地址连续的大页,进行重新映射,保证物理地址和虚拟地址都连续。
先看get_virtual_area()函数,目的是尽量找到一块大小为size的连续虚拟地址空间,如果找不到就将 size -= hugepage_sz 后重试,直到 size 减到 0 为止。
/*
 * Probe the process address space for a free region of *size bytes
 * (plus one extra hugepage of slack so the result can be aligned) by
 * mmap'ing /dev/zero read-only. On success the probe mapping is
 * released again and the hugepage-aligned start address is returned,
 * with *size left unmodified. If no region fits, *size is shrunk by
 * hugepage_sz and the probe retried until *size reaches 0, in which
 * case NULL is returned.
 */
static void *
get_virtual_area(size_t *size, size_t hugepage_sz)
{
	long addr_val;
	void *probe;
	int zero_fd;

	/* honor --base-virtaddr if the user asked for a fixed layout */
	if (internal_config.base_virtaddr != 0)
		probe = (void*) (uintptr_t) (internal_config.base_virtaddr +
			baseaddr_offset);
	else
		probe = NULL;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	zero_fd = open("/dev/zero", O_RDONLY);
	if (zero_fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
		return NULL;
	}

	/* shrink the request one hugepage at a time until it fits */
	for (;;) {
		probe = mmap(probe,
			(*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, zero_fd, 0);
		if (probe != MAP_FAILED)
			break;
		*size -= hugepage_sz;
		if (*size == 0)
			break;
	}

	if (probe == MAP_FAILED) {
		close(zero_fd);
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		return NULL;
	}

	/* the probe only located a free range — release it right away and
	 * hand back an aligned pointer into it */
	munmap(probe, (*size) + hugepage_sz);
	close(zero_fd);

	/* align addr to a huge page size boundary */
	addr_val = (long)probe;
	addr_val += (hugepage_sz - 1);
	addr_val &= (~(hugepage_sz - 1));
	probe = (void *)(addr_val);

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		probe, *size);

	/* increment offset */
	baseaddr_offset += *size;

	return probe;
}
再看 map_all_hugepages()函数
/*
 * Second mapping pass (remap): re-mmap() each hugepage so that runs of
 * physically contiguous pages also get contiguous virtual addresses,
 * recording the new address in final_va. hugepg_tbl must already be
 * sorted by physical address. Returns the number of pages successfully
 * mapped (== i), so the caller can detect a partial mapping.
 */
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl,
	struct hugepage_info *hpi, int orig)
{
	int fd;
	unsigned i;
	void *virtaddr;
	void *vma_addr = NULL;
	size_t vma_len = 0;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		uint64_t hugepage_sz = hpi->hugepage_sz;

		if (vma_len == 0) {
			unsigned j, num_pages;

			/* reserve a virtual area for next contiguous
			 * physical block: count the number of
			 * contiguous physical pages. */
			for (j = i+1; j < hpi->num_pages[0] ; j++) {
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr + hugepage_sz)
					break;
			}
			num_pages = j - i;
			vma_len = num_pages * hugepage_sz;

			/* get the biggest virtual memory area up to
			 * vma_len. If it fails, vma_addr is NULL, so
			 * let the kernel provide the address. */
			vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
			if (vma_addr == NULL)
				vma_len = hugepage_sz;
		}

		/* try to create hugepage file */
		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
				strerror(errno));
			return i;
		}

		/* map the segment, and populate page tables,
		 * the kernel fills this segment with zeros */
		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
				strerror(errno));
			close(fd);
			return i;
		}

		hugepg_tbl[i].final_va = virtaddr;

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
				__func__, strerror(errno));
			close(fd);
			return i;
		}

		close(fd);

		/* advance inside the reserved area; when vma_len reaches 0
		 * the next iteration reserves a new contiguous block */
		vma_addr = (char *)vma_addr + hugepage_sz;
		vma_len -= hugepage_sz;
	}

	/* BUG in original snippet: control fell off the end of a non-void
	 * function. Upstream DPDK returns the number of mapped pages. */
	return i;
}
经过物理地址排序之后,这一次的映射首先会找到物理地址连续的最大的映射内存大小,找到该内存大小之后,再通过get_virtual_area()函数找该尺寸的内存地址,找到之后再进行一页一页的映射,最终达到的效果就是:虚拟地址连续的页面,其物理地址也是连续的。比如页面1和页面2是虚拟地址和物理地址都连续,页面3和页面4是虚拟地址和物理地址连续,但是页面2和页面3虚拟地址和物理地址都不连续。
3、获取每个socket上大页的个数
之前说过,在调用函数之前,所有的大页个数都被保存在了num_pages[0] 中,此处的0表示的是socket 0。这里我们需要找到每个socket上的大页个数。
/* Rebuild the per-socket page counts: walk every mapped page (socket_id
 * was filled in by find_numasocket()) and bump the counter of the
 * matching page-size entry on that page's socket. */
for (i = 0; i < nr_hugepages; i++)
{
socket_id = tmp_hp[i].socket_id;
/* find the hugepage_info entry whose page size matches this page */
for (j = 0; j < internal_config.num_hugepage_sizes; j++)
{
if (tmp_hp[i].size== internal_config.hugepage_info[j].hugepage_size)
{
internal_config.hugepage_info[j].num_pages[socket_id]++;
}
}
}
直接两层循环搞定,外层循环遍历所有映射的大页,内层循环遍历大页尺寸,将hugepage_size 相等的按照socket_id 累加。(会写代码和能清楚的表达出来是两回事,我就假定你们能看懂代码吧)
4、计算最终需要的大页个数
calc_num_pages_per_socket(),这个函数困扰了我很久,过程比较艰辛,我就直接上结论吧,有兴趣的可以参考我的另一篇(当然写的也是比较low,达到目的就行了)dpdk内存管理之calc_num_pages_per_socket()函数分析_趁着d年轻的博客-CSDN博客
1、请求内存比较大,实际的大页内存不够。这种情况下,会返回-1;
2、请求的内存比较小,实际的大页内存足够多,分三种:
请求1G,那么就分配一个1G的大页;
请求1000M,分配1G会有点多,看2M尺寸的大页够不够,如果够就分配2M的大页尺寸500个;如果不够,那就只能分配一个1G的大页;
5、释放不需要的大页内存
一般情况下,我们启动程序的时候都不会指定--socket-mem 参数(反正我们的程序不会指定),所以在这里可能不需要释放,但是如果指定了--socket-mem 参数,就会出现请求的内存比较少,而实际映射比较多的情况,还是直接上代码吧。
/*
 * Release hugepages that exceed the per-(page size, socket) quota
 * recorded in internal_config.hugepage_info[].num_pages[]. For each
 * size/socket pair the first num_pages matching entries are kept; every
 * further match is munmap'd and its backing file unlinked.
 * Returns 0 on success, -1 if a backing file could not be removed.
 */
static int
unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi,
		unsigned num_hp_info)
{
	unsigned sz, node;
	int idx, total_pages = 0;

	/* get total number of hugepages */
	for (sz = 0; sz < num_hp_info; sz++)
		for (node = 0; node < RTE_MAX_NUMA_NODES; node++)
			total_pages += internal_config.hugepage_info[sz].num_pages[node];

	for (sz = 0; sz < num_hp_info; sz++) {
		for (node = 0; node < RTE_MAX_NUMA_NODES; node++) {
			unsigned kept = 0;

			/* traverse until we have unmapped all the unused pages */
			for (idx = 0; idx < total_pages; idx++) {
				struct hugepage_file *cur = &hugepg_tbl[idx];

				/* skip pages of another size or socket */
				if (cur->size != hpi[sz].hugepage_sz ||
						cur->socket_id != (int) node)
					continue;

				if (kept < hpi[sz].num_pages[node]) {
					/* lock the page and skip */
					kept++;
					continue;
				}

				/* quota already satisfied: unmap this surplus
				 * page and delete its backing file */
				munmap(cur->final_va, (size_t) cur->size);
				cur->final_va = NULL;
				if (unlink(cur->filepath) == -1) {
					RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
						__func__, cur->filepath, strerror(errno));
					return -1;
				}
			} /* foreach page */
		} /* foreach socket */
	} /* foreach pagesize */

	return 0;
}
既然是要释放掉多余的大页映射,那么肯定是有一个比较的过程,那么谁和谁比呢?答案是internal_config.hugepage_info[i].num_pages[j] 和 hp_used[i].num_pages[j] 做比较,前者中保存的是这种大页尺寸在某个socket上所有的映射的个数,比如系统在socket1 上 配置了1G的大页20个,那么这里就是20;后者是真正需要的大页的个数,比如程序启动的时候我需要socket1 上有10G内存,那么我就只需要10个大页。所以多余的10个就会被unmap掉。(其实看不懂别人的代码不要紧,知道原理之后再看代码会事半功倍的)
6、将大页信息放在共享内存中
到目前为止,所有的map的大页信息都是在tmp_hp的变量中,这里的目的是将其copy到共享内存中。怎么做呢?当然是先map一段内存,然后将数据copy过去。上伪代码:
map_size =nr_hugefiles * sizeof(struct hugepage_file);
hugepage = mmap(NULL, map_size, ....);
// 从 tmp_hp 中拷贝 nr_hugepages 个大页放到 hugepage 中
copy(hugepage, nr_hugepages, tmp_hp, nr_hugepages);
7、填充 memseg 结构
先上结构体
/*
 * One memory segment: a run of hugepages that is contiguous in both
 * physical and virtual address space, homogeneous in page size and
 * NUMA socket.
 */
struct rte_memseg {
phys_addr_t phys_addr; /**< Start physical address. */
RTE_STD_C11
union {
void *addr; /**< Start virtual address. */
uint64_t addr_64; /**< Makes sure addr is always 64 bits */
};
size_t len; /**< Length of the segment. */
uint64_t hugepage_sz; /**< The pagesize of underlying memory */
int32_t socket_id; /**< NUMA socket ID. */
uint32_t nchannel; /**< Number of channels. */
uint32_t nrank; /**< Number of ranks. */
} __rte_packed;
memseg表示的是一段物理地址和虚拟地址都连续的内存段,该结构体中记录了该内存段的所属的socket_id,内存段的长度,内存段的大页的尺寸,内存段的物理起始地址和虚拟起始地址等信息。
那么对应一个大页,我是要新创建一个内存段还是要在上一个内存段上继续增加?上代码:
/* Fold the sorted hugepage table into memsegs: a page extends the
 * current memseg only when socket, page size, physical address and
 * virtual address are all contiguous with the previous page; any break
 * starts a new segment.
 * NOTE(review): this fragment assumes j starts at -1 before the loop so
 * the first iteration writes memseg[0] — confirm against the caller. */
for (i = 0; i < nr_hugefiles; i++) {
new_memseg = 0;
/* if this is a new section, create a new memseg */
if (i == 0)
new_memseg = 1;
else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
new_memseg = 1;
else if (hugepage[i].size != hugepage[i-1].size)
new_memseg = 1;
else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) != hugepage[i].size)
new_memseg = 1;
else if (((unsigned long)hugepage[i].final_va - (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
new_memseg = 1;
if (new_memseg) {
j += 1;
/* stop filling once the fixed-size memseg array is exhausted */
if (j == RTE_MAX_MEMSEG)
break;
mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
mcfg->memseg[j].addr = hugepage[i].final_va;
mcfg->memseg[j].len = hugepage[i].size;
mcfg->memseg[j].socket_id = hugepage[i].socket_id;
mcfg->memseg[j].hugepage_sz = hugepage[i].size;
}
/* continuation of previous memseg */
else {
mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
}
/* remember which segment this page landed in */
hugepage[i].memseg_id = j;
}
我是发现认真看代码还是可以理解的。判断条件为:socket_id 相等,page_size 相等,物理地址连续,虚拟地址连续,那么就不需要新创建一个memseg;否则新创建一个memseg。
结束语
到这里呢就结束了,做个总结吧。
那么这个函数最终干了个什么事呢?
在我看来他就是将大页内存进行了mmap,但是又不仅仅是mmap,在mmap的过程中,他做到了一些页面是物理地址和虚拟地址连续的(这一点比较牛),然后将这些连续的内存,用另外一种方式进行表示,那就是memseg。每个memsg表示的是一段物理地址和虚拟地址连续的内存。所以就这么个事,浪费了我大把大把的时间看代码。