memblock 结构体图解
Memblock API 代码段描述
- int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
/*
 * Register [base, base + size) as available RAM: delegates to
 * memblock_add_range() on the "memory" type, with no NUMA node
 * restriction (MAX_NUMNODES) and no flags.
 */
583 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
584 {
585 return memblock_add_range(&memblock.memory, base, size,
586 MAX_NUMNODES, 0);
587 }
/*
 * Insert [base, base + size) into @type's sorted, non-overlapping
 * region array.  The scan runs twice: the first pass (insert == false)
 * only counts how many new regions are needed, the array is grown until
 * it can hold them, then the second pass (insert == true) actually
 * inserts them, and finally adjacent compatible regions are merged.
 * Returns 0 on success, -ENOMEM if the region array could not be grown.
 */
498 int __init_memblock memblock_add_range(struct memblock_type *type,
499 phys_addr_t base, phys_addr_t size,
500 int nid, unsigned long flags)
501 {
502 bool insert = false;
503 phys_addr_t obase = base;
/* memblock_cap_size() clamps @size so that base + size cannot wrap */
504 phys_addr_t end = base + memblock_cap_size(base, &size);
505 int i, nr_new;
506
507 if (!size)
508 return 0;
509
510 /* special case for empty array */
511 if (type->regions[0].size == 0) {
512 WARN_ON(type->cnt != 1 || type->total_size);
513 type->regions[0].base = base;
514 type->regions[0].size = size;
515 type->regions[0].flags = flags;
516 memblock_set_region_node(&type->regions[0], nid);
517 type->total_size = size;
518 return 0;
519 }
520 repeat:
521 /*
522 * The following is executed twice. Once with %false @insert and
523 * then with %true. The first counts the number of regions needed
524 * to accommodate the new area. The second actually inserts them.
525 */
526 base = obase;
527 nr_new = 0;
528
529 for (i = 0; i < type->cnt; i++) {
530 struct memblock_region *rgn = &type->regions[i];
531 phys_addr_t rbase = rgn->base;
532 phys_addr_t rend = rbase + rgn->size;
533
/* regions are sorted: once rbase >= end nothing later can overlap */
534 if (rbase >= end)
535 break;
536 if (rend <= base)
537 continue;
538 /*
539 * @rgn overlaps. If it separates the lower part of new
540 * area, insert that portion.
541 */
542 if (rbase > base) {
543 nr_new++;
544 if (insert)
545 memblock_insert_region(type, i++, base,
546 rbase - base, nid,
547 flags);
548 }
549 /* area below @rend is dealt with, forget about it */
550 base = min(rend, end);
551 }
552
553 /* insert the remaining portion */
554 if (base < end) {
555 nr_new++;
556 if (insert)
557 memblock_insert_region(type, i, base, end - base,
558 nid, flags);
559 }
560
561 /*
562 * If this was the first round, resize array and repeat for actual
563 * insertions; otherwise, merge and return.
564 */
565 if (!insert) {
566 while (type->cnt + nr_new > type->max)
567 if (memblock_double_array(type, obase, size) < 0)
568 return -ENOMEM;
569 insert = true;
570 goto repeat;
571 } else {
572 memblock_merge_regions(type);
573 return 0;
574 }
575 }
- int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
/*
 * Remove [base, base + size) from @type.  memblock_isolate_range()
 * first splits any regions that straddle the range boundaries so that
 * indices [start_rgn, end_rgn) cover exactly the affected regions;
 * those are then deleted back-to-front so the remaining indices stay
 * valid while the array is compacted.
 */
665 int __init_memblock memblock_remove_range(struct memblock_type *type,
666 phys_addr_t base, phys_addr_t size)
667 {
668 int start_rgn, end_rgn;
669 int i, ret;
670
671 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
672 if (ret)
673 return ret;
674
/* iterate in reverse: removing region i does not shift indices < i */
675 for (i = end_rgn - 1; i >= start_rgn; i--)
676 memblock_remove_region(type, i);
677 return 0;
678 }
679
/* Drop [base, base + size) from the "memory" (available RAM) type. */
680 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
681 {
682 return memblock_remove_range(&memblock.memory, base, size);
683 }
- phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
/*
 * Core allocator behind all memblock_alloc* wrappers: find a free area
 * of @size bytes aligned to @align within [start, end) on node @nid,
 * then mark it reserved.  Returns the physical address on success, or
 * 0 on failure (either no suitable range found or the reservation
 * itself failed).
 */
1037 static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1038 phys_addr_t align, phys_addr_t start,
1039 phys_addr_t end, int nid)
1040 {
1041 phys_addr_t found;
1042
/* callers may pass 0 to mean "default cache-line alignment" */
1043 if (!align)
1044 align = SMP_CACHE_BYTES;
1045
1046 found = memblock_find_in_range_node(size, align, start, end, nid);
1047 if (found && !memblock_reserve(found, size)) {
1048 /*
1049 * The min_count is set to 0 so that memblock allocations are
1050 * never reported as leaks.
1051 */
1052 kmemleak_alloc(__va(found), size, 0, 0);
1053 return found;
1054 }
1055 return 0;
1056 }
1057
/* Range-limited allocation with no NUMA node preference. */
1058 phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
1059 phys_addr_t start, phys_addr_t end)
1060 {
1061 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
1062 }
1063
/* Node-aware allocation anywhere below @max_addr (lower bound is 0). */
1064 static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
1065 phys_addr_t align, phys_addr_t max_addr,
1066 int nid)
1067 {
1068 return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
1069 }
1070
/* Node-aware allocation limited only by MEMBLOCK_ALLOC_ACCESSIBLE. */
1071 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
1072 {
1073 return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
1074 }
1075
/*
 * Allocation below @max_addr, no node preference; returns 0 on failure
 * rather than panicking (contrast with memblock_alloc_base()).
 */
1076 phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
1077 {
1078 return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE)
;
1080
/*
 * Like __memblock_alloc_base(), but panics on failure — callers need
 * not check the returned address.
 */
1081 phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
1082 {
1083 phys_addr_t alloc;
1084
1085 alloc = __memblock_alloc_base(size, align, max_addr);
1086
1087 if (alloc == 0)
1088 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
1089 (unsigned long long) size, (unsigned long long) max_addr);
1090
1091 return alloc;
1092 }
/*
 * Simplest allocation interface: size + alignment only, anywhere
 * memblock considers accessible; panics on failure (via
 * memblock_alloc_base()).
 */
1094 phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
1095 {
1096 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
1097 }
1098
- int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
/*
 * Return a previously reserved range to the free pool: removes
 * [base, base + size) from the "reserved" type and unregisters the
 * corresponding part of the kmemleak object.  Only the reservation
 * record is dropped; the memory contents are untouched.
 */
686 int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
687 {
688 memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n",
689 (unsigned long long)base,
690 (unsigned long long)base + size - 1,
691 (void *)_RET_IP_);
692
693 kmemleak_free_part(__va(base), size);
694 return memblock_remove_range(&memblock.reserved, base, size);
695 }
696