How highmem allocation and use correspond to physical addresses

Execution trace of a test program:

 

 *****************************************************

virt:0xaa398000 
virtaddr = 0xaa398000
page_size = 1000
virt_pfn = aa398
offset = 551cc0
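A quick cross-check of the numbers above (page_size = 0x1000, one 64-bit pagemap entry per page):

//  virt_pfn = virtaddr / page_size        = 0xaa398000 / 0x1000 = 0xaa398
//  offset   = virt_pfn * sizeof(uint64_t) = 0xaa398 * 8         = 0x551cc0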

//user space: allocate a small buffer (within one 4 KB page) and write values into it
[   47.195130] in pagemap_read
[   47.206743] buf = beda8a48
[   47.209892] count = 8
[   47.212545] ppos = dc1d7f70
[   47.215695] *ppos = 551cc0
[   47.218731] task->comm = mytest
[   47.222388] 
[   47.222388] 
[   47.222388] pagemap_pmd_range start...
[   47.229901] addr = aa398000
[   47.233027] end = aa400000
[   47.236061] pte = ffefe660
[   47.239089] *pte = 9ed9375f
[   47.242215] addr = aa398000
[   47.245341] page = eb34e8ac

//the physical address is looked up from the virtual address
[   47.248467] 
[   47.248467] 
[   47.248467]  map_new_virtual:  
[   47.255264] page = eb34e8ac
[   47.258375] vaddr = bfed6000
[   47.261621] kaddr = 0xbfed6000

//log from kmap; the key value is the struct page pointer

//#define __pfn_to_page(pfn)    (mem_map + ((pfn) - ARCH_PFN_OFFSET))
    //#define __page_to_pfn(page)    ((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)
    //these two macros are inverses of each other

//allocating (pfn -> page) and mapping (page -> pfn) are inverse operations of each other
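//A worked cross-check with the trace values (mem_map's base and ARCH_PFN_OFFSET are
//board-specific, so only the relation is shown, not the pointer arithmetic itself):
//  pfn 0x9ed93   -> __pfn_to_page(0x9ed93) = mem_map + (0x9ed93 - ARCH_PFN_OFFSET),
//                   which is the "page = eb34e8ac" printed in the kmap log above
//  page eb34e8ac -> __page_to_pfn(page) = 0x9ed93, i.e. physical frame 0x9ed93000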
[   47.265053] walk.data: bfed6000: aa bb cc dd 55 55 55 55 55 55 55 55 55 55 55 55  ....UUUUUUUUUUUU
[   47.275062] walk.data: bfed6010: 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55  UUUUUUUUUUUUUUUU
[   47.285074] walk.data: bfed6020: 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55  UUUUUUUUUUUUUUUU
[   47.295085] walk.data: bfed6030: 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55  UUUUUUUUUUUUUUUU
[   47.305097] pagemap_pmd_range end!!! err = 0
[   47.305097] 

//back in user space: print the physical memory address corresponding to the virtual address
page = 810000000009ed93
virt:0xaa398000 phys:9ed93000
cnt = 1, pad = 55.
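Decoding that pagemap entry by hand (bit layout per pagemap.txt in the kernel Documentation) reproduces the printed physical address:

//  page = 0x810000000009ed93
//    bit 63 (page present)            = 1
//    bit 56 (page exclusively mapped) = 1
//    bits 0-54 (PFN)                  = 0x9ed93
//  phys = PFN * page_size + (virt % page_size)
//       = 0x9ed93 * 0x1000 + (0xaa398000 % 0x1000) = 0x9ed93000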

User-space program:


#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#define phys_addr_t     uint64_t
#define PFN_MASK_SIZE   8

phys_addr_t rte_mem_virt2phy( const void *virtaddr )
{
    int fd, retval;
    uint64_t page, physaddr;
    unsigned long virt_pfn;
    int page_size;
    off_t offset;
                                         
    /* standard page size */
    page_size = getpagesize();
    fd = open("/proc/self/pagemap", O_RDONLY);
    if( fd < 0 ){
        perror("open /proc/self/pagemap");
        return -1;
    }
    virt_pfn = (unsigned long)virtaddr / page_size;

    printf( "virtaddr = %p\n", virtaddr );
    printf( "page_size = %x\n", page_size );
    printf( "virt_pfn = %lx\n", virt_pfn );

    offset = sizeof(uint64_t) * virt_pfn;

    printf( "offset = %lx\n", offset );

    
    if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
        close(fd);
        return -1;
    }
    retval = read(fd, &page, PFN_MASK_SIZE);
    close(fd);
    if (retval != PFN_MASK_SIZE) {
        return -1;
    }


    //the pfn (page frame number) is stored in bits 0-54
    //(see pagemap.txt in the linux Documentation)
    if ((page & 0x7fffffffffffffULL) == 0){
        return -1;
    }

    printf( "page = %llx\n", page );
    

    
    physaddr = ((page & 0x7fffffffffffffULL) * page_size) + ((unsigned long)virtaddr % page_size);


//Copyright notice: the original article is by the CSDN blogger 宋宝华, under the
//CC 4.0 BY-SA license; keep the link to the original and this notice when reposting.
//Original: https://blog.csdn.net/21cnbao/article/details/108989210

    return physaddr;
}

int main( int argc, char **argv )
{      
    printf( "hello mytest, pid = %d.\n", getpid() );
    
    uint8_t *p;
    int cnt;
    cnt = 0x00;
    uint8_t pad= 0x55;

    while(1){
      cnt++;
      p = malloc(3 * 1024);
      printf("virt:%p \n", p);
      if( p==NULL ){
          sleep(5);
          continue;
      }
      
      memset(p, pad, 3 * 1024);
      p[0] = 0xaa;
      p[1] = 0xbb;
      p[2] = 0xcc;
      p[3] = 0xdd;

      
      printf("virt:%p phys:%llx\n", p, rte_mem_virt2phy(p));
      printf("cnt = %d, pad = %x.\n", cnt, pad);
      
      pad = pad + 0x11;

      free(p);
      
      //sleep(1);
      break;

    
    //*(p + 2 * 4096) = 10;
    //printf("virt:%p phys:%llx\n", p + 2 * 4096, rte_mem_virt2phy(p + 2 * 4096));
    }

  
  

    return 0;

}

 

Kernel-side code (mm/highmem.c):

/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>


#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM

/*
 * Architecture with aliasing data cache may define the following family of
 * helper functions in its asm/highmem.h to control cache color of virtual
 * addresses where physical memory pages are mapped by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine color of virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
    return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
    static unsigned int last_pkmap_nr;

    last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
    return last_pkmap_nr;
}

/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
    return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
    return LAST_PKMAP;
}

/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
    static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

    return &pkmap_map_wait;
}
#endif

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);


EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

unsigned int nr_free_highpages (void)
{
    struct zone *zone;
    unsigned int pages = 0;

    for_each_populated_zone(zone) {
        if (is_highmem(zone))
            pages += zone_page_state(zone, NR_FREE_PAGES);
    }

    return pages;
}

static int pkmap_count[LAST_PKMAP];
static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t * pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQ out of the locking in that case to save on a
 * potential useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
        do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
        do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *kmap_to_page(void *vaddr)
{
    unsigned long addr = (unsigned long)vaddr;

    if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
        int i = PKMAP_NR(addr);
        return pte_page(pkmap_page_table[i]);
    }

    return virt_to_page(addr);
}
EXPORT_SYMBOL(kmap_to_page);

static void flush_all_zero_pkmaps(void)
{
    int i;
    int need_flush = 0;

    flush_cache_kmaps();

    for (i = 0; i < LAST_PKMAP; i++) {
        struct page *page;

        /*
         * zero means we don't have anything to do,
         * >1 means that it is still in use. Only
         * a count of 1 means that it is free but
         * needs to be unmapped
         */
        if (pkmap_count[i] != 1)
            continue;
        pkmap_count[i] = 0;

        /* sanity check */
        BUG_ON(pte_none(pkmap_page_table[i]));

        /*
         * Don't need an atomic fetch-and-clear op here;
         * no-one has the page mapped, and cannot get at
         * its virtual address (and hence PTE) without first
         * getting the kmap_lock (which is held here).
         * So no dangers, even with speculative execution.
         */
        page = pte_page(pkmap_page_table[i]);
        pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

        set_page_address(page, NULL);
        need_flush = 1;
    }
    if (need_flush)
        flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
    lock_kmap();
    flush_all_zero_pkmaps();
    unlock_kmap();
}

//Looking further into map_new_virtual(): pkmap is a range of virtual addresses already reserved
//in the kernel address space, and we now need to pick one of those addresses to map a particular page.
//The purpose of this function is to find a free, unused pkmap address and then create the pte entry
//that maps the physical page to that virtual address.


int debug_en_highmem = 0x00;    


//https://cloud.tencent.com/developer/article/1381079
//https://blog.csdn.net/rikeyone/article/details/85223458
static inline unsigned long map_new_virtual(struct page *page)
{
    unsigned long vaddr;
    int count;
    unsigned int last_pkmap_nr;
    unsigned int color = get_pkmap_color(page);

    
    struct task_struct *task;
    task = current;
    if( !strcmp(task->comm, "mytest") ){
        debug_en_highmem = 0x01;
    }else{
        debug_en_highmem = 0x00; 
    }    

                

start:
    count = get_pkmap_entries_count(color);
    /* Find an empty entry */
    for (;;) {
        last_pkmap_nr = get_next_pkmap_nr(color);
        if (no_more_pkmaps(last_pkmap_nr, color)) {
            flush_all_zero_pkmaps();
            count = get_pkmap_entries_count(color);
        }
        if (!pkmap_count[last_pkmap_nr])
            break;    /* Found a usable entry */
        if (--count)
            continue;

        /*
         * Sleep for somebody else to unmap their entries
         */
        {
            DECLARE_WAITQUEUE(wait, current);
            wait_queue_head_t *pkmap_map_wait =
                get_pkmap_wait_queue_head(color);

            __set_current_state(TASK_UNINTERRUPTIBLE);
            add_wait_queue(pkmap_map_wait, &wait);
            unlock_kmap();
            schedule();
            remove_wait_queue(pkmap_map_wait, &wait);
            lock_kmap();

            /* Somebody else might have mapped it while we slept */
            if (page_address(page))
                return (unsigned long)page_address(page);

            /* Re-start */
            goto start;
        }
    }
    vaddr = PKMAP_ADDR(last_pkmap_nr);


    


    //printk("task.name = %s\n", task->comm);
    
    if( debug_en_highmem ){    

        printk("\n\n map_new_virtual:  \n");
        printk("page = %lx\n", (unsigned long)page);
        printk("vaddr = %lx\n", vaddr);
        //printk("last_pkmap_nr = %lx\n", last_pkmap_nr);
        //printk("&(pkmap_page_table[last_pkmap_nr]) = %lx\n", &(pkmap_page_table[last_pkmap_nr]) );
        //printk("mk_pte(page, kmap_prot) = %lx\n", mk_pte(page, kmap_prot));
        //dump_stack();
    }else{
        //printk("\n\n map_new_virtualxxxxx:  \n");
        //printk("pagex = %lx\n", page);
        //printk("vaddrxcv = %lx\n", vaddr);
    }
    set_pte_at(&init_mm, vaddr, &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

    //#define __pfn_to_page(pfn)    (mem_map + ((pfn) - ARCH_PFN_OFFSET))
    //#define __page_to_pfn(page)    ((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)
    //these two macros are inverses of each other
    pkmap_count[last_pkmap_nr] = 1;
    set_page_address(page, (void *)vaddr);

    return vaddr;
}


//Loop over the pkmap area looking for a free, unused virtual page.
//If one is found, break out of the loop and create the PTE entry for it.
//If none is found, every pkmap slot has already been taken by other kernel paths, so the
//pkmap mappings whose count has dropped to zero are flushed and released.
//If there is still no free slot after the flush, the process requesting the mapping has to wait;
//once it is woken up, it retries the whole sequence of steps.
//When a free slot is finally found, the PTE entry is created, and
//set_page_address() records the newly mapped virtual address in the page's bookkeeping.
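
To make the call path concrete, here is a minimal usage sketch (not part of the original post; the function name is purely illustrative). A driver-style snippet allocates a page that may come from ZONE_HIGHMEM and touches it through kmap()/kunmap(), which for highmem pages end up in kmap_high()/kunmap_high() shown below:

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

static int kmap_usage_sketch(void)
{
    struct page *page;
    void *vaddr;

    page = alloc_page(GFP_HIGHUSER);    /* may land in ZONE_HIGHMEM */
    if (!page)
        return -ENOMEM;

    vaddr = kmap(page);                 /* lowmem: direct map; highmem: kmap_high() -> map_new_virtual() */
    memset(vaddr, 0x55, PAGE_SIZE);     /* the page is now addressable from kernel code */
    kunmap(page);                       /* drops the pkmap_count reference taken by kmap_high() */

    __free_page(page);
    return 0;
}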


/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
    unsigned long vaddr;

    /*
     * For highmem pages, we can't trust "virtual" until
     * after we have the lock.
     */
    lock_kmap();
    vaddr = (unsigned long)page_address(page);
    if (!vaddr)
        vaddr = map_new_virtual(page);
    pkmap_count[PKMAP_NR(vaddr)]++;
    BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
    unlock_kmap();
    return (void*) vaddr;
}

EXPORT_SYMBOL(kmap_high);

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists.  If and only if a non null address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
    unsigned long vaddr, flags;

    lock_kmap_any(flags);
    vaddr = (unsigned long)page_address(page);
    if (vaddr) {
        BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
        pkmap_count[PKMAP_NR(vaddr)]++;
    }
    unlock_kmap_any(flags);
    return (void*) vaddr;
}
#endif

/**
 * kunmap_high - unmap a highmem page into memory
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
    unsigned long vaddr;
    unsigned long nr;
    unsigned long flags;
    int need_wakeup;
    unsigned int color = get_pkmap_color(page);
    wait_queue_head_t *pkmap_map_wait;

    lock_kmap_any(flags);
    vaddr = (unsigned long)page_address(page);
    BUG_ON(!vaddr);
    nr = PKMAP_NR(vaddr);

    /*
     * A count must never go down to zero
     * without a TLB flush!
     */
    need_wakeup = 0;
    switch (--pkmap_count[nr]) {
    case 0:
        BUG();
    case 1:
        /*
         * Avoid an unnecessary wake_up() function call.
         * The common case is pkmap_count[] == 1, but
         * no waiters.
         * The tasks queued in the wait-queue are guarded
         * by both the lock in the wait-queue-head and by
         * the kmap_lock.  As the kmap_lock is held here,
         * no need for the wait-queue-head's lock.  Simply
         * test if the queue is empty.
         */
        pkmap_map_wait = get_pkmap_wait_queue_head(color);
        need_wakeup = waitqueue_active(pkmap_map_wait);
    }
    unlock_kmap_any(flags);

    /* do wake-up, if needed, race-free outside of the spin lock */
    if (need_wakeup)
        wake_up(pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER    7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
    struct page *page;
    void *virtual;
    struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
    struct list_head lh;            /* List of page_address_maps */
    spinlock_t lock;            /* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
    return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}


//We know that physical addresses below 896M (low memory) map one-to-one onto the
//linear address range 3G .. 3G+896M, so once we know a page's physical address we
//also know its linear address (pa + PAGE_OFFSET).
//How do we find a page's physical address? Physical memory is split into pages of
//size (1<<PAGE_SHIFT); each such page is described by a struct page, and these
//descriptors are stored in the mem_map array strictly in physical order: the first
//physical page's descriptor is the first element of mem_map, and so on.
//Therefore a page descriptor's index in mem_map, multiplied by the page size, gives
//that page's physical address. The code here relies on exactly that:
//page_to_pfn(page) yields the page's index in mem_map; shifting it left by PAGE_SHIFT
//multiplies by the page size and yields the physical address; adding PAGE_OFFSET (3G)
//to that physical address yields the page's linear address.
//This is how a struct page is converted to a virtual address in low memory (below 896M).
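
For reference, the lowmem path described above, lowmem_page_address() as called from page_address() below, reduces to roughly the following (a sketch; the exact definition lives in include/linux/mm.h and varies with kernel version and memory model, and the _sketch suffix is added here):

/* page -> pfn -> physical address -> direct-mapped (linear) virtual address */
static inline void *lowmem_page_address_sketch(const struct page *page)
{
    return __va(PFN_PHYS(page_to_pfn(page)));   /* i.e. phys + PAGE_OFFSET */
}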


/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
    unsigned long flags;
    void *ret;
    struct page_address_slot *pas;

    if (!PageHighMem(page))
        return lowmem_page_address(page);
    //check whether the page belongs to highmem; if not, it is in low
    //memory and its address can be computed directly as described above

    pas = page_slot(page);
    //see the analysis further below: pas points to the head of the hash bucket that holds this page's page_address_map entry
    ret = NULL;
    spin_lock_irqsave(&pas->lock, flags);
    if (!list_empty(&pas->lh)) {
        struct page_address_map *pam;

        list_for_each_entry(pam, &pas->lh, list) {
            if (pam->page == page) {
                //walk the hash bucket, find the entry matching this page, and return its virtual (linear) address
                ret = pam->virtual;
                goto done;
            }
        }
    }
done:
    spin_unlock_irqrestore(&pas->lock, flags);
    return ret;
}

EXPORT_SYMBOL(page_address);


//In high memory we cannot, as in low memory, obtain the linear address simply by adding
//PAGE_OFFSET to the physical address, so a page_address_map structure is introduced. It
//records each (highmem-only) page together with its mapped virtual address, and all such
//mappings are linked into lists; the entry is created when the highmem page is mapped and
//is added to a list at that time.

//If RAM is much larger than 896M there are many highmem pages ((RAM - 896M)/4K of them,
//assuming 4K pages), and a single list would be slow to search. So hashing is used:
//there are several lists, and each page is hashed onto one of them, 128 lists in total.

//PA_HASH_ORDER = 7, so there are 1<<7 (128) lists; each page hashes to one
//page_address_htable bucket, whose list is then walked to find the matching page and its
//virtual address. The hashing of a page to its page_address_htable bucket is done by
//page_slot() shown above.


//https://www.cnblogs.com/alantu2018/default.html?page=21

//https://www.cnblogs.com/alantu2018/category/1163287.html?page=1
/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
//add a page's mapping to the hash list (or remove it when virtual == NULL)
void set_page_address(struct page *page, void *virtual)
{
    unsigned long flags;
    struct page_address_slot *pas;
    struct page_address_map *pam;

    BUG_ON(!PageHighMem(page));

    pas = page_slot(page);
    if (virtual) {        /* Add */
        pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
        pam->page = page;
        pam->virtual = virtual;

        spin_lock_irqsave(&pas->lock, flags);
        list_add_tail(&pam->list, &pas->lh);
        spin_unlock_irqrestore(&pas->lock, flags);
    } else {        /* Remove */
        spin_lock_irqsave(&pas->lock, flags);
        list_for_each_entry(pam, &pas->lh, list) {
            if (pam->page == page) {
                list_del(&pam->list);
                spin_unlock_irqrestore(&pas->lock, flags);
                goto done;
            }
        }
        spin_unlock_irqrestore(&pas->lock, flags);
    }
done:
    return;
}

void __init page_address_init(void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
        INIT_LIST_HEAD(&page_address_htable[i].lh);
        spin_lock_init(&page_address_htable[i].lock);
    }
}

#endif    /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
 

 

mm/pagewalk.c

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>


int debug_en_pagewalk = 0x00;    


static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
              struct mm_walk *walk)
{
    pte_t *pte;
    int err = 0;

    pte = pte_offset_map(pmd, addr);
    for (;;) {
        err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
        if (err)
               break;
        addr += PAGE_SIZE;
        if (addr == end)
            break;
        pte++;
    }

    pte_unmap(pte);
    return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
              struct mm_walk *walk)
{
    pmd_t *pmd;
    unsigned long next;
    int err = 0;

    pmd = pmd_offset(pud, addr);


    if( debug_en_pagewalk ){
        //printk("pmd = %p\n", pmd); 
    } 
    
    do {
again:
        next = pmd_addr_end(addr, end);

        if( debug_en_pagewalk ){
            //printk("next = %lx\n", next); 
            //printk("end = %lx\n", end); 
        } 

        
        if (pmd_none(*pmd) || !walk->vma) {
            if (walk->pte_hole){
                err = walk->pte_hole(addr, next, walk);
                //pagemap_pte_hole
            }
            if (err)
                break;
            continue;
        }
        /*
         * This implies that each ->pmd_entry() handler
         * needs to know about pmd_trans_huge() pmds
         */
        if (walk->pmd_entry){
            err = walk->pmd_entry(pmd, addr, next, walk);
            //pagemap_pmd_range
        }

        if( debug_en_pagewalk ){
            //printk("err = %lx\n", err); 
        } 

        
        if (err)
            break;

        /*
         * Check this here so we only break down trans_huge
         * pages when we _need_ to
         */


        
        if( debug_en_pagewalk ){
            //printk("walk->pte_entry = %pF\n", walk->pte_entry); 
        } 


        if (!walk->pte_entry)
            continue;

        split_huge_pmd(walk->vma, pmd, addr);
        if (pmd_trans_unstable(pmd))
            goto again;
        err = walk_pte_range(pmd, addr, next, walk);
        if (err)
            break;
    } while (pmd++, addr = next, addr != end);

    return err;
}

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
              struct mm_walk *walk)
{
    pud_t *pud;
    unsigned long next;
    int err = 0;

    pud = pud_offset(pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(pud)) {
            if (walk->pte_hole)
                err = walk->pte_hole(addr, next, walk);
            if (err)
                break;
            continue;
        }
        if (walk->pmd_entry || walk->pte_entry)
            err = walk_pmd_range(pud, addr, next, walk);
        if (err)
            break;
    } while (pud++, addr = next, addr != end);

    return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
              struct mm_walk *walk)
{
    pgd_t *pgd;
    unsigned long next;
    int err = 0;

    pgd = pgd_offset(walk->mm, addr);


    if( debug_en_pagewalk ){
        //printk("1walk->mm = %p\n", walk->mm); 
        //printk("addr = %lx\n", addr); 
        //printk("end = %lx\n", end); 
        //printk("pgd = %p\n", pgd); 
    } 

    
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(pgd)) {
            if (walk->pte_hole)
                err = walk->pte_hole(addr, next, walk);
            if (err)
                break;
            continue;
        }
        if (walk->pmd_entry || walk->pte_entry)
            err = walk_pud_range(pgd, addr, next, walk);
        if (err)
            break;
    } while (pgd++, addr = next, addr != end);

    return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                       unsigned long end)
{
    unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
    return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                  struct mm_walk *walk)
{
    struct vm_area_struct *vma = walk->vma;
    struct hstate *h = hstate_vma(vma);
    unsigned long next;
    unsigned long hmask = huge_page_mask(h);
    pte_t *pte;
    int err = 0;

    do {
        next = hugetlb_entry_end(h, addr, end);
        pte = huge_pte_offset(walk->mm, addr & hmask);

        if (pte)
            err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
        else if (walk->pte_hole)
            err = walk->pte_hole(addr, next, walk);

        if (err)
            break;
    } while (addr = next, addr != end);

    return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                  struct mm_walk *walk)
{
    return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values means
 * error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
            struct mm_walk *walk)
{
    struct vm_area_struct *vma = walk->vma;

    if (walk->test_walk)
        return walk->test_walk(start, end, walk);

    /*
     * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
     * range, so we don't walk over it as we do for normal vmas. However,
     * Some callers are interested in handling hole range and they don't
     * want to just ignore any single address range. Such users certainly
     * define their ->pte_hole() callbacks, so let's delegate them to handle
     * vma(VM_PFNMAP).
     */
    if (vma->vm_flags & VM_PFNMAP) {
        int err = 1;
        if (walk->pte_hole)
            err = walk->pte_hole(start, end, walk);
        return err ? err : 1;
    }
    return 0;
}

static int __walk_page_range(unsigned long start, unsigned long end,
            struct mm_walk *walk)
{
    int err = 0;
    struct vm_area_struct *vma = walk->vma;

    if (vma && is_vm_hugetlb_page(vma)) {
        if (walk->hugetlb_entry)
            err = walk_hugetlb_range(start, end, walk);
    } else
        err = walk_pgd_range(start, end, walk);

    return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific works for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *  - 0  : succeeded to handle the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @walk->private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these function traverse vma list and/or
 *   access to vma's data.
 */
int walk_page_range(unsigned long start, unsigned long end,
            struct mm_walk *walk)
{
    int err = 0;
    unsigned long next;
    struct vm_area_struct *vma;
    struct task_struct *task;


    rcu_read_lock();
    task = rcu_dereference(walk->mm->owner);
    rcu_read_unlock();
    
    //printk("task->comm = %s\n", task->comm);    
    if( !strcmp(task->comm, "mytest") ){
        debug_en_pagewalk = 0x01;
    }else{
        debug_en_pagewalk = 0x00; 
    }
    
        
    

    if (start >= end)
        return -EINVAL;

    if (!walk->mm)
        return -EINVAL;

    VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

    vma = find_vma(walk->mm, start);


    if( debug_en_pagewalk ){
        //printk("vma = %p\n", vma); 
        //printk("start = %lx\n", start); 

        //printk("vma->vm_start = %lx\n", vma->vm_start); 
        //printk("vma->vm_end = %lx\n", vma->vm_end); 
        //printk("end = %lx\n", end); 
        

        
    } 
    
    do {
        if (!vma) { /* after the last vma */
            walk->vma = NULL;
            next = end;
        } else if (start < vma->vm_start) { /* outside vma */
            walk->vma = NULL;
            next = min(end, vma->vm_start);
        } else { /* inside vma */
            walk->vma = vma;
            next = min(end, vma->vm_end);
            vma = vma->vm_next;

            err = walk_page_test(start, next, walk);
            if (err > 0) {
                /*
                 * positive return values are purely for
                 * controlling the pagewalk, so should never
                 * be passed to the callers.
                 */
                err = 0;
                continue;
            }
            if (err < 0)
                break;
        }
        if (walk->vma || walk->pte_hole){
            if( debug_en_pagewalk ){
                //printk("startx = %lx\n", start); 
                //printk("nextc = %lx\n", next); 
            } 
            err = __walk_page_range(start, next, walk);
            if( debug_en_pagewalk ){
                break;
            }
            
        }
        if (err)
            break;
    } while (start = next, start < end);
    return err;
}

int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
    int err;

    if (!walk->mm)
        return -EINVAL;

    VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
    VM_BUG_ON(!vma);
    walk->vma = vma;
    err = walk_page_test(vma->vm_start, vma->vm_end, walk);
    if (err > 0)
        return 0;
    if (err < 0)
        return err;
    return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}
 

fs/proc/task_mmu.c

#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
    unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
    unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

    anon = get_mm_counter(mm, MM_ANONPAGES);
    file = get_mm_counter(mm, MM_FILEPAGES);
    shmem = get_mm_counter(mm, MM_SHMEMPAGES);

    /*
     * Note: to minimize their overhead, mm maintains hiwater_vm and
     * hiwater_rss only when about to *lower* total_vm or rss.  Any
     * collector of these hiwater stats must therefore get total_vm
     * and rss too, which will usually be the higher.  Barriers? not
     * worth the effort, such snapshots can always be inconsistent.
     */
    hiwater_vm = total_vm = mm->total_vm;
    if (hiwater_vm < mm->hiwater_vm)
        hiwater_vm = mm->hiwater_vm;
    hiwater_rss = total_rss = anon + file + shmem;
    if (hiwater_rss < mm->hiwater_rss)
        hiwater_rss = mm->hiwater_rss;

    text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
    lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
    swap = get_mm_counter(mm, MM_SWAPENTS);
    ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
    pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
    seq_printf(m,
        "VmPeak:\t%8lu kB\n"
        "VmSize:\t%8lu kB\n"
        "VmLck:\t%8lu kB\n"
        "VmPin:\t%8lu kB\n"
        "VmHWM:\t%8lu kB\n"
        "VmRSS:\t%8lu kB\n"
        "RssAnon:\t%8lu kB\n"
        "RssFile:\t%8lu kB\n"
        "RssShmem:\t%8lu kB\n"
        "VmData:\t%8lu kB\n"
        "VmStk:\t%8lu kB\n"
        "VmExe:\t%8lu kB\n"
        "VmLib:\t%8lu kB\n"
        "VmPTE:\t%8lu kB\n"
        "VmPMD:\t%8lu kB\n"
        "VmSwap:\t%8lu kB\n",
        hiwater_vm << (PAGE_SHIFT-10),
        total_vm << (PAGE_SHIFT-10),
        mm->locked_vm << (PAGE_SHIFT-10),
        mm->pinned_vm << (PAGE_SHIFT-10),
        hiwater_rss << (PAGE_SHIFT-10),
        total_rss << (PAGE_SHIFT-10),
        anon << (PAGE_SHIFT-10),
        file << (PAGE_SHIFT-10),
        shmem << (PAGE_SHIFT-10),
        mm->data_vm << (PAGE_SHIFT-10),
        mm->stack_vm << (PAGE_SHIFT-10), text, lib,
        ptes >> 10,
        pmds >> 10,
        swap << (PAGE_SHIFT-10));
    hugetlb_report_usage(m, mm);
}

unsigned long task_vsize(struct mm_struct *mm)
{
    return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
             unsigned long *shared, unsigned long *text,
             unsigned long *data, unsigned long *resident)
{
    *shared = get_mm_counter(mm, MM_FILEPAGES) +
            get_mm_counter(mm, MM_SHMEMPAGES);
    *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                >> PAGE_SHIFT;
    *data = mm->data_vm + mm->stack_vm;
    *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
    return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
    struct task_struct *task = priv->task;

    task_lock(task);
    priv->task_mempolicy = get_task_policy(task);
    mpol_get(priv->task_mempolicy);
    task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
    mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
    const char __user *name = vma_get_anon_name(vma);
    struct mm_struct *mm = vma->vm_mm;

    unsigned long page_start_vaddr;
    unsigned long page_offset;
    unsigned long num_pages;
    unsigned long max_len = NAME_MAX;
    int i;

    page_start_vaddr = (unsigned long)name & PAGE_MASK;
    page_offset = (unsigned long)name - page_start_vaddr;
    num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

    seq_puts(m, "[anon:");

    for (i = 0; i < num_pages; i++) {
        int len;
        int write_len;
        const char *kaddr;
        long pages_pinned;
        struct page *page;

        pages_pinned = get_user_pages_remote(current, mm,
                page_start_vaddr, 1, 0, &page, NULL);
        if (pages_pinned < 1) {
            seq_puts(m, "<fault>]");
            return;
        }

        kaddr = (const char *)kmap(page);
        len = min(max_len, PAGE_SIZE - page_offset);
        write_len = strnlen(kaddr + page_offset, len);
        seq_write(m, kaddr + page_offset, write_len);
        kunmap(page);
        put_page(page);

        /* if strnlen hit a null terminator then we're done */
        if (write_len != len)
            break;

        max_len -= len;
        page_offset = 0;
        page_start_vaddr += PAGE_SIZE;
    }

    seq_putc(m, ']');
}

static void vma_stop(struct proc_maps_private *priv)
{
    struct mm_struct *mm = priv->mm;

    release_task_mempolicy(priv);
    up_read(&mm->mmap_sem);
    mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
    if (vma == priv->tail_vma)
        return NULL;
    return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
    if (m->count < m->size)    /* vma is copied successfully */
        m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
    struct proc_maps_private *priv = m->private;
    unsigned long last_addr = m->version;
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    unsigned int pos = *ppos;

    /* See m_cache_vma(). Zero at the start or after lseek. */
    if (last_addr == -1UL)
        return NULL;

    priv->task = get_proc_task(priv->inode);
    if (!priv->task)
        return ERR_PTR(-ESRCH);

    mm = priv->mm;
    if (!mm || !atomic_inc_not_zero(&mm->mm_users))
        return NULL;

    down_read(&mm->mmap_sem);
    hold_task_mempolicy(priv);
    priv->tail_vma = get_gate_vma(mm);

    if (last_addr) {
        vma = find_vma(mm, last_addr - 1);
        if (vma && vma->vm_start <= last_addr)
            vma = m_next_vma(priv, vma);
        if (vma)
            return vma;
    }

    m->version = 0;
    if (pos < mm->map_count) {
        for (vma = mm->mmap; pos; pos--) {
            m->version = vma->vm_start;
            vma = vma->vm_next;
        }
        return vma;
    }

    /* we do not bother to update m->version in this case */
    if (pos == mm->map_count && priv->tail_vma)
        return priv->tail_vma;

    vma_stop(priv);
    return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
    struct proc_maps_private *priv = m->private;
    struct vm_area_struct *next;

    (*pos)++;
    next = m_next_vma(priv, v);
    if (!next)
        vma_stop(priv);
    return next;
}

static void m_stop(struct seq_file *m, void *v)
{
    struct proc_maps_private *priv = m->private;

    if (!IS_ERR_OR_NULL(v))
        vma_stop(priv);
    if (priv->task) {
        put_task_struct(priv->task);
        priv->task = NULL;
    }
}

static int proc_maps_open(struct inode *inode, struct file *file,
            const struct seq_operations *ops, int psize)
{
    struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

    if (!priv)
        return -ENOMEM;

    priv->inode = inode;
    priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
    if (IS_ERR(priv->mm)) {
        int err = PTR_ERR(priv->mm);

        seq_release_private(inode, file);
        return err;
    }

    return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
    struct seq_file *seq = file->private_data;
    struct proc_maps_private *priv = seq->private;

    if (priv->mm)
        mmdrop(priv->mm);

    return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
            const struct seq_operations *ops)
{
    return proc_maps_open(inode, file, ops,
                sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct proc_maps_private *priv,
            struct vm_area_struct *vma)
{
    /*
     * We make no effort to guess what a given thread considers to be
     * its "stack".  It's not even well-defined for programs written
     * languages like Go.
     */
    return vma->vm_start <= vma->vm_mm->start_stack &&
        vma->vm_end >= vma->vm_mm->start_stack;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
    struct mm_struct *mm = vma->vm_mm;
    struct file *file = vma->vm_file;
    struct proc_maps_private *priv = m->private;
    vm_flags_t flags = vma->vm_flags;
    unsigned long ino = 0;
    unsigned long long pgoff = 0;
    unsigned long start, end;
    dev_t dev = 0;
    const char *name = NULL;

    if (file) {
        struct inode *inode = file_inode(vma->vm_file);
        dev = inode->i_sb->s_dev;
        ino = inode->i_ino;
        pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
    }

    /* We don't show the stack guard page in /proc/maps */
    start = vma->vm_start;
    end = vma->vm_end;

    seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
    seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
            start,
            end,
            flags & VM_READ ? 'r' : '-',
            flags & VM_WRITE ? 'w' : '-',
            flags & VM_EXEC ? 'x' : '-',
            flags & VM_MAYSHARE ? 's' : 'p',
            pgoff,
            MAJOR(dev), MINOR(dev), ino);

    /*
     * Print the dentry name for named mappings, and a
     * special [heap] marker for the heap:
     */
    if (file) {
        seq_pad(m, ' ');
        seq_file_path(m, file, "\n");
        goto done;
    }

    if (vma->vm_ops && vma->vm_ops->name) {
        name = vma->vm_ops->name(vma);
        if (name)
            goto done;
    }

    name = arch_vma_name(vma);
    if (!name) {
        if (!mm) {
            name = "[vdso]";
            goto done;
        }

        if (vma->vm_start <= mm->brk &&
            vma->vm_end >= mm->start_brk) {
            name = "[heap]";
            goto done;
        }

        if (is_stack(priv, vma)) {
            name = "[stack]";
            goto done;
        }

        if (vma_get_anon_name(vma)) {
            seq_pad(m, ' ');
            seq_print_vma_name(m, vma);
        }
    }

done:
    if (name) {
        seq_pad(m, ' ');
        seq_puts(m, name);
    }
    seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
    show_map_vma(m, v, is_pid);
    m_cache_vma(m, v);
    return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
    return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
    return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
    .start    = m_start,
    .next    = m_next,
    .stop    = m_stop,
    .show    = show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
    .start    = m_start,
    .next    = m_next,
    .stop    = m_stop,
    .show    = show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
    return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
    return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
    .open        = pid_maps_open,
    .read        = seq_read,
    .llseek        = seq_lseek,
    .release    = proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
    .open        = tid_maps_open,
    .read        = seq_read,
    .llseek        = seq_lseek,
    .release    = proc_map_release,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *     - 1M 3-user-pages add up to 8KB errors;
 *     - supports mapcount up to 2^24, or 16M;
 *     - supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
    unsigned long resident;
    unsigned long shared_clean;
    unsigned long shared_dirty;
    unsigned long private_clean;
    unsigned long private_dirty;
    unsigned long referenced;
    unsigned long anonymous;
    unsigned long anonymous_thp;
    unsigned long shmem_thp;
    unsigned long swap;
    unsigned long shared_hugetlb;
    unsigned long private_hugetlb;
    u64 pss;
    u64 swap_pss;
    bool check_shmem_swap;
};

static void smaps_account(struct mem_size_stats *mss, struct page *page,
        bool compound, bool young, bool dirty)
{
    int i, nr = compound ? 1 << compound_order(page) : 1;
    unsigned long size = nr * PAGE_SIZE;

    if (PageAnon(page))
        mss->anonymous += size;

    mss->resident += size;
    /* Accumulate the size in pages that have been accessed. */
    if (young || page_is_young(page) || PageReferenced(page))
        mss->referenced += size;

    /*
     * page_count(page) == 1 guarantees the page is mapped exactly once.
     * If any subpage of the compound page mapped with PTE it would elevate
     * page_count().
     */
    if (page_count(page) == 1) {
        if (dirty || PageDirty(page))
            mss->private_dirty += size;
        else
            mss->private_clean += size;
        mss->pss += (u64)size << PSS_SHIFT;
        return;
    }

    for (i = 0; i < nr; i++, page++) {
        int mapcount = page_mapcount(page);

        if (mapcount >= 2) {
            if (dirty || PageDirty(page))
                mss->shared_dirty += PAGE_SIZE;
            else
                mss->shared_clean += PAGE_SIZE;
            mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
        } else {
            if (dirty || PageDirty(page))
                mss->private_dirty += PAGE_SIZE;
            else
                mss->private_clean += PAGE_SIZE;
            mss->pss += PAGE_SIZE << PSS_SHIFT;
        }
    }
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
        struct mm_walk *walk)
{
    struct mem_size_stats *mss = walk->private;

    mss->swap += shmem_partial_swap_usage(
            walk->vma->vm_file->f_mapping, addr, end);

    return 0;
}
#endif

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
        struct mm_walk *walk)
{
    struct mem_size_stats *mss = walk->private;
    struct vm_area_struct *vma = walk->vma;
    struct page *page = NULL;

    if (pte_present(*pte)) {
        page = vm_normal_page(vma, addr, *pte);
    } else if (is_swap_pte(*pte)) {
        swp_entry_t swpent = pte_to_swp_entry(*pte);

        if (!non_swap_entry(swpent)) {
            int mapcount;

            mss->swap += PAGE_SIZE;
            mapcount = swp_swapcount(swpent);
            if (mapcount >= 2) {
                u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

                do_div(pss_delta, mapcount);
                mss->swap_pss += pss_delta;
            } else {
                mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
            }
        } else if (is_migration_entry(swpent))
            page = migration_entry_to_page(swpent);
    } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
                            && pte_none(*pte))) {
        page = find_get_entry(vma->vm_file->f_mapping,
                        linear_page_index(vma, addr));
        if (!page)
            return;

        if (radix_tree_exceptional_entry(page))
            mss->swap += PAGE_SIZE;
        else
            put_page(page);

        return;
    }

    if (!page)
        return;

    smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
        struct mm_walk *walk)
{
    struct mem_size_stats *mss = walk->private;
    struct vm_area_struct *vma = walk->vma;
    struct page *page;

    /* FOLL_DUMP will return -EFAULT on huge zero page */
    page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
    if (IS_ERR_OR_NULL(page))
        return;
    if (PageAnon(page))
        mss->anonymous_thp += HPAGE_PMD_SIZE;
    else if (PageSwapBacked(page))
        mss->shmem_thp += HPAGE_PMD_SIZE;
    else if (is_zone_device_page(page))
        /* pass */;
    else
        VM_BUG_ON_PAGE(1, page);
    smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
        struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
               struct mm_walk *walk)
{
    struct vm_area_struct *vma = walk->vma;
    pte_t *pte;
    spinlock_t *ptl;

    ptl = pmd_trans_huge_lock(pmd, vma);
    if (ptl) {
        smaps_pmd_entry(pmd, addr, walk);
        spin_unlock(ptl);
        return 0;
    }

    if (pmd_trans_unstable(pmd))
        return 0;
    /*
     * The mmap_sem held all the way back in m_start() is what
     * keeps khugepaged out of here and from collapsing things
     * in here.
     */
    pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    for (; addr != end; pte++, addr += PAGE_SIZE)
        smaps_pte_entry(pte, addr, walk);
    pte_unmap_unlock(pte - 1, ptl);
    cond_resched();
    return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
    /*
     * Don't forget to update Documentation/ on changes.
     */
    static const char mnemonics[BITS_PER_LONG][2] = {
        /*
         * In case if we meet a flag we don't know about.
         */
        [0 ... (BITS_PER_LONG-1)] = "??",

        [ilog2(VM_READ)]    = "rd",
        [ilog2(VM_WRITE)]    = "wr",
        [ilog2(VM_EXEC)]    = "ex",
        [ilog2(VM_SHARED)]    = "sh",
        [ilog2(VM_MAYREAD)]    = "mr",
        [ilog2(VM_MAYWRITE)]    = "mw",
        [ilog2(VM_MAYEXEC)]    = "me",
        [ilog2(VM_MAYSHARE)]    = "ms",
        [ilog2(VM_GROWSDOWN)]    = "gd",
        [ilog2(VM_PFNMAP)]    = "pf",
        [ilog2(VM_DENYWRITE)]    = "dw",
#ifdef CONFIG_X86_INTEL_MPX
        [ilog2(VM_MPX)]        = "mp",
#endif
        [ilog2(VM_LOCKED)]    = "lo",
        [ilog2(VM_IO)]        = "io",
        [ilog2(VM_SEQ_READ)]    = "sr",
        [ilog2(VM_RAND_READ)]    = "rr",
        [ilog2(VM_DONTCOPY)]    = "dc",
        [ilog2(VM_DONTEXPAND)]    = "de",
        [ilog2(VM_ACCOUNT)]    = "ac",
        [ilog2(VM_NORESERVE)]    = "nr",
        [ilog2(VM_HUGETLB)]    = "ht",
        [ilog2(VM_ARCH_1)]    = "ar",
        [ilog2(VM_DONTDUMP)]    = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
        [ilog2(VM_SOFTDIRTY)]    = "sd",
#endif
        [ilog2(VM_MIXEDMAP)]    = "mm",
        [ilog2(VM_HUGEPAGE)]    = "hg",
        [ilog2(VM_NOHUGEPAGE)]    = "nh",
        [ilog2(VM_MERGEABLE)]    = "mg",
        [ilog2(VM_UFFD_MISSING)]= "um",
        [ilog2(VM_UFFD_WP)]    = "uw",
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        /* These come out via ProtectionKey: */
        [ilog2(VM_PKEY_BIT0)]    = "",
        [ilog2(VM_PKEY_BIT1)]    = "",
        [ilog2(VM_PKEY_BIT2)]    = "",
        [ilog2(VM_PKEY_BIT3)]    = "",
#endif
    };
    size_t i;

    seq_puts(m, "VmFlags: ");
    for (i = 0; i < BITS_PER_LONG; i++) {
        if (!mnemonics[i][0])
            continue;
        if (vma->vm_flags & (1UL << i)) {
            seq_printf(m, "%c%c ",
                   mnemonics[i][0], mnemonics[i][1]);
        }
    }
    seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
                 unsigned long addr, unsigned long end,
                 struct mm_walk *walk)
{
    struct mem_size_stats *mss = walk->private;
    struct vm_area_struct *vma = walk->vma;
    struct page *page = NULL;

    if (pte_present(*pte)) {
        page = vm_normal_page(vma, addr, *pte);
    } else if (is_swap_pte(*pte)) {
        swp_entry_t swpent = pte_to_swp_entry(*pte);

        if (is_migration_entry(swpent))
            page = migration_entry_to_page(swpent);
    }
    if (page) {
        int mapcount = page_mapcount(page);

        if (mapcount >= 2)
            mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
        else
            mss->private_hugetlb += huge_page_size(hstate_vma(vma));
    }
    return 0;
}
#endif /* HUGETLB_PAGE */

void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
{
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
    struct vm_area_struct *vma = v;
    struct mem_size_stats mss;
    struct mm_walk smaps_walk = {
        .pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
        .hugetlb_entry = smaps_hugetlb_range,
#endif
        .mm = vma->vm_mm,
        .private = &mss,
    };

    memset(&mss, 0, sizeof mss);

#ifdef CONFIG_SHMEM
    if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
        /*
         * For shared or readonly shmem mappings we know that all
         * swapped out pages belong to the shmem object, and we can
         * obtain the swap value much more efficiently. For private
         * writable mappings, we might have COW pages that are
         * not affected by the parent swapped out pages of the shmem
         * object, so we have to distinguish them during the page walk.
         * Unless we know that the shmem object (or the part mapped by
         * our VMA) has no swapped out pages at all.
         */
        unsigned long shmem_swapped = shmem_swap_usage(vma);

        if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
                    !(vma->vm_flags & VM_WRITE)) {
            mss.swap = shmem_swapped;
        } else {
            mss.check_shmem_swap = true;
            smaps_walk.pte_hole = smaps_pte_hole;
        }
    }
#endif

    /* mmap_sem is held in m_start */
    walk_page_vma(vma, &smaps_walk);

    show_map_vma(m, vma, is_pid);

    if (vma_get_anon_name(vma)) {
        seq_puts(m, "Name:           ");
        seq_print_vma_name(m, vma);
        seq_putc(m, '\n');
    }

    seq_printf(m,
           "Size:           %8lu kB\n"
           "Rss:            %8lu kB\n"
           "Pss:            %8lu kB\n"
           "Shared_Clean:   %8lu kB\n"
           "Shared_Dirty:   %8lu kB\n"
           "Private_Clean:  %8lu kB\n"
           "Private_Dirty:  %8lu kB\n"
           "Referenced:     %8lu kB\n"
           "Anonymous:      %8lu kB\n"
           "AnonHugePages:  %8lu kB\n"
           "ShmemPmdMapped: %8lu kB\n"
           "Shared_Hugetlb: %8lu kB\n"
           "Private_Hugetlb: %7lu kB\n"
           "Swap:           %8lu kB\n"
           "SwapPss:        %8lu kB\n"
           "KernelPageSize: %8lu kB\n"
           "MMUPageSize:    %8lu kB\n"
           "Locked:         %8lu kB\n",
           (vma->vm_end - vma->vm_start) >> 10,
           mss.resident >> 10,
           (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
           mss.shared_clean  >> 10,
           mss.shared_dirty  >> 10,
           mss.private_clean >> 10,
           mss.private_dirty >> 10,
           mss.referenced >> 10,
           mss.anonymous >> 10,
           mss.anonymous_thp >> 10,
           mss.shmem_thp >> 10,
           mss.shared_hugetlb >> 10,
           mss.private_hugetlb >> 10,
           mss.swap >> 10,
           (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
           vma_kernel_pagesize(vma) >> 10,
           vma_mmu_pagesize(vma) >> 10,
           (vma->vm_flags & VM_LOCKED) ?
            (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

    arch_show_smap(m, vma);
    show_smap_vma_flags(m, vma);
    m_cache_vma(m, vma);
    return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
    return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
    return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
    .start    = m_start,
    .next    = m_next,
    .stop    = m_stop,
    .show    = show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
    .start    = m_start,
    .next    = m_next,
    .stop    = m_stop,
    .show    = show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
    return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
    return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
    .open        = pid_smaps_open,
    .read        = seq_read,
    .llseek        = seq_lseek,
    .release    = proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
    .open        = tid_smaps_open,
    .read        = seq_read,
    .llseek        = seq_lseek,
    .release    = proc_map_release,
};

enum clear_refs_types {
    CLEAR_REFS_ALL = 1,
    CLEAR_REFS_ANON,
    CLEAR_REFS_MAPPED,
    CLEAR_REFS_SOFT_DIRTY,
    CLEAR_REFS_MM_HIWATER_RSS,
    CLEAR_REFS_LAST,
};

struct clear_refs_private {
    enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
        unsigned long addr, pte_t *pte)
{
    /*
     * The soft-dirty tracker uses #PF-s to catch writes
     * to pages, so write-protect the pte as well. See the
     * Documentation/vm/soft-dirty.txt for full description
     * of how soft-dirty works.
     */
    pte_t ptent = *pte;

    if (pte_present(ptent)) {
        ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
        ptent = pte_wrprotect(ptent);
        ptent = pte_clear_soft_dirty(ptent);
        ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
    } else if (is_swap_pte(ptent)) {
        ptent = pte_swp_clear_soft_dirty(ptent);
        set_pte_at(vma->vm_mm, addr, pte, ptent);
    }
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
        unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
        unsigned long addr, pmd_t *pmdp)
{
    pmd_t pmd = *pmdp;

    /* See comment in change_huge_pmd() */
    pmdp_invalidate(vma, addr, pmdp);
    if (pmd_dirty(*pmdp))
        pmd = pmd_mkdirty(pmd);
    if (pmd_young(*pmdp))
        pmd = pmd_mkyoung(pmd);

    pmd = pmd_wrprotect(pmd);
    pmd = pmd_clear_soft_dirty(pmd);

    set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
        unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, struct mm_walk *walk)
{
    struct clear_refs_private *cp = walk->private;
    struct vm_area_struct *vma = walk->vma;
    pte_t *pte, ptent;
    spinlock_t *ptl;
    struct page *page;

    ptl = pmd_trans_huge_lock(pmd, vma);
    if (ptl) {
        if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
            clear_soft_dirty_pmd(vma, addr, pmd);
            goto out;
        }

        page = pmd_page(*pmd);

        /* Clear accessed and referenced bits. */
        pmdp_test_and_clear_young(vma, addr, pmd);
        test_and_clear_page_young(page);
        ClearPageReferenced(page);
out:
        spin_unlock(ptl);
        return 0;
    }

    if (pmd_trans_unstable(pmd))
        return 0;

    pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    for (; addr != end; pte++, addr += PAGE_SIZE) {
        ptent = *pte;

        if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
            clear_soft_dirty(vma, addr, pte);
            continue;
        }

        if (!pte_present(ptent))
            continue;

        page = vm_normal_page(vma, addr, ptent);
        if (!page)
            continue;

        /* Clear accessed and referenced bits. */
        ptep_test_and_clear_young(vma, addr, pte);
        test_and_clear_page_young(page);
        ClearPageReferenced(page);
    }
    pte_unmap_unlock(pte - 1, ptl);
    cond_resched();
    return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
    struct clear_refs_private *cp = walk->private;
    struct vm_area_struct *vma = walk->vma;

    if (vma->vm_flags & VM_PFNMAP)
        return 1;

    /*
     * Writing 1 to /proc/pid/clear_refs affects all pages.
     * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
     * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
     * Writing 4 to /proc/pid/clear_refs affects all pages.
     */
    if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
        return 1;
    if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
        return 1;
    return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos)
{
    struct task_struct *task;
    char buffer[PROC_NUMBUF];
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    enum clear_refs_types type;
    int itype;
    int rv;

    memset(buffer, 0, sizeof(buffer));
    if (count > sizeof(buffer) - 1)
        count = sizeof(buffer) - 1;
    if (copy_from_user(buffer, buf, count))
        return -EFAULT;
    rv = kstrtoint(strstrip(buffer), 10, &itype);
    if (rv < 0)
        return rv;
    type = (enum clear_refs_types)itype;
    if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
        return -EINVAL;

    task = get_proc_task(file_inode(file));
    if (!task)
        return -ESRCH;
    mm = get_task_mm(task);
    if (mm) {
        struct clear_refs_private cp = {
            .type = type,
        };
        struct mm_walk clear_refs_walk = {
            .pmd_entry = clear_refs_pte_range,
            .test_walk = clear_refs_test_walk,
            .mm = mm,
            .private = &cp,
        };

        if (type == CLEAR_REFS_MM_HIWATER_RSS) {
            if (down_write_killable(&mm->mmap_sem)) {
                count = -EINTR;
                goto out_mm;
            }

            /*
             * Writing 5 to /proc/pid/clear_refs resets the peak
             * resident set size to this mm's current rss value.
             */
            reset_mm_hiwater_rss(mm);
            up_write(&mm->mmap_sem);
            goto out_mm;
        }

        down_read(&mm->mmap_sem);
        if (type == CLEAR_REFS_SOFT_DIRTY) {
            for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!(vma->vm_flags & VM_SOFTDIRTY))
                    continue;
                up_read(&mm->mmap_sem);
                if (down_write_killable(&mm->mmap_sem)) {
                    count = -EINTR;
                    goto out_mm;
                }
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                    vma->vm_flags &= ~VM_SOFTDIRTY;
                    vma_set_page_prot(vma);
                }
                downgrade_write(&mm->mmap_sem);
                break;
            }
            mmu_notifier_invalidate_range_start(mm, 0, -1);
        }
        walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
        if (type == CLEAR_REFS_SOFT_DIRTY)
            mmu_notifier_invalidate_range_end(mm, 0, -1);
        flush_tlb_mm(mm);
        up_read(&mm->mmap_sem);
out_mm:
        mmput(mm);
    }
    put_task_struct(task);

    return count;
}

const struct file_operations proc_clear_refs_operations = {
    .write        = clear_refs_write,
    .llseek        = noop_llseek,
};
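
/*
 * Hedged user-space sketch (not part of task_mmu.c) of the clear_refs
 * soft-dirty interface implemented above: write "4" (CLEAR_REFS_SOFT_DIRTY)
 * to /proc/<pid>/clear_refs, touch some memory, then check bit 55
 * (PM_SOFT_DIRTY) of the matching /proc/<pid>/pagemap entry.  Requires
 * CONFIG_MEM_SOFT_DIRTY; the buffer and output are illustrative only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

static uint64_t pagemap_entry(const void *vaddr)
{
    uint64_t entry = 0;
    long psz = sysconf(_SC_PAGESIZE);
    int fd = open("/proc/self/pagemap", O_RDONLY);

    if (fd >= 0) {
        /* one 64-bit entry per virtual page */
        pread(fd, &entry, sizeof(entry),
              (off_t)((uintptr_t)vaddr / psz * sizeof(uint64_t)));
        close(fd);
    }
    return entry;
}

int main(void)
{
    static char buf[4096];
    int fd = open("/proc/self/clear_refs", O_WRONLY);

    buf[0] = 1;                     /* make sure the page is mapped */
    if (fd >= 0) {
        write(fd, "4", 1);          /* 4 == CLEAR_REFS_SOFT_DIRTY */
        close(fd);
    }

    printf("soft-dirty after clear: %d\n",
           (int)((pagemap_entry(buf) >> 55) & 1));
    buf[0] = 2;                     /* write fault sets soft-dirty again */
    printf("soft-dirty after write: %d\n",
           (int)((pagemap_entry(buf) >> 55) & 1));
    return 0;
}
#endif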

typedef struct {
    u64 pme;
} pagemap_entry_t;

struct pagemapread {
    int pos, len;        /* units: PM_ENTRY_BYTES, not bytes */
    pagemap_entry_t *buffer;
    bool show_pfn;
};

#define PAGEMAP_WALK_SIZE    (PMD_SIZE)
#define PAGEMAP_WALK_MASK    (PMD_MASK)

#define PM_ENTRY_BYTES        sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS        55
#define PM_PFRAME_MASK        GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY        BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE    BIT_ULL(56)
#define PM_FILE            BIT_ULL(61)
#define PM_SWAP            BIT_ULL(62)
#define PM_PRESENT        BIT_ULL(63)

#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
    return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
              struct pagemapread *pm)
{
    pm->buffer[pm->pos++] = *pme;
    if (pm->pos >= pm->len)
        return PM_END_OF_BUFFER;
    return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
    struct pagemapread *pm = walk->private;
    unsigned long addr = start;
    int err = 0;


    //dump_stack();

    while (addr < end) {
        struct vm_area_struct *vma = find_vma(walk->mm, addr);
        pagemap_entry_t pme = make_pme(0, 0);
        /* End of address space hole, which we mark as non-present. */
        unsigned long hole_end;

        if (vma)
            hole_end = min(end, vma->vm_start);
        else
            hole_end = end;

        for (; addr < hole_end; addr += PAGE_SIZE) {
            err = add_to_pagemap(addr, &pme, pm);
            if (err)
                goto out;
        }

        if (!vma)
            break;

        /* Addresses in the VMA. */
        if (vma->vm_flags & VM_SOFTDIRTY)
            pme = make_pme(0, PM_SOFT_DIRTY);
        for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
            err = add_to_pagemap(addr, &pme, pm);
            if (err)
                goto out;
        }
    }
out:
    return err;
}


/*
 * Call trace captured with a temporary dump_stack() in pagemap_pmd_range(),
 * showing how a read() of /proc/<pid>/pagemap reaches this walker:
 */
#if 0
 CPU: 0 PID: 3363 Comm: mytest Tainted: G           O    4.9.118 #1465
[   98.979100] Hardware name: sun8iw15
[   98.982981] [<c01112c8>] (unwind_backtrace) from [<c010cd7c>] (show_stack+0x20/0x24)
[   98.991604] [<c010cd7c>] (show_stack) from [<c0484e10>] (dump_stack+0x78/0x94)
[   98.999652] [<c0484e10>] (dump_stack) from [<c02f782c>] (pagemap_pmd_range+0xb4/0x2a8)
[   99.008467] [<c02f782c>] (pagemap_pmd_range) from [<c0270c7c>] (walk_pgd_range+0x108/0x184)
[   99.017758] [<c0270c7c>] (walk_pgd_range) from [<c0270e38>] (walk_page_range+0xe0/0x104)
[   99.026761] [<c0270e38>] (walk_page_range) from [<c02f7bc8>] (pagemap_read+0x1a8/0x2e0)
[   99.035672] [<c02f7bc8>] (pagemap_read) from [<c02905b8>] (__vfs_read+0x48/0x13c)
[   99.044003] [<c02905b8>] (__vfs_read) from [<c02913a0>] (vfs_read+0xa0/0x154)
[   99.051938] [<c02913a0>] (vfs_read) from [<c0292470>] (SyS_read+0x60/0xb0)
[   99.059596] [<c0292470>] (SyS_read) from [<c0107f40>] (ret_fast_syscall+0x0/0x48)
#endif


static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
        struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
    u64 frame = 0, flags = 0;
    struct page *page = NULL;


    //printk("addr = %lx\n", addr);
    //printk("pte = %lx\n", pte);

    //dump_stack();

    

    if (pte_present(pte)) {
        //printk("pte.0\n");


        //printk("pm->show_pfn = %lx\n", pm->show_pfn);
        
        if (pm->show_pfn)
            frame = pte_pfn(pte);
        flags |= PM_PRESENT;
        page = vm_normal_page(vma, addr, pte);

        printk("addr = %lx\n", addr);
        printk("page = %lx\n", page);

        void *kaddr = kmap(page);
        kaddr = (unsigned int)kaddr | (addr & 0xfff);
        printk("kaddr = 0x%lx\n", kaddr);
        print_hex_dump_bytes("walk.data: ", DUMP_PREFIX_ADDRESS, (u8 *)(kaddr), 0x40 );
        kunmap(page);

            

        
        if (pte_soft_dirty(pte))
            flags |= PM_SOFT_DIRTY;
    } else if (is_swap_pte(pte)) {
        swp_entry_t entry;

        printk("pte.1\n");
        if (pte_swp_soft_dirty(pte))
            flags |= PM_SOFT_DIRTY;
        entry = pte_to_swp_entry(pte);
        frame = swp_type(entry) |
            (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
        flags |= PM_SWAP;
        if (is_migration_entry(entry))
            page = migration_entry_to_page(entry);
    }

    if (page && !PageAnon(page))
        flags |= PM_FILE;
    if (page && page_mapcount(page) == 1)
        flags |= PM_MMAP_EXCLUSIVE;
    if (vma->vm_flags & VM_SOFTDIRTY)
        flags |= PM_SOFT_DIRTY;

    return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                 struct mm_walk *walk)
{
    struct vm_area_struct *vma = walk->vma;
    struct pagemapread *pm = walk->private;
    spinlock_t *ptl;
    pte_t *pte, *orig_pte;
    int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

    ptl = pmd_trans_huge_lock(pmdp, vma);
    if (ptl) {
        u64 flags = 0, frame = 0;
        pmd_t pmd = *pmdp;

        if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
            flags |= PM_SOFT_DIRTY;

        /*
         * Currently pmd for thp is always present because thp
         * can not be swapped-out, migrated, or HWPOISONed
         * (split in such cases instead.)
         * This if-check is just to prepare for future implementation.
         */
        if (pmd_present(pmd)) {
            struct page *page = pmd_page(pmd);

            if (page_mapcount(page) == 1)
                flags |= PM_MMAP_EXCLUSIVE;

            flags |= PM_PRESENT;
            if (pm->show_pfn)
                frame = pmd_pfn(pmd) +
                    ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        }

        for (; addr != end; addr += PAGE_SIZE) {
            pagemap_entry_t pme = make_pme(frame, flags);

            err = add_to_pagemap(addr, &pme, pm);
            if (err)
                break;
            if (pm->show_pfn && (flags & PM_PRESENT))
                frame++;
        }
        spin_unlock(ptl);
        return err;
    }

    if (pmd_trans_unstable(pmdp))
        return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

    /*
     * We can assume that @vma always points to a valid one and @end never
     * goes beyond vma->vm_end.
     */
    orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);


    printk("\n\npagemap_pmd_range start...\n");
    for (; addr < end; pte++, addr += PAGE_SIZE) {
        pagemap_entry_t pme;


        printk("addr = %lx\n", addr);
        printk("end = %lx\n", end);
        printk("pte = %p\n", pte);
        printk("*pte = %lx\n", *pte);

        

        pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
        err = add_to_pagemap(addr, &pme, pm);
        if (!err)
            break;
    }
    printk("pagemap_pmd_range end!!! err = %d\n\n", err);
    pte_unmap_unlock(orig_pte, ptl);

    cond_resched();

    return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
                 unsigned long addr, unsigned long end,
                 struct mm_walk *walk)
{
    struct pagemapread *pm = walk->private;
    struct vm_area_struct *vma = walk->vma;
    u64 flags = 0, frame = 0;
    int err = 0;
    pte_t pte;

    if (vma->vm_flags & VM_SOFTDIRTY)
        flags |= PM_SOFT_DIRTY;

    pte = huge_ptep_get(ptep);
    if (pte_present(pte)) {
        struct page *page = pte_page(pte);

        if (!PageAnon(page))
            flags |= PM_FILE;

        if (page_mapcount(page) == 1)
            flags |= PM_MMAP_EXCLUSIVE;

        flags |= PM_PRESENT;
        if (pm->show_pfn)
            frame = pte_pfn(pte) +
                ((addr & ~hmask) >> PAGE_SHIFT);
    }

    for (; addr != end; addr += PAGE_SIZE) {
        pagemap_entry_t pme = make_pme(frame, flags);

        err = add_to_pagemap(addr, &pme, pm);
        if (err)
            return err;
        if (pm->show_pfn && (flags & PM_PRESENT))
            frame++;
    }

    cond_resched();

    return err;
}
#endif /* HUGETLB_PAGE */

extern int debug_en_pagewalk;    


/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
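
/*
 * Minimal user-space sketch (not part of this file) that decodes one 64-bit
 * pagemap entry according to the layout documented above; pagemap_read(),
 * which produces these entries, follows below.  The sample value passed to
 * main() is made up.  Note that the PFN field reads as zero unless the
 * reader has CAP_SYS_ADMIN (see pm.show_pfn in pagemap_read()).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void decode_pagemap_entry(uint64_t e)
{
    printf("present   : %d\n", (int)((e >> 63) & 1));
    printf("swapped   : %d\n", (int)((e >> 62) & 1));
    printf("file/shm  : %d\n", (int)((e >> 61) & 1));
    printf("exclusive : %d\n", (int)((e >> 56) & 1));
    printf("soft-dirty: %d\n", (int)((e >> 55) & 1));

    if ((e >> 63) & 1)              /* present: bits 0-54 hold the PFN */
        printf("pfn       : 0x%llx\n",
               (unsigned long long)(e & ((1ULL << 55) - 1)));
    else if ((e >> 62) & 1)         /* swapped: bits 0-4 type, 5-54 offset */
        printf("swap type %llu, offset 0x%llx\n",
               (unsigned long long)(e & 0x1f),
               (unsigned long long)((e & ((1ULL << 55) - 1)) >> 5));
}

int main(void)
{
    decode_pagemap_entry(0x8100000000012345ULL);   /* hypothetical entry */
    return 0;
}
#endif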
static ssize_t pagemap_read(struct file *file, char __user *buf,
                size_t count, loff_t *ppos)
{
    struct mm_struct *mm = file->private_data;
    struct pagemapread pm;
    struct mm_walk pagemap_walk = {};
    unsigned long src;
    unsigned long svpfn;
    unsigned long start_vaddr;
    unsigned long end_vaddr;
    int ret = 0, copied = 0;
    struct task_struct    *task;

    

    printk("\n\n\n\n\n*****************************************************\n");

    printk("in pagemap_read\n");

    printk("buf = %p\n", buf);
    printk("count = %x\n", count);
    printk("ppos = %p\n", ppos);
    printk("*ppos = %llx\n", *ppos);


    task = mm->owner;


    printk("task->comm = %s\n", task->comm);
    

    

    

    
    //task = current;
    //if( task->comm[0]=='c' && task->comm[1]=='a' && task->comm[2]=='t' ){
      //  debug_en = 0x01;  


    

    //printk("mm->mm_users = %lx\n", mm->mm_users);
    //printk("mm = %p\n", mm);

    

    if (!mm || !atomic_inc_not_zero(&mm->mm_users))
        goto out;

    //printk("mm->mm_users = %lx\n", mm->mm_users);
    

    ret = -EINVAL;
    /* file position must be aligned */
    if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
        goto out_mm;

    ret = 0;
    if (!count)
        goto out_mm;

    /* do not disclose physical addresses: attack vector */
    pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

    pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
    pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
    ret = -ENOMEM;
    if (!pm.buffer)
        goto out_mm;

    pagemap_walk.pmd_entry = pagemap_pmd_range;
    pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
    pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
    pagemap_walk.mm = mm;
    pagemap_walk.private = &pm;

    src = *ppos;
    svpfn = src / PM_ENTRY_BYTES;
    start_vaddr = svpfn << PAGE_SHIFT;
    end_vaddr = mm->task_size;

    /* watch out for wraparound */
    if (svpfn > mm->task_size >> PAGE_SHIFT)
        start_vaddr = end_vaddr;

    /*
     * The odds are that this will stop walking way
     * before end_vaddr, because the length of the
     * user buffer is tracked in "pm", and the walk
     * will stop when we hit the end of the buffer.
     */
    ret = 0;


    //printk("pm.buffer = %p\n", pm.buffer);
    //printk("start_vaddr = %lx\n", start_vaddr);
    //printk("end_vaddr = %lx\n", end_vaddr);

    
        


    while (count && (start_vaddr < end_vaddr)) {
        int len;
        unsigned long end;

        pm.pos = 0;
        end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;

        //PAGEMAP_WALK_SIZE = 200000
        //PAGEMAP_WALK_MASK = ffe00000

        //printk("PAGEMAP_WALK_SIZE = %lx\n", PAGEMAP_WALK_SIZE);
        //printk("PAGEMAP_WALK_MASK = %lx\n", PAGEMAP_WALK_MASK);

        
        /* overflow ? */
        if (end < start_vaddr || end > end_vaddr)
            end = end_vaddr;

        down_read(&mm->mmap_sem);


        //printk("task_mmu start_vaddr = %lx\n", start_vaddr);
        //printk("task_mmu end = %lx\n", end);
        ret = walk_page_range(start_vaddr, end, &pagemap_walk);
        up_read(&mm->mmap_sem);
        start_vaddr = end;

        len = min(count, PM_ENTRY_BYTES * pm.pos);

        //printk("PM_ENTRY_BYTES = %lx\n", PM_ENTRY_BYTES);
        //PM_ENTRY_BYTES = 8
        //printk("pm.pos = %lx\n", pm.pos);
        //printk("count = %lx\n", count);
        
        //printk("len = %lx\n", len);

        

        

        

        
        
        if (copy_to_user(buf, pm.buffer, len)) {
            ret = -EFAULT;
            goto out_free;
        }
        copied += len;
        buf += len;
        count -= len;
    }
    *ppos += copied;
    if (!ret || ret == PM_END_OF_BUFFER)
        ret = copied;

out_free:
    kfree(pm.buffer);
out_mm:
    mmput(mm);
out:
    debug_en_pagewalk = 0x00; 
    return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
    struct mm_struct *mm;

    mm = proc_mem_open(inode, PTRACE_MODE_READ);
    if (IS_ERR(mm))
        return PTR_ERR(mm);
    file->private_data = mm;
    return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
    struct mm_struct *mm = file->private_data;

    if (mm)
        mmdrop(mm);
    return 0;
}

const struct file_operations proc_pagemap_operations = {
    .llseek        = mem_lseek, /* borrow this */
    .read        = pagemap_read,
    .open        = pagemap_open,
    .release    = pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
    unsigned long pages;
    unsigned long anon;
    unsigned long active;
    unsigned long writeback;
    unsigned long mapcount_max;
    unsigned long dirty;
    unsigned long swapcache;
    unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
    struct proc_maps_private proc_maps;
    struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
            unsigned long nr_pages)
{
    int count = page_mapcount(page);

    md->pages += nr_pages;
    if (pte_dirty || PageDirty(page))
        md->dirty += nr_pages;

    if (PageSwapCache(page))
        md->swapcache += nr_pages;

    if (PageActive(page) || PageUnevictable(page))
        md->active += nr_pages;

    if (PageWriteback(page))
        md->writeback += nr_pages;

    if (PageAnon(page))
        md->anon += nr_pages;

    if (count > md->mapcount_max)
        md->mapcount_max = count;

    md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
        unsigned long addr)
{
    struct page *page;
    int nid;

    if (!pte_present(pte))
        return NULL;

    page = vm_normal_page(vma, addr, pte);
    if (!page)
        return NULL;

    if (PageReserved(page))
        return NULL;

    nid = page_to_nid(page);
    if (!node_isset(nid, node_states[N_MEMORY]))
        return NULL;

    return page;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
                          struct vm_area_struct *vma,
                          unsigned long addr)
{
    struct page *page;
    int nid;

    if (!pmd_present(pmd))
        return NULL;

    page = vm_normal_page_pmd(vma, addr, pmd);
    if (!page)
        return NULL;

    if (PageReserved(page))
        return NULL;

    nid = page_to_nid(page);
    if (!node_isset(nid, node_states[N_MEMORY]))
        return NULL;

    return page;
}
#endif

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
        unsigned long end, struct mm_walk *walk)
{
    struct numa_maps *md = walk->private;
    struct vm_area_struct *vma = walk->vma;
    spinlock_t *ptl;
    pte_t *orig_pte;
    pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    ptl = pmd_trans_huge_lock(pmd, vma);
    if (ptl) {
        struct page *page;

        page = can_gather_numa_stats_pmd(*pmd, vma, addr);
        if (page)
            gather_stats(page, md, pmd_dirty(*pmd),
                     HPAGE_PMD_SIZE/PAGE_SIZE);
        spin_unlock(ptl);
        return 0;
    }

    if (pmd_trans_unstable(pmd))
        return 0;
#endif
    orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
    do {
        struct page *page = can_gather_numa_stats(*pte, vma, addr);
        if (!page)
            continue;
        gather_stats(page, md, pte_dirty(*pte), 1);

    } while (pte++, addr += PAGE_SIZE, addr != end);
    pte_unmap_unlock(orig_pte, ptl);
    return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
        unsigned long addr, unsigned long end, struct mm_walk *walk)
{
    pte_t huge_pte = huge_ptep_get(pte);
    struct numa_maps *md;
    struct page *page;

    if (!pte_present(huge_pte))
        return 0;

    page = pte_page(huge_pte);
    if (!page)
        return 0;

    md = walk->private;
    gather_stats(page, md, pte_dirty(huge_pte), 1);
    return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
        unsigned long addr, unsigned long end, struct mm_walk *walk)
{
    return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
    struct numa_maps_private *numa_priv = m->private;
    struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
    struct vm_area_struct *vma = v;
    struct numa_maps *md = &numa_priv->md;
    struct file *file = vma->vm_file;
    struct mm_struct *mm = vma->vm_mm;
    struct mm_walk walk = {
        .hugetlb_entry = gather_hugetlb_stats,
        .pmd_entry = gather_pte_stats,
        .private = md,
        .mm = mm,
    };
    struct mempolicy *pol;
    char buffer[64];
    int nid;

    if (!mm)
        return 0;

    /* Ensure we start with an empty set of numa_maps statistics. */
    memset(md, 0, sizeof(*md));

    pol = __get_vma_policy(vma, vma->vm_start);
    if (pol) {
        mpol_to_str(buffer, sizeof(buffer), pol);
        mpol_cond_put(pol);
    } else {
        mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
    }

    seq_printf(m, "%08lx %s", vma->vm_start, buffer);

    if (file) {
        seq_puts(m, " file=");
        seq_file_path(m, file, "\n\t= ");
    } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
        seq_puts(m, " heap");
    } else if (is_stack(proc_priv, vma)) {
        seq_puts(m, " stack");
    }

    if (is_vm_hugetlb_page(vma))
        seq_puts(m, " huge");

    /* mmap_sem is held by m_start */
    walk_page_vma(vma, &walk);

    if (!md->pages)
        goto out;

    if (md->anon)
        seq_printf(m, " anon=%lu", md->anon);

    if (md->dirty)
        seq_printf(m, " dirty=%lu", md->dirty);

    if (md->pages != md->anon && md->pages != md->dirty)
        seq_printf(m, " mapped=%lu", md->pages);

    if (md->mapcount_max > 1)
        seq_printf(m, " mapmax=%lu", md->mapcount_max);

    if (md->swapcache)
        seq_printf(m, " swapcache=%lu", md->swapcache);

    if (md->active < md->pages && !is_vm_hugetlb_page(vma))
        seq_printf(m, " active=%lu", md->active);

    if (md->writeback)
        seq_printf(m, " writeback=%lu", md->writeback);

    for_each_node_state(nid, N_MEMORY)
        if (md->node[nid])
            seq_printf(m, " N%d=%lu", nid, md->node[nid]);

    seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
    seq_putc(m, '\n');
    m_cache_vma(m, vma);
    return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
    return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
    return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
    .start  = m_start,
    .next   = m_next,
    .stop   = m_stop,
    .show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
    .start  = m_start,
    .next   = m_next,
    .stop   = m_stop,
    .show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
              const struct seq_operations *ops)
{
    return proc_maps_open(inode, file, ops,
                sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
    return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
    return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
    .open        = pid_numa_maps_open,
    .read        = seq_read,
    .llseek        = seq_lseek,
    .release    = proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
    .open        = tid_numa_maps_open,
    .read        = seq_read,
    .llseek        = seq_lseek,
    .release    = proc_map_release,
};
#endif /* CONFIG_NUMA */
 
