The three levels of Linux physical memory description

Linux describes physical memory with three levels of structs:

pglist_data // describes a memory node

zone // describes a zone within a node: DMA, Normal and HighMem

page // describes a single page, typically 4 KB in size

The exact meaning of each struct member is explained in the comments in the code below; where the original English comments are already clear, refer to them directly.
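To make the three levels concrete before diving into the fields, here is a minimal sketch of walking them. The iterator for_each_online_pgdat() and the fields used are real kernel symbols of this generation, but the function itself is a hypothetical illustration, not kernel code:

#include <linux/mmzone.h>
#include <linux/kernel.h>

/* Illustrative only: walk node -> zone and report page counts. */
static void walk_memory_hierarchy(void)
{
	pg_data_t *pgdat;
	int i;

	for_each_online_pgdat(pgdat) {			/* level 1: nodes */
		for (i = 0; i < pgdat->nr_zones; i++) {	/* level 2: zones */
			struct zone *z = &pgdat->node_zones[i];

			if (!z->present_pages)
				continue;
			/* level 3: pages, here only counted */
			printk(KERN_INFO "node %d zone %s: %lu pages\n",
			       pgdat->node_id, z->name, z->present_pages);
		}
	}
}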

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];	// the memory zones within this node
	struct zonelist node_zonelists[MAX_ZONELISTS];	// the node's fallback zone lists, i.e. an ordered list of the zones of all nodes. When this node has no memory available, allocation falls back to these zones; in fact, unless GFP_THISNODE is specified, every allocation goes through the zonelist, preferring HighMem > Normal > DMA
	/* number of populated zones in this node */
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;	// pointer to the page descriptor of this node's first page
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;	// used by the boot-time bootmem allocator to manage memory; its node_bootmem_map member points to a bitmap in which each bit records whether a page is already in use
#endif

	unsigned long node_start_pfn;	// frame number of the node's first page, i.e. this node's index into the global mem_map
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;	// wait queue of this node's kswapd daemon, used when pages of the node need to be swapped out
	struct task_struct *kswapd;	// the daemon responsible for swapping out this node's pages
	int kswapd_max_order;	// the largest allocation order kswapd was woken up to reclaim for (an order, not a page count)
} pg_data_t;
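node_start_pfn and node_mem_map together are essentially what pfn_to_page() reduces to on a flat (non-sparse) memory model. A minimal sketch of that mapping, as an illustration of the idea rather than the kernel's actual macro (which varies with the configured memory model):

/* Illustrative: locate the struct page for a pfn that lies in this node. */
static struct page *node_pfn_to_page(pg_data_t *pgdat, unsigned long pfn)
{
	/* index into the node's array of page descriptors */
	return pgdat->node_mem_map + (pfn - pgdat->node_start_pfn);
}

With NODE_DATA(nid) returning a node's pg_data_t, node_pfn_to_page(NODE_DATA(0), pfn) would yield the descriptor of page frame pfn on node 0.
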
struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];	// the zone's three watermarks: WMARK_MIN, WMARK_LOW and WMARK_HIGH

#ifdef CONFIG_NUMA
	int node;	// the node this zone belongs to
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;	// zone reclaim starts once the number of unmapped pages exceeds this value
	unsigned long		min_slab_pages;	// when the reclaimable slab pages in this zone exceed this value, slab caches are shrunk
#endif
	struct per_cpu_pageset __percpu *pageset;	// per-CPU page cache: a list of single pages used to serve
	// single-page allocations. Because each CPU has its own pageset, no lock is needed, the pages stay
	// cache-hot on that CPU, and the zone is not fragmented into many small blocks.
	// per_cpu_pages has three members: count, high and batch. count is the number of cached pages and
	// high its upper limit; when count exceeds high, batch pages are returned to the buddy system, and
	// when the cache has no page left to hand out, batch pages are refilled from the buddy system.
	// high and batch are computed by zone_batchsize() and setup_pageset(): for a zone above roughly
	// 512 MB, batch = 32 and high = 6 * 32 = 192; below that, batch = present_pages / 1024 / 4,
	// so high is roughly 0.15% of the zone's total memory
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;	// spinlock protecting free_area

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;	// protects spanned/present_pages; they only change on memory hotplug, so a seqlock suffices. Both fields are described below
#endif
	struct free_area	free_area[MAX_ORDER];	// the buddy allocator's MAX_ORDER (11) free lists; the order-n list holds blocks of 2^n pages, with n running from 0 to 10

	ZONE_PADDING(_pad1_)	// padding that keeps the following fields on a separate cacheline

	/* Fields commonly accessed by the page reclaim scanner */
	// Page reclaim in Linux is based on the LRU (least recently used) algorithm. LRU rests on the
	// observation that a page used frequently in the recent past is likely to be accessed again soon,
	// while a page untouched for a long time is unlikely to be accessed in the near future; under
	// memory pressure, such pages are the best candidates for eviction. The basic idea is simple:
	// associate each physical page with a counter that records how recently the page was accessed.
	spinlock_t		lru_lock;	
	struct zone_lru {
		struct list_head list;
	} lru[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * Access to this field is quite racy even on uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int prev_priority;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	* wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;	// the node this zone resides on
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;	// offset of the zone's first page within the global mem_map

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;
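The watermark array above is meant to be read through the *_wmark_pages(zone) accessors, as its comment says. Here is a hedged sketch of a typical check: zone_page_state() and low_wmark_pages() are real symbols of this kernel generation, but the helper itself is hypothetical:

/* Illustrative: is this zone running short of free pages? */
static int zone_below_low_wmark(struct zone *z)
{
	unsigned long free = zone_page_state(z, NR_FREE_PAGES);

	return free < low_wmark_pages(z);	/* reads watermark[WMARK_LOW] */
}

The real allocator uses the richer zone_watermark_ok(), which also accounts for the allocation order and the per-order free lists.
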



/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
		struct {		/* SLUB */
			u16 inuse;
			u16 objects;
		};
	};
	union {
	    struct {
		unsigned long private;		/* Mapping-private opaque data:
					 	 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
	    };
#if USE_SPLIT_PTLOCKS
	    spinlock_t ptl;
#endif
	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
	    struct page *first_page;	/* Compound tail pages */ // for the tail pages of a compound page, this points to the first (head) page
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */ // for a file-mapped page, the page's offset (in pages) within the file
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif
};
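As the comment on mapping explains, its low bit tells anonymous pages apart from file-backed ones. A minimal sketch of that check follows; PAGE_MAPPING_ANON is a real kernel constant, the helper is hypothetical, and the kernel's own PageAnon() performs exactly this test:

/* Illustrative: anonymous page (low bit of mapping set) or file-backed? */
static int page_is_anonymous(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}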


                