Linux Memory Management

  • System-wide memory usage: cat /proc/meminfo
  • Memory usage of a single process: /proc/28040/status (28040 being the example PID)
  • Per-process CPU and memory usage: top
  • Virtual-memory statistics: vmstat
  • Overall memory usage: free

 

https://cloud.tencent.com/developer/article/1005671

The buddy system

1)    Allocation algorithm

  • To allocate a block of 2^i pages, look at the free list for order i; if that list has a free block, hand it to the requester
  • If it is empty, check the order i+1 list for a free block; if one exists, split it in two: give one 2^i half to the requester and insert the other 2^i half into the order i list
  • If the order i+1 list is empty as well, repeat step 2 at ever higher orders until a list with a free block is found
  • If no order has a free block, the allocation fails

2)    Reclamation algorithm

  • To free a block of 2^i pages, search the order i free list for the block's buddy, i.e. a free block physically contiguous with it; if there is none, no merge is needed and the block is simply inserted into the order i list
  • If the buddy is free, merge the two into a single 2^(i+1) block and repeat the check at order i+1; both the split and merge paths appear in the sketch below
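A minimal user-space sketch of both algorithms, assuming MAX_ORDER, per-order free lists, and plain page indices as stand-ins for the kernel's real data structures; all names here are invented for illustration, not kernel code:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER 5                     /* orders 0..5 -> blocks of 1..32 pages */

struct block { size_t index; struct block *next; };
static struct block *free_list[MAX_ORDER + 1];

static void push(int order, size_t index)
{
	struct block *b = malloc(sizeof(*b));
	b->index = index;
	b->next = free_list[order];
	free_list[order] = b;
}

static int pop(int order, size_t *index)
{
	struct block *b = free_list[order];
	if (!b) return 0;
	*index = b->index;
	free_list[order] = b->next;
	free(b);
	return 1;
}

/* Allocation: take from list i; if empty, split a larger block and return
 * the unused half to the lower list, until order i is satisfied. */
static int buddy_alloc(int order, size_t *index)
{
	int o;
	for (o = order; o <= MAX_ORDER; o++) {
		size_t idx;
		if (pop(o, &idx)) {
			while (o > order) {      /* split: keep low half, free high half */
				o--;
				push(o, idx + ((size_t)1 << o));
			}
			*index = idx;
			return 1;
		}
	}
	return 0;                            /* no free block at any order: fail */
}

/* Free: merge with the physically contiguous buddy while possible. */
static void buddy_free(int order, size_t index)
{
	while (order < MAX_ORDER) {
		size_t buddy = index ^ ((size_t)1 << order);  /* contiguous buddy */
		struct block **pp = &free_list[order], *b = NULL;
		for (; *pp; pp = &(*pp)->next)
			if ((*pp)->index == buddy) { b = *pp; break; }
		if (!b) break;                   /* buddy not free: no merge needed */
		*pp = b->next;                   /* unlink buddy and merge upward */
		free(b);
		if (buddy < index) index = buddy;
		order++;
	}
	push(order, index);
}

int main(void)
{
	size_t a, b;
	push(MAX_ORDER, 0);                  /* start with one 32-page block */
	buddy_alloc(0, &a);                  /* splits 32 -> 16 + 8 + 4 + 2 + 1 + 1 */
	buddy_alloc(1, &b);
	printf("order-0 block at page %zu, order-1 block at page %zu\n", a, b);
	buddy_free(0, a);
	buddy_free(1, b);                    /* merges cascade back to one 32-page block */
	return 0;
}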

 

The slab allocator: basic idea

  • The basic idea is to keep objects the kernel uses frequently in a cache, maintained by the system in a ready-to-use, initialized state. The process descriptor is a typical example: the kernel requests and releases it constantly
  • It serves small allocations, which would be wasteful to satisfy with whole pages from the buddy allocator; the sketch below shows the interface
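A minimal sketch of the slab interface in a kernel-module context, using the modern five-argument kmem_cache_create (older 2.6 kernels also took constructor/destructor arguments); struct foo is a hypothetical frequently-allocated object standing in for something like the process descriptor:

#include <linux/module.h>
#include <linux/slab.h>

struct foo {				/* hypothetical frequently-allocated object */
	int state;
	char name[16];
};

static struct kmem_cache *foo_cachep;

/* Allocation and release go through the per-cache free lists instead of
 * the page-granular buddy allocator. */
static struct foo *foo_alloc(void)
{
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cachep, f);
}

static int __init foo_init(void)
{
	/* One cache of equally sized, ready-to-use objects. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");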

 

Page faults

  • Request one or more physical pages via get_free_pages (i.e. the kernel's __get_free_pages)
  • Compute the address of the pte that covers addr within the process's pgd-rooted page-table mapping
  • Set the pte for addr to the physical address of the first page; these three steps are sketched below
  • System calls behind the allocation: brk when the requested size is at most 128 KB, mmap (do_mmap in the kernel) when it is larger than 128 KB
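A heavily simplified, 2.6-era-style sketch of those three steps. It assumes the intermediate page-table levels already exist and omits locking and error paths; a real kernel goes through handle_mm_fault, which also allocates missing levels. fill_page is an invented name, not a kernel function:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>

static int fill_page(struct mm_struct *mm, unsigned long addr)
{
	unsigned long page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* 1. Take a free physical page from the buddy allocator. */
	page = __get_free_pages(GFP_KERNEL, 0);
	if (!page)
		return -ENOMEM;

	/* 2. Walk pgd -> pud -> pmd down to the pte slot covering addr. */
	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_offset_map(pmd, addr);

	/* 3. Point the pte at the new page frame, with RW permissions. */
	set_pte(pte, mk_pte(virt_to_page((void *)page), PAGE_SHARED));
	pte_unmap(pte);
	return 0;
}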

 

 

mmap

https://www.cnblogs.com/huxiao-tee/p/4660352.html

The flow: establish the mapping → page fault on first access → swap cache lookup → disk I/O loads the page → the system writes dirty pages back automatically. A user-space example follows.
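A small program tracing that flow; the file name data.txt is hypothetical. mmap only establishes the mapping, the first store triggers the page fault and the disk read, and dirty pages are written back by the kernel on its own (msync merely forces it):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.txt", O_RDWR);      /* hypothetical file */
	struct stat st;

	if (fd < 0 || fstat(fd, &st) < 0) {
		perror("open/fstat");
		return 1;
	}

	char *p = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p[0] = 'X';                     /* first access: page fault + disk I/O */
	msync(p, st.st_size, MS_SYNC);  /* optional: force dirty-page writeback */
	munmap(p, st.st_size);
	close(fd);
	return 0;
}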

https://cloud.tencent.com/developer/article/1084101

Memory areas are organized in two structures at once, a red-black tree and a linked list; the tree node is embedded in the list element, vm_area_struct.

mm_users: reference count of the address-space users; vfork and clone (which share the mm) increment it.

fork, by contrast, allocates a new mm_struct and copies the parent's into it; the demonstration below makes the difference visible from user space.
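A runnable demonstration, assuming Linux and glibc's clone wrapper: with CLONE_VM the child shares the parent's mm (mm_users is incremented), so its store is visible to the parent; after fork the child gets its own copied mm_struct, so it is not:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int value = 0;

static int child_fn(void *arg)
{
	value = 42;                 /* write through the (possibly shared) mm */
	return 0;
}

int main(void)
{
	char *stack = malloc(64 * 1024);

	/* CLONE_VM: same address space as the parent, like vfork/threads. */
	pid_t pid = clone(child_fn, stack + 64 * 1024, CLONE_VM | SIGCHLD, NULL);
	waitpid(pid, NULL, 0);
	printf("after clone(CLONE_VM): value = %d\n", value);   /* 42 */

	value = 0;
	pid = fork();               /* new mm_struct, copy-on-write pages */
	if (pid == 0) {
		value = 42;
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("after fork:           value = %d\n", value);    /* still 0 */

	free(stack);
	return 0;
}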

 

This section looks at how Linux implements virtual memory concretely, using the structure definitions below. task_struct is the process descriptor (the PCB, part of process management); its mm (memory management) field points to an mm_struct, which describes the process's virtual address space and contains two key fields: pgd and mmap. pgd holds the base address of the top-level page table, while mmap points to a linked list of vm_area_struct (area structures). Within each area, vm_start and vm_end give the start and end addresses of the region, vm_prot describes the read/write permissions of all pages in the region, and vm_flags records whether the region is shared with other processes or private to this one.
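Before the full mm_struct listing below, here is an abridged vm_area_struct from a 2.6-era kernel for reference, trimmed to the fields named above. Note that the permission field is spelled vm_page_prot in the kernel source, and the embedded rb_node is the red-black-tree linkage mentioned earlier:

struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags. */

	struct rb_node vm_rb;		/* Node in the mm's red-black tree. */

	/* ... file-mapping, anon_vma and operations fields omitted ... */
};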

struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size; 	/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects page tables and some counters */

	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	/* Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	mm_counter_t _file_rss;
	mm_counter_t _anon_rss;

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	cpumask_t cpu_vm_mask;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Swap token stuff */
	/*
	 * Last value of global fault stamp as seen by this process.
	 * In other words, this value gives an indication of how long
	 * it has been since this task got the token.
	 * Look at mm/thrash.c
	 */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;

	/* aio bits */
	rwlock_t		ioctx_list_lock;
	struct kioctx		*ioctx_list;
};
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
		unsigned int inuse;	/* SLUB: Nr of objects */
	};
	union {
	    struct {
		unsigned long private;		/* Mapping-private opaque data:
					 	 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
	    };
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	    spinlock_t ptl;
#endif
	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
	    struct page *first_page;	/* Compound tail pages */
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
};

 
