低内存手机kernel的几个性能优化点
1. 快速释放内存 boost free anon pages of sigkill process
1.1 修改@kernel/msm-4.9 / arch/arm/mm/fault.c
#include "fault.h"
#include <linux/boost_sigkill_free.h>
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
/*
 * __do_page_fault() (excerpt; truncated in this document).
 * Patch: if the owning mm is flagged MMF_FAST_FREEING, its pages are
 * being torn down by the boost-SIGKILL path, so the fault is not
 * serviced; instead SIGKILL is re-asserted on the faulting thread and
 * the fault fails with VM_FAULT_BADMAP.
 */
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
unsigned int flags, struct task_struct *tsk)
{
struct vm_area_struct *vma;
int fault;
/*
 * mm is being fast-freed (see fast_free_user_mem()): do not
 * re-populate page tables that are concurrently being zapped.
 * Re-queue SIGKILL so this task exits promptly instead.
 */
if (unlikely(test_bit(MMF_FAST_FREEING, &mm->flags))) {
task_clear_jobctl_pending(tsk, JOBCTL_PENDING_MASK);
sigaddset(&tsk->pending.signal, SIGKILL);
set_tsk_thread_flag(tsk, TIF_SIGPENDING);
/* Report a bad mapping; the caller's signal delivery kills the task. */
return VM_FAULT_BADMAP;
}
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
if (unlikely(!vma))
goto out;
if (unlikely(vma->vm_start > addr))
goto check_stack;
/*
1.2 添加@kernel/msm-4.9 / include/linux/boost_sigkill_free.h
/*
 * include/linux/boost_sigkill_free.h
 *
 * Boost memory free for SIGKILLed process
 *
 * Copyright (C) 2016 Huawei Technologies Co., Ltd.
 */
#ifndef _BOOST_SIGKILL_FREE_H
#define _BOOST_SIGKILL_FREE_H
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/signal.h>
/*
 * mm->flags bit marking an mm whose user pages are being freed early.
 * NOTE(review): bit 21 may collide with an existing MMF_* bit in this
 * kernel (e.g. MMF_OOM_SKIP in 4.9-era trees) -- verify against the
 * MMF_* definitions in linux/sched.h before merging.
 */
#define MMF_FAST_FREEING 21
/* True only for SIGKILL; intentionally narrower than sig_fatal(). */
#define sig_kernel_kill(sig) ((sig) == SIGKILL)
/* Eagerly unmap the current task's anonymous/private user memory. */
extern void fast_free_user_mem(void);
#endif /* _BOOST_SIGKILL_FREE_H */
1.3 @kernel/msm-4.9 / kernel/exit.c
#include <asm/mmu_context.h>
#include <linux/boost_sigkill_free.h>
...
/*
 * Take down every thread in the group. This is called by fatal signals
 * as well as by sys_exit_group (below).
 *
 * Patch: after the group exit is arranged, if the exit was caused by
 * SIGKILL (sig_kernel_kill()), eagerly free this task's user memory
 * via fast_free_user_mem() before the normal do_exit() teardown, so
 * memory is returned to the system sooner on low-memory devices.
 */
void
do_group_exit(int exit_code)
{
struct signal_struct *sig = current->signal;
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
if (signal_group_exit(sig))
exit_code = sig->group_exit_code;
else if (!thread_group_empty(current)) {
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
if (signal_group_exit(sig))
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit_code = exit_code;
sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
}
/*
 * Boost-free hook: only a raw SIGKILL exit code triggers the early
 * unmap; normal exits and other fatal signals follow the usual path.
 */
if (sig_kernel_kill(exit_code))
fast_free_user_mem();
do_exit(exit_code);
/* NOTREACHED */
}
1.4 @kernel/msm-4.9 / mm/memory.c
#include "internal.h"
#include <linux/boost_sigkill_free.h>
...
/* Return a page-table page's split-PTL spinlock to its slab cache. */
void ptlock_free(struct page *page)
{
kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif
/*
 * Walk every VMA of the dying mm and zap its user pages.
 *
 * Hugetlb VMAs are left alone, mlocked VMAs are left alone (they
 * require explicit munlocking before unmap, so we keep it simple and
 * skip them), and only anonymous or non-shared mappings are unmapped.
 * Caller must hold mm->mmap_sem (see fast_free_user_mem()).
 */
static void __fast_free_user_mem(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *cur;

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (cur = mm->mmap; cur; cur = cur->vm_next) {
		if (is_vm_hugetlb_page(cur) || (cur->vm_flags & VM_LOCKED))
			continue;
		/* Skip shared file-backed mappings; zap everything else. */
		if (!vma_is_anonymous(cur) && (cur->vm_flags & VM_SHARED))
			continue;
		unmap_page_range(&tlb, cur, cur->vm_start, cur->vm_end, NULL);
	}
	tlb_finish_mmu(&tlb, 0, -1);
}
/*
 * Eagerly release the current task's user memory on SIGKILL.
 *
 * The MMF_FAST_FREEING bit in mm->flags guarantees the VMA walk runs
 * exactly once per mm: the first caller to set the bit performs the
 * free, every later caller (and the page-fault fast path, which tests
 * the same bit) backs off immediately.
 */
void fast_free_user_mem(void)
{
	struct mm_struct *mm = current->mm;

	if (!mm)
		return;

	down_read(&mm->mmap_sem);
	/* First thread to set the bit owns the walk; others just drop the lock. */
	if (!test_and_set_bit(MMF_FAST_FREEING, &mm->flags))
		__fast_free_user_mem(mm);
	up_read(&mm->mmap_sem);
}
2. 尽量释放更多的匿名页和文件缓存页 — try to reclaim more anon & file cache pages by kswapd
@kernel/msm-4.9 / mm/vmscan.c
/*
 * shrink_node_memcg() (excerpt; truncated in this document).
 * Patch: when running as kswapd at sc->priority > 8, raise each LRU's
 * scan target to a priority-scaled floor so kswapd keeps reclaiming
 * some anon and file pages even when get_scan_count() produced 0 for
 * a list.
 */
static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
struct scan_control *sc, unsigned long *lru_pages)
{
struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
unsigned long nr[NR_LRU_LISTS];
unsigned long targets[NR_LRU_LISTS];
unsigned long nr_to_scan;
enum lru_list lru;
unsigned long nr_reclaimed = 0;
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
struct blk_plug plug;
bool scan_adjusted;
get_scan_count(lruvec, memcg, sc, nr, lru_pages);
/* Record the original scan target for proportional adjustments later */
memcpy(targets, nr, sizeof(nr));
/*
 * Floor candidates considered during tuning, with nr computed as
 * SWAP_CLUSTER_MAX >> shift (assuming SWAP_CLUSTER_MAX == 32):
 * sc->priority: 12, 11, 10, 9
 * (4) shift: 4, 3, 2, 1
 * nr: 2, 4, 8, 16
 * (5) shift: 5, 4, 3, 2
 * nr: 1, 2, 4, 8
 * (3) shift: 3, 2, 1, 0
 * nr: 4, 8, 16, 32
 * Variant (4) is the one implemented below.
 */
/* Only kswapd is allowed to reclaim more anon & file cache pages */
if (current_is_kswapd() && sc->priority > 8) {
unsigned long adjust_nr = 1;
/* shift == sc->priority - 8 when DEF_PRIORITY == 12, i.e. 4..1 here. */
int shift = 4 - DEF_PRIORITY + sc->priority;
/*
 * NOTE(review): with sc->priority > 8 this test never fails
 * (shift >= 1) assuming DEF_PRIORITY == 12 -- confirm.
 */
if (shift >= 0)
adjust_nr = SWAP_CLUSTER_MAX >> shift;
/* Clamp every LRU's scan target up to the floor. */
if (nr[LRU_INACTIVE_ANON] < adjust_nr)
nr[LRU_INACTIVE_ANON] = adjust_nr;
if (nr[LRU_ACTIVE_ANON] < adjust_nr)
nr[LRU_ACTIVE_ANON] = adjust_nr;
if (nr[LRU_INACTIVE_FILE] < adjust_nr)
nr[LRU_INACTIVE_FILE] = adjust_nr;
if (nr[LRU_ACTIVE_FILE] < adjust_nr)
nr[LRU_ACTIVE_FILE] = adjust_nr;
}
/*
* Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
3. 动态调整 swappiness 参数 — dynamically adjust swappiness depending on who triggered reclaim (kswapd vs. direct reclaim)
@/kernel/msm-4.9 / mm/vmscan.c
/*
 * From 0 .. 100. Higher means more swappy.
 * Patch: raised from the upstream default of 60 to 100 so background
 * (kswapd) reclaim swaps anon pages more aggressively; direct reclaim
 * is forced back to 60 in get_scan_count().
 */
int vm_swappiness = 100;
...
/*
 * get_scan_count() (excerpt; truncated in this document).
 * Patch: the effective swappiness now depends on who triggered the
 * reclaim -- kswapd uses the memcg/global value (vm_swappiness == 100
 * in this tree), while any non-kswapd (direct reclaim) caller is
 * pinned to the upstream default of 60 to limit swap pressure on the
 * allocating task's latency.
 */
static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
struct scan_control *sc, unsigned long *nr,
unsigned long *lru_pages)
{
int swappiness = mem_cgroup_swappiness(memcg);
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
u64 fraction[2];
u64 denominator = 0; /* gcc */
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
unsigned long anon_prio, file_prio;
enum scan_balance scan_balance;
unsigned long anon, file;
unsigned long ap, fp;
enum lru_list lru;
/* Direct reclaim: override with the conservative upstream default. */
if (!current_is_kswapd()) {
swappiness = 60;
}
/* If we have no swap space, do not bother scanning anon pages. */
if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
scan_balance = SCAN_FILE;
goto out;
}
4. 快杀机制 — boost killing by increasing thread priority
@kernel/msm-4.9 / kernel/signal.c
/*
 * complete_signal() (excerpt; middle elided, truncated in this
 * document).
 * Patch: when a fatal non-coredump signal takes the group down, each
 * thread is boosted to nice -20 (when can_nice() permits) before being
 * woken with SIGKILL pending, so the dying threads are scheduled
 * sooner and release their resources faster.
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
struct signal_struct *signal = p->signal;
struct task_struct *t;
...
/*
 * Found a killable thread. If the signal will be fatal,
 * then start taking the whole group down immediately.
 */
if (sig_fatal(p, sig) &&
!(signal->flags & SIGNAL_GROUP_EXIT) &&
!sigismember(&t->real_blocked, sig) &&
(sig == SIGKILL || !p->ptrace)) {
/*
 * This signal will be fatal to the whole group.
 */
if (!sig_kernel_coredump(sig)) {
/*
 * Start a group exit and wake everybody up.
 * This way we don't have other threads
 * running and doing things after a slower
 * thread has the fatal signal pending.
 */
signal->flags = SIGNAL_GROUP_EXIT;
signal->group_exit_code = sig;
signal->group_stop_count = 0;
t = p;
do {
/*
 * Kill boost: raise the dying thread's priority.
 * NOTE(review): set_user_nice() is being called from
 * signal-delivery context under the siglock -- confirm
 * this is safe in this tree, and that boosting killed
 * tasks to -20 cannot starve foreground work.
 */
if (can_nice(t, -20)) {
set_user_nice(t, -20);
}
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
} while_each_thread(p, t);
return;
}
}