WALT (kernel 4.14)

I. Key data structures

  • task_event

enum task_event {
       PUT_PREV_TASK   = 0,
       PICK_NEXT_TASK  = 1,
       TASK_WAKE       = 2,
       TASK_MIGRATE    = 3,
       TASK_UPDATE     = 4,
       IRQ_UPDATE      = 5,
};

  • ravg

/* ravg represents frequency scaled cpu-demand of tasks */
struct ravg {
       /*
        * 'mark_start' marks the beginning of an event (task waking up, task
        * starting to execute, task being preempted) within a window
        *
        * 'sum' represents how runnable a task has been within current
        * window. It incorporates both running time and wait time and is
        * frequency scaled.
        *
        * 'sum_history' keeps track of history of 'sum' seen over previous
        * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
        * ignored.
        *
        * 'demand' represents maximum sum seen over previous
        * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
        * demand for tasks.
        *
        * 'curr_window' represents task's contribution to cpu busy time
        * statistics (rq->curr_runnable_sum) in current window
        *
        * 'prev_window' represents task's contribution to cpu busy time
        * statistics (rq->prev_runnable_sum) in previous window
        */
       u64 mark_start;                //timestamp at which the most recent task_event (see above) began
       u32 sum, demand;
       u32 sum_history[RAVG_HIST_SIZE_MAX];
       u32 curr_window, prev_window;
       u16 active_windows;
};

sum is the task's accumulated busy time in the current window, normalized by FIE and CIE (frequency- and capacity-invariance scaling) to the highest frequency of the biggest core. demand is computed from the most recent sysctl_sched_ravg_hist_size windows (5 by default) according to one of the following policies (see update_history() below):

#define WINDOW_STATS_RECENT         0   //most recent of the 5 windows
#define WINDOW_STATS_MAX            1   //max of the 5 windows
#define WINDOW_STATS_MAX_RECENT_AVG 2   //max(average of the 5 windows, most recent window)
#define WINDOW_STATS_AVG            3   //average of the 5 windows
#define WINDOW_STATS_INVALID_POLICY 4
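The FIE/CIE normalization that feeds p->ravg.sum can be sketched as below. This is a simplified model, not the exact vendor code: cpu_cur_freq() and the globals max_possible_freq / max_possible_capacity are assumed stand-ins for however the platform reports the current frequency and the biggest core's maximum frequency and capacity.

static u64 scale_exec_time(u64 delta, struct rq *rq)
{
       unsigned int cur_freq = cpu_cur_freq(cpu_of(rq));   /* assumed helper */

       /* FIE: normalize raw time to the highest frequency in the system */
       delta = div64_u64(delta * cur_freq, max_possible_freq);   /* assumed global */
       /* CIE: normalize to the capacity of the biggest core */
       delta = div64_u64(delta * capacity_orig_of(cpu_of(rq)),
                         max_possible_capacity);                 /* assumed global */
       return delta;
}

static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
{
       delta = scale_exec_time(delta, rq);
       p->ravg.sum += delta;
       /* sum can never exceed one full window's worth of scaled time */
       if (unlikely(p->ravg.sum > walt_ravg_window))
               p->ravg.sum = walt_ravg_window;
}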

  • task_struct

struct task_struct {
#ifdef CONFIG_SCHED_WALT
       struct ravg ravg;        //p->ravg.demand: task demand, used in EAS task placement
       /*
        * 'init_load_pct' represents the initial task load assigned to children
        * of this task
        */
       u32 init_load_pct;
       u64 last_sleep_ts;
#endif
};

  • rq

struct cfs_rq {
#ifdef CONFIG_SCHED_WALT
       u64 cumulative_runnable_avg;       //accumulated load of the cfs_rq's tasks: sum of p->ravg.demand
#endif
};

struct rq {
#ifdef CONFIG_SCHED_WALT
       u64 cumulative_runnable_avg;     //Sum of the demand of all runnable tasks on this CPU. Represents the instantaneous load. Used in EAS task placement.
       u64 window_start;
       u64 curr_runnable_sum;
       u64 prev_runnable_sum;        //CPU utilization in the most recent complete window. Input to schedutil.
       u64 nt_curr_runnable_sum;
       u64 nt_prev_runnable_sum;
       u64 cur_irqload;
       u64 avg_irqload;
       u64 irqload_ts;
       u64 cum_window_demand;        //Sum of the demand of tasks that ran in the current window. Used to estimate the frequency, and in EAS task placement to evaluate the energy difference.
#endif /* CONFIG_SCHED_WALT */
};

1. Updating cfs_rq->cumulative_runnable_avg

walt_fixup_cumulative_runnable_avg_fair(struct rq *rq,
                            struct task_struct *p,
                            u64 new_task_load)
{
    struct cfs_rq *cfs_rq;
    struct sched_entity *se = &p->se;
    s64 task_load_delta = (s64)new_task_load - p->ravg.demand;

Walk up the se tree until reaching the se of the root task group, i.e. the se attached to the top-level cfs_rq (se->parent == NULL):

    for_each_sched_entity(se) {
        cfs_rq = cfs_rq_of(se);

        cfs_rq->cumulative_runnable_avg += task_load_delta;
        if (cfs_rq_throttled(cfs_rq))
            break;
    }

rq->cumulative_runnable_avg is only updated if no cfs_rq on the se tree is throttled:

    /* Fix up rq only if we didn't find any throttled cfs_rq */
    if (!se)
        walt_fixup_cumulative_runnable_avg(rq, p, new_task_load);
}

II. Filesystem interface (procfs)

/include/linux/sched/sysctl.h
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_walt_init_task_load_pct;
extern unsigned int sysctl_sched_walt_cpu_high_irqload;
#endif
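Assuming the usual registration in kernel/sysctl.c, these knobs appear under /proc/sys/kernel/ (the exact set varies between vendor kernels). For example:

sh-4.4# echo 1 > /proc/sys/kernel/sched_use_walt_cpu_util
sh-4.4# echo 1 > /proc/sys/kernel/sched_use_walt_task_util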

III. Trace events

IV. Key functions

1. walt_init_new_task_load() - how is a newly created task's load initialized?

walt_init_new_task_load(p) <- __sched_fork <- sched_fork <- copy_process <- _do_fork / fork_idle
walt_init_new_task_load(p) <- init_idle <- fork_idle (also sched_init, idle_thread_get)
walt_init_new_task_load(p) <- wake_up_new_task <- _do_fork

As the call graph above shows, the _do_fork path and the fork_idle path each appear to call walt_init_new_task_load() twice.

It initializes the following members:
 p->ravg.demand
 p->ravg.sum_history[i]
 p->init_load_pct = 0
demand and the history entries are set to init_load_windows, where init_load_windows = init_load_pct * walt_ravg_window / 100, i.e. the new task starts with a configurable percentage of the 16ms window.
init_load_pct is chosen by priority, from highest to lowest (a minimal sketch follows the transcript below):
1. If the parent's current->init_load_pct is set, the child uses the parent's percentage;
2. Otherwise, if the task's stune group has st->init_task_load_pct > 0, the stune percentage is used;
3. Otherwise (current->init_load_pct unset and st->init_task_load_pct <= 0), the system-wide sysctl_sched_walt_init_task_load_pct is used.

sh-4.4# cat /proc/sys/kernel/sched_walt_init_task_load_pct
15
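A minimal sketch of walt_init_new_task_load() implementing the priority order above. schedtune_init_task_load_pct() is a hypothetical helper standing in for however the vendor kernel reads st->init_task_load_pct:

void walt_init_new_task_load(struct task_struct *p)
{
       int i;
       u32 pct = sysctl_sched_walt_init_task_load_pct;  /* 3. system default */
       u32 init_load_windows;

       p->init_load_pct = 0;
       memset(&p->ravg, 0, sizeof(struct ravg));

       if (current->init_load_pct)                      /* 1. inherit from parent */
               pct = current->init_load_pct;
       else if (schedtune_init_task_load_pct(p) > 0)    /* 2. stune group (assumed helper) */
               pct = schedtune_init_task_load_pct(p);

       /* pct percent of one 16ms window, in scaled time units */
       init_load_windows = div64_u64((u64)pct * (u64)walt_ravg_window, 100);

       p->ravg.demand = init_load_windows;
       for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
               p->ravg.sum_history[i] = init_load_windows;
}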

2. walt_fixup_busy_time()

trace_sched_migrate_task(p, new_cpu) <- set_task_cpu
walt_fixup_busy_time(p, new_cpu)     <- set_task_cpu

When task p is moved to a new CPU, its contribution must be removed from the old CPU's window statistics and added to the new CPU's:
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
    trace_sched_migrate_task(p, new_cpu);

    if (task_cpu(p) != new_cpu) {
        if (p->sched_class->migrate_task_rq)
            /* Each sched class implements this. For CFS, with WALT this only
             * updates the task's se->vruntime and the cfs_rq's
             * load_last_update_time_copy. */
            p->sched_class->migrate_task_rq(p);
        p->se.nr_migrations++;
        perf_event_task_migrate(p);

        walt_fixup_busy_time(p, new_cpu);
    }

    __set_task_cpu(p, new_cpu);
}

void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
{
    struct rq *src_rq = task_rq(p);
    struct rq *dest_rq = cpu_rq(new_cpu);
    u64 wallclock;

Only proceed when the task is on a runqueue or in the TASK_WAKING state:

    if (!p->on_rq && p->state != TASK_WAKING)
        return;

    if (exiting_task(p)) {
        return;
    }

    if (p->state == TASK_WAKING)
        double_rq_lock(src_rq, dest_rq);

    wallclock = walt_ktime_clock();

    walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
            TASK_UPDATE, wallclock, 0);
    walt_update_task_ravg(dest_rq->curr, dest_rq,
            TASK_UPDATE, wallclock, 0);

    walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);

    /*
     * When a task is migrating during the wakeup, adjust
     * the task's contribution towards cumulative window
     * demand.
     */
    if (p->state == TASK_WAKING &&
        p->last_sleep_ts >= src_rq->window_start) {
        fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
        fixup_cum_window_demand(dest_rq, p->ravg.demand);
    }

    if (p->ravg.curr_window) {
        src_rq->curr_runnable_sum -= p->ravg.curr_window;
        dest_rq->curr_runnable_sum += p->ravg.curr_window;
    }

    if (p->ravg.prev_window) {
        src_rq->prev_runnable_sum -= p->ravg.prev_window;
        dest_rq->prev_runnable_sum += p->ravg.prev_window;
    }

    if ((s64)src_rq->prev_runnable_sum < 0) {
        src_rq->prev_runnable_sum = 0;
        WARN_ON(1);
    }
    if ((s64)src_rq->curr_runnable_sum < 0) {
        src_rq->curr_runnable_sum = 0;
        WARN_ON(1);
    }

    trace_walt_migration_update_sum(src_rq, p);
    trace_walt_migration_update_sum(dest_rq, p);

    if (p->state == TASK_WAKING)
        double_rq_unlock(src_rq, dest_rq);
}

  • walt_update_task_ravg()

  1. Calls update_window_start():
            advances rq->window_start to the last window boundary at or before wallclock;
            sets rq->cum_window_demand = rq->cumulative_runnable_avg, i.e. the new window's demand baseline is the rq's cumulative runnable load;
  2. Calls update_task_demand(p, rq, event, wallclock):
     a. For non-busy events (TASK_WAKE, plus PICK_NEXT_TASK and TASK_MIGRATE when walt_account_wait_time = 0): if a new window has started, the previous window's p->ravg.sum is pushed into p->ravg.sum_history[0]. If several windows elapsed while the task was not busy, those windows are dropped and never enter the history.
     b. If a busy event does not cross a window boundary, add_to_task_demand() accumulates the FIE/CIE-scaled delta into p->ravg.sum.
     c. If n windows were crossed (n >= 1), the earliest partial window is closed first: the (window_start - mark_start) delta, FIE/CIE-scaled, is added to p->ravg.sum (see the worked example after this list).
            Why does p->ravg.sum at that point already contain all of the task's busy time before the crossing? Because p->ravg.sum is refreshed on every scheduler_tick:
            walt_set_window_start() initially sets rq->window_start = 1, so every window boundary sits at (n*16ms + 1ns). The tick fires every 4ms, so each tick update lands inside the current window, 1ns before a boundary, and the task's busy time in that window is fully accounted.
            For this to hold, the following condition must be satisfied: the walt_ktime_clock() value read at line 4417 of scheduler_tick() must fall within {n*4ms .. n*4ms+1ns}? To be verified.

    4404 void scheduler_tick(void)
    4405 { 
    4406     int cpu = smp_processor_id();  
    4407     struct rq *rq = cpu_rq(cpu);   
    4408     struct task_struct *curr = rq->curr;
    4409     struct rq_flags rf;
    4410   
    4411     sched_clock_tick();  
    4412   
    4413     rq_lock(rq, &rf);    
    4414   
    4415     walt_set_window_start(rq, &rf);
    4416     walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
    4417             walt_ktime_clock(), 0);  

     d. Calls update_history() to push p->ravg.sum into p->ravg.sum_history[0] and to recompute p->ravg.demand.
     e. Calls update_history() again to record the n full windows as fully busy (16ms each, FIE/CIE-scaled) into p->ravg.sum_history[n-1..0].
     f. Advances window_start back to the most recent boundary.
     g. Calls add_to_task_demand() to add the remaining (wallclock - window_start), FIE/CIE-scaled, to p->ravg.sum.
  3. Calls update_cpu_busy_time().
  4. Sets p->ravg.mark_start = wallclock.
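A worked example of the window splitting in step 2 (hypothetical numbers; walt_ravg_window = 16ms, boundaries at 16/32/48ms, the +1ns offset ignored):

    mark_start = 10ms, wallclock = 52ms, current window_start = 48ms
    delta = window_start - mark_start = 38ms
    nr_full_windows = 38 / 16 = 2
    rewound window_start = 48 - 2*16 = 16ms

    head: add_to_task_demand(16ms - 10ms = 6ms), then update_history(p->ravg.sum, 1) closes the first window
    body: update_history(scale_exec_time(16ms), 2) records the two full busy windows
    tail: add_to_task_demand(52ms - 48ms = 4ms) starts the current window's p->ravg.sum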

The event type passed at each walt_update_task_ravg() call site is as follows:

   1   3219  kernel/sched/core.c <<walt_try_to_wake_up>>
             walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
   2   3220  kernel/sched/core.c <<walt_try_to_wake_up>>
             walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
   3   3404  kernel/sched/core.c <<try_to_wake_up_local>>
             walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
   4   3405  kernel/sched/core.c <<try_to_wake_up_local>>
             walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
   5   4416  kernel/sched/core.c <<scheduler_tick>>
             walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
   6   4792  kernel/sched/core.c <<__schedule>>
             walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
   7   4793  kernel/sched/core.c <<__schedule>>
             walt_update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
   8    265  kernel/sched/walt.c <<walt_account_irqtime>>
             walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
   9    867  kernel/sched/walt.c <<walt_fixup_busy_time>>
             walt_update_task_ravg(task_rq(p)->curr, task_rq(p),TASK_UPDATE, wallclock, 0);
  10    869  kernel/sched/walt.c <<walt_fixup_busy_time>>
             walt_update_task_ravg(dest_rq->curr, dest_rq,TASK_UPDATE, wallclock, 0);
  11    873  kernel/sched/walt.c <<walt_fixup_busy_time>>
             walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);

/* Reflect task activity on its demand and cpu's busy time statistics */
void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
         int event, u64 wallclock, u64 irqtime)
{
    u64 old_window_start;

    if (walt_disabled || !rq->window_start)
        return;

    lockdep_assert_held(&rq->lock);

    old_window_start = update_window_start(rq, wallclock);

    if (!p->ravg.mark_start)
        goto done;

    update_task_demand(p, rq, event, wallclock);
    update_cpu_busy_time(p, rq, event, wallclock, irqtime);

done:
    if (rq->window_start > old_window_start) {
        unsigned long cap_orig = capacity_orig_of(cpu_of(rq));
        u64 busy_limit = (walt_ravg_window * walt_busy_threshold) / 100;

        busy_limit = (busy_limit * cap_orig) >> SCHED_CAPACITY_SHIFT;
        if (rq->prev_runnable_sum >= busy_limit) {
            if (rq->is_busy == CPU_BUSY_CLR)
                rq->is_busy = CPU_BUSY_PREPARE;
            else if (rq->is_busy == CPU_BUSY_PREPARE)
                rq->is_busy = CPU_BUSY_SET;
        } else if (rq->is_busy != CPU_BUSY_CLR) {
            rq->is_busy = CPU_BUSY_CLR;
        }
    }

    trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);

    p->ravg.mark_start = wallclock;
}

  • update_cpu_busy_time

  • What does update_task_demand() do?

static void update_task_demand(struct task_struct *p, struct rq *rq,
         int event, u64 wallclock)
{
    u64 mark_start = p->ravg.mark_start;
    u64 delta, window_start = rq->window_start;
    int new_window, nr_full_windows;
    u32 window_size = walt_ravg_window;
    u32 window_scale = scale_exec_time(window_size, rq);

    new_window = mark_start < window_start;

Based on the account_wait_time flag of the task's stune group (st->account_wait_time), account_busy_for_task_demand() decides whether an event contributes demand (with account_wait_time = 0, events that transition the task towards running do not count as busy time); a sketch of this helper follows the listing below:
1. TASK_WAKE: never contributes demand.
2. Other events:
    account_wait_time = 0: PICK_NEXT_TASK / TASK_MIGRATE contribute no demand; PUT_PREV_TASK / TASK_UPDATE / IRQ_UPDATE do.
    account_wait_time = 1: every event (other than TASK_WAKE) contributes demand.

    if (!account_busy_for_task_demand(p, event)) {
        if (new_window) {
            /* If the time accounted isn't being accounted as
             * busy time, and a new window started, only the
             * previous window need be closed out with the
             * pre-existing demand. Multiple windows may have
             * elapsed, but since empty windows are dropped,
             * it is not necessary to account those. */
            update_history(rq, p, p->ravg.sum, 1, event);
            if (sysctl_sched_walt_cross_window_util)
                p->ravg.sum = 0;
        }
        if (sysctl_sched_walt_cross_window_util)
            p->ravg.sum_latest = 0;
        return;
    }

    if (!new_window) {
        /* The simple case - busy time contained within the existing
         * window. */
        add_to_task_demand(rq, p, wallclock - mark_start);

        goto done;
    }

    /* Busy time spans at least two windows. Temporarily rewind
     * window_start to first window boundary after mark_start. */
    delta = window_start - mark_start;
    nr_full_windows = div64_u64(delta, window_size);
    window_start -= (u64)nr_full_windows * (u64)window_size;

    /* Process (window_start - mark_start) first */
    add_to_task_demand(rq, p, window_start - mark_start);

    /* Push new sample(s) into task's demand history */
    update_history(rq, p, p->ravg.sum, 1, event);
    if (sysctl_sched_walt_cross_window_util)
        p->ravg.sum = p->ravg.sum_latest;
    if (nr_full_windows) {
        update_history(rq, p, window_scale,
                   nr_full_windows, event);
        if (sysctl_sched_walt_cross_window_util) {
            p->ravg.sum = window_scale;
            p->ravg.sum_latest = window_scale;
        }
    }
    /* Roll window_start back to current to process any remainder
     * in current window. */
    window_start += (u64)nr_full_windows * (u64)window_size;

    /* Process (wallclock - window_start) next */
    mark_start = window_start;
    add_to_task_demand(rq, p, wallclock - mark_start);

done:
    /* Update task demand in current window when policy is
     * WINDOW_STATS_MAX. The purpose is to create opportunity
     * for rising cpu freq when cr_avg is used for cpufreq
     */
    if (p->ravg.sum > p->ravg.demand &&
        walt_window_stats_policy == WINDOW_STATS_MAX) {
            if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
                if (task_on_rq_queued(p))
                    p->sched_class->fixup_cumulative_runnable_avg(
                        rq, p, p->ravg.sum);
                else if (rq->curr == p)
                    fixup_cum_window_demand(
                        rq, p->ravg.sum);
            }
            p->ravg.demand = p->ravg.sum;
    }
}
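A minimal sketch of account_busy_for_task_demand() implementing the rules above. Upstream 4.14 uses a global walt_account_wait_time; the stune-aware variant described here would read the flag from the task's stune group instead (treated as an assumed global below):

static int account_busy_for_task_demand(struct task_struct *p, int event)
{
       /* Idle and exiting tasks never accrue demand. */
       if (exiting_task(p) || is_idle_task(p))
               return 0;

       /*
        * A waking task is completing a stretch of non-busy time. If wait
        * time is not counted as busy time, a task that starts to run
        * (PICK_NEXT_TASK) or migrates is likewise completing non-busy time.
        */
       if (event == TASK_WAKE || (!walt_account_wait_time &&
                       (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
               return 0;

       return 1;
}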

  • update_history

When a task crosses into a new window, this function updates:
p->ravg.sum_history[]        //the task's 'sum' samples for the past n windows (sum is CIE/FIE-scaled busy time)
p->ravg.demand        //recomputed from the n history samples and the current window's p->ravg.sum according to the configured policy; it reflects the task's CPU demand over the past n windows (see the worked example after the listing below)

571 static void update_history(struct rq *rq, struct task_struct *p,
572              u32 runtime, int samples, int event)
573 {
574     u32 *hist = &p->ravg.sum_history[0];
575     int ridx, widx;
576     u32 max = 0, avg, demand;
577     u64 sum = 0;
578 
579     /* Ignore windows where task had no activity */
580     if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
581             goto done;
582 
583     /* Push new 'runtime' value onto stack */
584     widx = walt_ravg_hist_size - 1;
585     ridx = widx - samples;
586     for (; ridx >= 0; --widx, --ridx) {
587         hist[widx] = hist[ridx];
588         sum += hist[widx];
589         if (hist[widx] > max)
590             max = hist[widx];
591     }
592 
593     for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
594         hist[widx] = runtime;
595         sum += hist[widx];
596         if (hist[widx] > max)
597             max = hist[widx];
598     }
599 
600     p->ravg.sum = 0;
601 
602     if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
603         demand = runtime;
604     } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
605         demand = max;
606     } else {
607         avg = div64_u64(sum, walt_ravg_hist_size);
608         if (walt_window_stats_policy == WINDOW_STATS_AVG)
609             demand = avg;
610         else
611             demand = max(avg, runtime);
612     }                                           
613 
614     /*
615      * A throttled deadline sched class task gets dequeued without
616      * changing p->on_rq. Since the dequeue decrements hmp stats
617      * avoid decrementing it here again.
618      *
619      * When window is rolled over, the cumulative window demand
620      * is reset to the cumulative runnable average (contribution from
621      * the tasks on the runqueue). If the current task is dequeued
622      * already, it's demand is not included in the cumulative runnable
623      * average. So add the task demand separately to cumulative window
624      * demand.
625      */
If the task does not have the deadline policy, or is a dl task that has not been throttled (i.e. still unfinished past its deadline):
626     if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
627         if (task_on_rq_queued(p))        //p->on_rq == TASK_ON_RQ_QUEUED

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED   1
#define TASK_ON_RQ_MIGRATING    2

//each sched class has its own implementation; for CFS it is walt_fixup_cumulative_runnable_avg_fair
628             p->sched_class->fixup_cumulative_runnable_avg(rq, p, 
629                                       demand);
If rq->curr == p but p is no longer queued on the rq, its demand should be added to rq->cum_window_demand rather than rq->cumulative_runnable_avg (see comment lines 621~624 above):
630         else if (rq->curr == p)
631             fixup_cum_window_demand(rq, demand);
632     }
633 
634     p->ravg.demand = demand;
635 
636 done:
637     trace_walt_update_history(rq, p, runtime, samples, event);
638     return;
639 }
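A worked example of the four policies (hypothetical numbers): with walt_ravg_hist_size = 5, suppose that after the new sample is pushed hist = {6, 8, 5, 9, 4} (ms) and runtime = 6ms (the window just closed):

    WINDOW_STATS_RECENT:          demand = runtime = 6ms
    WINDOW_STATS_MAX:             demand = max = 9ms
    WINDOW_STATS_AVG:             demand = (6+8+5+9+4)/5 = 6.4ms
    WINDOW_STATS_MAX_RECENT_AVG:  demand = max(avg, runtime) = max(6.4, 6) = 6.4ms

Note that avg is taken over all walt_ravg_hist_size entries, including the just-pushed sample, so MAX_RECENT_AVG is max(average of the 5 windows, most recent window).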

  • walt_fixup_cumulative_runnable_avg_fair 

Updates cfs_rq->cumulative_runnable_avg, the cfs_rq's accumulated load, by task_load_delta.
Updates rq->cumulative_runnable_avg, the rq's accumulated load, by the same delta.
task_load_delta is the difference between the newly computed task demand and the previous p->ravg.demand.

static void walt_fixup_cumulative_runnable_avg_fair(struct rq *rq,
                            struct task_struct *p,
                            u64 new_task_load)
{
    struct cfs_rq *cfs_rq;
    struct sched_entity *se = &p->se;
    s64 task_load_delta = (s64)new_task_load - p->ravg.demand;

    for_each_sched_entity(se) {
        cfs_rq = cfs_rq_of(se);

        cfs_rq->cumulative_runnable_avg += task_load_delta;
        if (cfs_rq_throttled(cfs_rq))
            break;
    }

    /* Fix up rq only if we didn't find any throttled cfs_rq */
    if (!se)
        walt_fixup_cumulative_runnable_avg(rq, p, new_task_load);
}

  • walt_fixup_cumulative_runnable_avg

Updates rq->cumulative_runnable_avg        //Sum of the demand of all runnable tasks on this CPU. Represents the instantaneous load. Used in EAS task placement.
Updates rq->cum_window_demand        //Sum of the demand of tasks that ran in the current window. This signal is used to estimate the frequency. Used in EAS task placement for evaluating the energy difference.

walt_fixup_cumulative_runnable_avg(struct rq *rq,
                   struct task_struct *p, u64 new_task_load)
{
    s64 task_load_delta = (s64)new_task_load - task_load(p);

    rq->cumulative_runnable_avg += task_load_delta;
    if ((s64)rq->cumulative_runnable_avg < 0)
        panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
            task_load_delta, task_load(p));

    fixup_cum_window_demand(rq, task_load_delta);
}

  • fixup_cum_window_demand

   1     87  kernel/sched/walt.c <<walt_inc_cumulative_runnable_avg>>
             fixup_cum_window_demand(rq, p->ravg.demand);
   2    103  kernel/sched/walt.c <<walt_dec_cumulative_runnable_avg>>
             fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
   3    117  kernel/sched/walt.c <<walt_fixup_cumulative_runnable_avg>>
             fixup_cum_window_demand(rq, task_load_delta);
   4    631  kernel/sched/walt.c <<update_history>>
             fixup_cum_window_demand(rq, demand);
   5    882  kernel/sched/walt.c <<walt_fixup_busy_time>>
             fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
   6    883  kernel/sched/walt.c <<walt_fixup_busy_time>>
             fixup_cum_window_demand(dest_rq, p->ravg.demand);

Updates rq->cum_window_demand, the rq's demand in the current window: the sum of the demand of tasks that ran in the current window. This signal is used to estimate the frequency, and in EAS task placement to evaluate the energy difference.

static inline void fixup_cum_window_demand(struct rq *rq, s64 delta)
{
    rq->cum_window_demand += delta;
    if (unlikely((s64)rq->cum_window_demand < 0))
        rq->cum_window_demand = 0;
}

rq->cum_window_demand is also reset in update_window_start(); see lines 619~621 of update_history() above. When the window rolls over, the new window's rq->cum_window_demand inherits rq->cumulative_runnable_avg, which reflects the runnable load currently on the rq:

update_window_start(struct rq *rq, u64 wallclock)
{
    ...
    rq->cum_window_demand = rq->cumulative_runnable_avg;
}
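A minimal sketch of the full function, assuming the upstream 4.14 shape (vendor trees differ slightly). It rolls window_start forward by whole windows and returns the old value so that walt_update_task_ravg() can detect a rollover:

static u64 update_window_start(struct rq *rq, u64 wallclock)
{
       s64 delta;
       int nr_windows;
       u64 old_window_start = rq->window_start;

       delta = wallclock - rq->window_start;
       if (delta < walt_ravg_window)
               return old_window_start;        /* still inside the current window */

       /* Advance by as many whole windows as have elapsed. */
       nr_windows = div64_u64(delta, walt_ravg_window);
       rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;

       /* New window's demand baseline: the tasks currently runnable on this rq. */
       rq->cum_window_demand = rq->cumulative_runnable_avg;

       return old_window_start;
}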
