Kernel Scheduler Study, Part 8: Load Tracking

  1. Overview
    How does the scheduler update the load of a task or a runqueue (rq)? When do these updates happen? And what metric is used to measure task and CPU load? In CFS these questions are answered by PELT (Per-Entity Load Tracking), which maintains geometrically decaying averages such as load_avg and util_avg for every sched_entity and cfs_rq.
     
  2.  Code Walkthrough
      Load updates are driven mainly by the following function:
    /* Update task and its cfs_rq load average */
    static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
    {
    	u64 now = cfs_rq_clock_pelt(cfs_rq); /* get the current PELT clock */
    	int decayed;
    
    	/*
    	 * Track task load average for carrying it to new CPU after migrated, and
    	 * track group sched_entity load average for task_h_load calc in migration
    	 */
    	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
    		__update_load_avg_se(now, cfs_rq, se);
    
    	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
    	decayed |= propagate_entity_load_avg(se);
    
    	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
    
    		/*
    		 * DO_ATTACH means we're here from enqueue_entity().
    		 * !last_update_time means we've passed through
    		 * migrate_task_rq_fair() indicating we migrated.
    		 *
    		 * IOW we're enqueueing a task on a new CPU.
    		 */
    		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
    		update_tg_load_avg(cfs_rq, 0);
    
    	} else if (decayed) {
    		cfs_rq_util_change(cfs_rq, 0);
    
    		if (flags & UPDATE_TG)
    			update_tg_load_avg(cfs_rq, 0);
    	}
    }
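    The flags encode the caller's context: SKIP_AGE_LOAD suppresses aging of the entity's own average, DO_ATTACH marks an enqueue that may follow a migration, and UPDATE_TG asks for the task-group average to be refreshed as well. For reference, a sketch of the typical call sites (based on v5.4-era kernels; the exact flag combinations vary between versions):

    /* enqueue_entity(): the entity may have just migrated, so allow an attach */
    update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);

    /* dequeue_entity(): age the average and propagate to the task group */
    update_load_avg(cfs_rq, se, UPDATE_TG);

    /* entity_tick(): periodic refresh of the currently running entity */
    update_load_avg(cfs_rq, curr, UPDATE_TG);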
    a. Obtaining the current PELT clock:
    #ifdef CONFIG_CFS_BANDWIDTH
    /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
    static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
    {
    	if (unlikely(cfs_rq->throttle_count))
    		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
    
    	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
    }
    #else
    static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
    {
    	return rq_clock_pelt(rq_of(cfs_rq));
    }
    #endif
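    With CFS bandwidth control enabled, the PELT clock must not advance while the cfs_rq is throttled, because the group does no work during that time. While throttled (throttle_count is non-zero), the function returns a value frozen at the moment of throttling; once the group runs again, the total time it has spent throttled (throttled_clock_task_time) is subtracted, so the PELT clock only counts time the group was actually runnable. For example, if rq_clock_pelt() reads 100ms and the cfs_rq has accumulated 30ms of throttled time, cfs_rq_clock_pelt() returns 70ms.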
    
    static inline u64 rq_clock_pelt(struct rq *rq)
    {
    	lockdep_assert_held(&rq->lock);
    	assert_clock_updated(rq);
    
    	return rq->clock_pelt - rq->lost_idle_time;
    }
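    rq_clock_pelt() hands back the scaled clock maintained by update_rq_clock_pelt() below, minus lost_idle_time. Roughly speaking, lost_idle_time accumulates the deficit between clock_task and clock_pelt whenever the rq is so loaded that it would have had no idle time even at maximum capacity; that "stolen" idle time can never be recovered, so it is written off instead of being synced away at the next idle.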
    
    /*
     * The clock_pelt scales the time to reflect the effective amount of
     * computation done during the running delta time but then sync back to
     * clock_task when rq is idle.
     *
     *
     * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
     * @ max capacity  ------******---------------******---------------
     * @ half capacity ------************---------************---------
     * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
     *
     */
    static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
    {
    	if (unlikely(is_idle_task(rq->curr))) {
    		/* The rq is idle, we can sync to clock_task */
    		rq->clock_pelt  = rq_clock_task(rq);
    		return;
    	}
    
    	/*
    	 * When a rq runs at a lower compute capacity, it will need
    	 * more time to do the same amount of work than at max
    	 * capacity. In order to be invariant, we scale the delta to
    	 * reflect how much work has been really done.
    	 * Running longer results in stealing idle time that will
    	 * disturb the load signal compared to max capacity. This
    	 * stolen idle time will be automatically reflected when the
    	 * rq will be idle and the clock will be synced with
    	 * rq_clock_task.
    	 */
    
    	/*
    	 * Scale the elapsed time to reflect the real amount of
    	 * computation
    	 */
    	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
    	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
    
    	rq->clock_pelt += delta;
    }
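    To make the scaling concrete, here is a minimal user-space sketch (assumed values, not kernel code). cap_scale() mirrors the kernel macro of the same name, ((v) * (cap) >> SCHED_CAPACITY_SHIFT), where SCHED_CAPACITY_SHIFT is 10, so 1024 represents full capacity:

    #include <stdio.h>
    #include <stdint.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define cap_scale(v, cap) ((v) * (cap) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
    	int64_t delta     = 4000000; /* 4ms of wall-clock time, in ns */
    	uint64_t cpu_cap  = 512;     /* a CPU at half the maximum capacity */
    	uint64_t freq_cap = 512;     /* currently at half the maximum frequency */

    	/* the same two-step scaling as in update_rq_clock_pelt() */
    	delta = cap_scale(delta, cpu_cap);
    	delta = cap_scale(delta, freq_cap);

    	/* 4ms * 1/2 * 1/2 = 1ms of work at full capacity and frequency */
    	printf("clock_pelt advances by %lld ns\n", (long long)delta);
    	return 0;
    }

    In other words, 4ms on a half-capacity CPU running at half frequency advances clock_pelt by only 1ms, which is exactly the frequency- and capacity-invariant amount of computation performed.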
    





     