struct energy_env
/* energy_env: scratch state, cached per CPU (see get_eenv()), used to
 * estimate the energy impact of placing task @p on each candidate CPU.
 */
struct energy_env {
/* Utilization to move */
struct task_struct *p;
unsigned long util_delta;	/* estimated task utilization (filled from task_util_est()) */
unsigned long util_delta_boosted;	/* boosted task utilization (filled from boosted_task_util()) */
/* Mask of CPUs candidates to evaluate */
cpumask_t cpus_mask;
/* CPU candidates to evaluate */
struct eenv_cpu *cpu;
int eenv_cpu_count;	/* number of entries allocated in @cpu */
#ifdef DEBUG_EENV_DECISIONS
/* pointer to the memory block reserved
 * for debug on this CPU - there will be
 * sizeof(struct _eenv_debug) *
 * (EAS_CPU_CNT * EAS_EENV_DEBUG_LEVELS)
 * bytes allocated here.
 */
struct _eenv_debug *debug;
#endif
/*
 * Index (into energy_env::cpu) of the most energy efficient CPU for
 * the specified energy_env::task
 */
int next_idx;
int max_cpu_count;	/* candidate slots callers may fill (see get_eenv()) */
/* Support data */
struct sched_group *sg_top;
struct sched_group *sg_cap;
struct sched_group *sg;
};
/* _eenv_debug: one debug snapshot taken while evaluating a candidate CPU.
 * NOTE(review): field semantics below are inferred from their names —
 * confirm against the code that populates them.
 */
struct _eenv_debug {
unsigned long cap;
unsigned long norm_util;
unsigned long cap_energy;
unsigned long idle_energy;
unsigned long this_energy;
unsigned long this_busy_energy;
unsigned long this_idle_energy;
cpumask_t group_cpumask;
/* Variable-length tail: the allocation is extended so this array holds
 * num_possible_cpus() entries (see eenv_debug_size_per_dbg_entry()).
 */
unsigned long cpu_util[1];
};
struct eenv_cpu
/* eenv_cpu: per-candidate-CPU state for one energy_env evaluation. */
struct eenv_cpu {
/* CPU ID, must be in cpus_mask */
int cpu_id;
/*
 * Index (into sched_group_energy::cap_states) of the OPP the
 * CPU needs to run at if the task is placed on it.
 * This includes the both active and blocked load, due to
 * other tasks on this CPU, as well as the task's own
 * utilization.
 */
int cap_idx;
int cap;	/* capacity at cap_idx — presumably; verify against the filler */
/* Estimated system energy */
unsigned long energy;
/* Estimated energy variation wrt EAS_CPU_PRV */
long nrg_delta;
#ifdef DEBUG_EENV_DECISIONS
struct _eenv_debug *debug;	/* this candidate's slice of energy_env::debug */
int debug_idx;
#endif /* DEBUG_EENV_DECISIONS */
};
reset_eenv
reset_eenv simply resets the eenv structure while preserving the pieces allocated elsewhere: it restores eenv->eenv_cpu_count = cpu_count and the eenv->cpu array pointer.
/*
 * reset_eenv() - wipe this CPU's cached energy_env back to a clean state.
 *
 * The eenv_cpu array (and, with DEBUG_EENV_DECISIONS, the debug block)
 * are allocated elsewhere, so their pointers and the candidate count are
 * saved across the memset and restored afterwards.
 */
static inline void reset_eenv(struct energy_env *eenv)
{
	struct eenv_cpu *saved_cpus;
	int saved_count;
#ifdef DEBUG_EENV_DECISIONS
	struct _eenv_debug *saved_debug = eenv->debug;
	int idx;
#endif

	/* Preserve the externally-allocated pieces across the wipe. */
	saved_count = eenv->eenv_cpu_count;
	saved_cpus = eenv->cpu;

	memset(eenv, 0, sizeof(*eenv));
	eenv->cpu = saved_cpus;
	memset(saved_cpus, 0, saved_count * sizeof(*saved_cpus));
	eenv->eenv_cpu_count = saved_count;

#ifdef DEBUG_EENV_DECISIONS
	memset(saved_debug, 0, eenv_debug_size());
	eenv->debug = saved_debug;
	/* Re-point each candidate at its slice of the debug block. */
	for (idx = 0; idx < eenv->eenv_cpu_count; idx++)
		eenv->cpu[idx].debug =
			eenv_debug_percpu_debug_env_ptr(saved_debug, idx);
#endif
}
/* Given the base of this CPU's global _eenv_debug block, return a pointer
 * to the debug area reserved for candidate slot @cpu_idx.
 */
static inline struct _eenv_debug *eenv_debug_percpu_debug_env_ptr(struct _eenv_debug *base, int cpu_idx)
{
	/* Each candidate owns eenv_debug_size_per_cpu_entry() bytes. */
	size_t offset = (size_t)cpu_idx * eenv_debug_size_per_cpu_entry();

	return (struct _eenv_debug *)((char *)base + offset);
}
/* Bytes of debug storage one candidate CPU needs: one extended
 * _eenv_debug record per debug level.
 */
static inline int eenv_debug_size_per_cpu_entry(void)
{
	/* each cpu struct has an array of _eenv_debug structs
	 * which have an array of unsigned longs at the end -
	 * the allocation should be extended so that there are
	 * at least 'num_possible_cpus' entries in the array.
	 */
	return eenv_debug_size_per_dbg_entry() * EAS_EENV_DEBUG_LEVELS;
}
/* Size of one _eenv_debug record, extended so that its trailing
 * cpu_util[] array can hold one entry per possible CPU.
 */
static inline int eenv_debug_size_per_dbg_entry(void)
{
	size_t tail = num_possible_cpus() * sizeof(unsigned long);

	return sizeof(struct _eenv_debug) + tail;
}
get_eenv
eenv->util_delta = task_util_est(p);
eenv->util_delta_boosted = boosted_task_util(p);
It fetches the task's estimated utilization and its boosted utilization.
/*
* get_eenv - reset the eenv struct cached for this CPU
*
* When the eenv is returned, it is configured to do
* energy calculations for the maximum number of CPUs
* the task can be placed on. The prev_cpu entry is
* filled in here. Callers are responsible for adding
* other CPU candidates up to eenv->max_cpu_count.
*/
/*
 * When the eenv is returned, it is configured to perform energy
 * calculations for the maximum number of CPUs the task can be placed on.
 * The prev_cpu entry is filled in here; callers are responsible for
 * adding further CPU candidates, up to eenv->max_cpu_count.
 */
/*
 * get_eenv() - hand back this CPU's cached energy_env, reset and seeded
 * for a placement decision about task @p whose previous CPU is @prev_cpu.
 */
static inline struct energy_env *get_eenv(struct task_struct *p, int prev_cpu)
{
	struct energy_env *eenv = &(per_cpu(eenv_cache, smp_processor_id()));
	cpumask_t allowed_online;
	int idx;

	reset_eenv(eenv);

	eenv->p = p;
	/* use boosted task util for capacity selection
	 * during energy calculation, but unboosted task
	 * util for group utilization calculations
	 */
	eenv->util_delta = task_util_est(p);
	eenv->util_delta_boosted = boosted_task_util(p);

	/* Candidates are limited to online CPUs the task may run on. */
	cpumask_and(&allowed_online, &p->cpus_allowed, cpu_online_mask);
	eenv->max_cpu_count = cpumask_weight(&allowed_online);

	/* Mark every candidate slot unused, then seed slot EAS_CPU_PRV
	 * with the task's previous CPU.
	 */
	for (idx = 0; idx < eenv->max_cpu_count; idx++)
		eenv->cpu[idx].cpu_id = -1;
	eenv->cpu[EAS_CPU_PRV].cpu_id = prev_cpu;
	eenv->next_idx = EAS_CPU_PRV;

	return eenv;
}
#define EAS_CPU_PRV 0
wake_wide
/*
* Detect M:N waker/wakee relationships via a switching-frequency heuristic.
*
* A waker of many should wake a different task than the one last awakened
* at a frequency roughly N times higher than one of its wakees.
*
* In order to determine whether we should let the load spread vs consolidating
* to shared cache, we look for a minimum 'flip' frequency of llc_size in one
 * partner, and a factor of llc_size higher frequency in the other.
*
* With both conditions met, we can be relatively sure that the relationship is
* non-monogamous, with partner count exceeding socket size.
*
* Waker/wakee being client/server, worker/dispatcher, interrupt source or
* whatever is irrelevant, spread criteria is apparent partner count exceeds
* socket size.
*/
// heuristic on the number of distinct wakees
/*
 * wake_wide() - return 1 when the waker/wakee flip counts (or an explicit
 * sibling hint) suggest an M:N wakeup pattern wider than the LLC, so the
 * wakee should be spread rather than pulled onto the waker's cache domain.
 */
static int wake_wide(struct task_struct *p, int sibling_count_hint)
{
	unsigned int flips_hi = current->wakee_flips;
	unsigned int flips_lo = p->wakee_flips;
	int llc_size = this_cpu_read(sd_llc_size);

	/* Enough siblings woken at once: spread without consulting flips. */
	if (sibling_count_hint >= llc_size)
		return 1;

	/* Order so that flips_hi holds the larger flip count. */
	if (flips_hi < flips_lo)
		swap(flips_hi, flips_lo);

	/* Wide only when the slower partner flips at least llc_size times
	 * and the faster partner flips llc_size times as often again.
	 */
	return flips_lo >= (unsigned int)llc_size &&
	       flips_hi >= flips_lo * llc_size;
}
wake_cap
/*
* Disable WAKE_AFFINE in the case where task @p doesn't fit in the
* capacity of either the waking CPU