1 另一个复现问题的方式
首先我们构造如下 msleep.sh脚本,用于触发 bug 复现:
#!/bin/sh
# Repeatedly rewrite a small file so this task keeps blocking and waking.
# Frequent short sleep/wakeup cycles (and the migrations they cause) are
# what trigger load propagation and make the tg_load_avg bug reproducible.
for i in `seq 1 10000`
do
echo "aaaaaaaaaaaaaa" > tmp.txt
done
sleep 1
接着是 usetup.sh,用于清理测试:
#!/bin/sh
# Tear down the reproduction environment: stop the CPU hog, then remove
# the cgroup hierarchy created by setup.sh.
killall stress
# Remove child cgroups before the parent — rmdir fails on a non-empty cgroup.
rmdir /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32/zy_test_l2_1024
rmdir /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32/zy_test_l2_32
rmdir /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32
最后是 setup.sh,执行测试程序:
#!/bin/sh
# Build a two-level cgroup hierarchy, pin a CPU hog (stress) and a
# frequently-sleeping script (msleep.sh) into sibling groups, then sample
# /proc/sched_debug once per second to observe tg_load_avg behavior.
#
# POSIX fixes vs. the original: `function name() {` is a bashism that
# fails under dash, and POSIX trap takes signal names without the SIG
# prefix (`INT`, not `SIGINT`).
handle_ctrl_c() {
echo "*************************"
echo " do usetup, exit"
echo "*************************"
./usetup.sh
exit 1
}
trap handle_ctrl_c INT
mkdir /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32
echo 32 > /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32/cpu.shares
mkdir /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32/zy_test_l2_32
echo 32 > /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32/zy_test_l2_32/cpu.shares
mkdir /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32/zy_test_l2_1024
# Hierarchy (cpu.shares):
#   zy_test_l1_32      32
#     zy_test_l2_32    32    -> stress (SCHED_BATCH hog pinned to cpu 10)
#     zy_test_l2_1024  1024  -> msleep.sh (default shares are 1024)
taskset -c 10 chrt -b 0 stress -c 1 -t 1000 &
stress_pid=`pgrep stress`
# stress forks a worker; the second PID reported is the hog itself.
r_stress_pid=`echo $stress_pid | awk '{print $2}'`
echo $r_stress_pid > /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32/zy_test_l2_32/tasks
./msleep.sh &
sleep_pid=$!
echo $sleep_pid > /sys/fs/cgroup/cpu,cpuacct/zy_test_l1_32/zy_test_l2_1024/tasks
# Sample the zy_* cfs_rq tg_load_avg_contrib / tg_load_avg lines every second.
for i in `seq 1 1000`
do
echo "===================================`date`============================================";
cat /proc/sched_debug | grep -A 30 "cfs_rq.*zy" | grep -E "zy|tg_load_avg";
sleep 1;
done
接着开始测试,检查输出:
下面是测试中正常程序的输出:
# ./setup.sh
stress: info: [64192] dispatching hogs: 1 cpu, 0 io, 0 vm, 0 hdd
===================================Mon Mar 20 14:27:08 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 982
.tg_load_avg : 982
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 30
.tg_load_avg : 1039
cfs_rq[72]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 23301
cfs_rq[72]:/zy_test_l1_32
.tg_load_avg_contrib : 44
.tg_load_avg : 2656
cfs_rq[73]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1015
.tg_load_avg : 22288
cfs_rq[73]:/zy_test_l1_32
.tg_load_avg_contrib : 205
.tg_load_avg : 2755
cfs_rq[74]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 22288
cfs_rq[74]:/zy_test_l1_32
.tg_load_avg_contrib : 200
.tg_load_avg : 2788
cfs_rq[75]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 22288
cfs_rq[75]:/zy_test_l1_32
.tg_load_avg_contrib : 314
.tg_load_avg : 2617
cfs_rq[76]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 22288
cfs_rq[76]:/zy_test_l1_32
.tg_load_avg_contrib : 315
.tg_load_avg : 2184
cfs_rq[77]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 22288
cfs_rq[77]:/zy_test_l1_32
.tg_load_avg_contrib : 37
.tg_load_avg : 1718
cfs_rq[79]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 23301
cfs_rq[79]:/zy_test_l1_32
.tg_load_avg_contrib : 37
.tg_load_avg : 1218
cfs_rq[80]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 23301
cfs_rq[80]:/zy_test_l1_32
.tg_load_avg_contrib : 38
.tg_load_avg : 1046
cfs_rq[81]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 23301
cfs_rq[81]:/zy_test_l1_32
.tg_load_avg_contrib : 38
.tg_load_avg : 962
cfs_rq[82]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 23301
cfs_rq[82]:/zy_test_l1_32
.tg_load_avg_contrib : 43
.tg_load_avg : 940
cfs_rq[83]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 1013
cfs_rq[83]:/zy_test_l1_32
.tg_load_avg_contrib : 38
.tg_load_avg : 934
cfs_rq[84]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 4052
cfs_rq[84]:/zy_test_l1_32
.tg_load_avg_contrib : 37
.tg_load_avg : 959
cfs_rq[85]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 8104
cfs_rq[85]:/zy_test_l1_32
.tg_load_avg_contrib : 39
.tg_load_avg : 985
cfs_rq[86]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 12156
cfs_rq[86]:/zy_test_l1_32
.tg_load_avg_contrib : 39
.tg_load_avg : 1001
cfs_rq[87]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 14182
cfs_rq[87]:/zy_test_l1_32
.tg_load_avg_contrib : 43
.tg_load_avg : 1013
cfs_rq[88]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 18234
cfs_rq[88]:/zy_test_l1_32
.tg_load_avg_contrib : 42
.tg_load_avg : 1026
cfs_rq[89]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 22286
cfs_rq[89]:/zy_test_l1_32
.tg_load_avg_contrib : 42
.tg_load_avg : 2008
cfs_rq[90]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 21273
cfs_rq[90]:/zy_test_l1_32
.tg_load_avg_contrib : 43
.tg_load_avg : 3315
cfs_rq[91]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 22286
cfs_rq[91]:/zy_test_l1_32
.tg_load_avg_contrib : 43
.tg_load_avg : 3929
cfs_rq[92]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 22286
cfs_rq[92]:/zy_test_l1_32
.tg_load_avg_contrib : 43
.tg_load_avg : 4085
cfs_rq[93]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 23299
cfs_rq[93]:/zy_test_l1_32
.tg_load_avg_contrib : 43
.tg_load_avg : 4287
cfs_rq[94]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 22286
cfs_rq[94]:/zy_test_l1_32
.tg_load_avg_contrib : 43
.tg_load_avg : 4393
cfs_rq[95]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1013
.tg_load_avg : 22286
cfs_rq[95]:/zy_test_l1_32
.tg_load_avg_contrib : 1015
.tg_load_avg : 4429
...
...
...
===================================Mon Mar 20 14:27:12 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 1014
.tg_load_avg : 1014
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 31
.tg_load_avg : 356
cfs_rq[72]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[72]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[73]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[73]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[74]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[74]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[75]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[75]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[76]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[76]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[77]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 221
.tg_load_avg : 437
cfs_rq[77]:/zy_test_l1_32
.tg_load_avg_contrib : 71
.tg_load_avg : 203
cfs_rq[78]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[78]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[79]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[79]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[80]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[80]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[81]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 437
cfs_rq[81]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[82]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[82]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[83]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[83]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[84]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[84]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[85]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[85]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[86]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[86]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[87]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 176
.tg_load_avg : 397
cfs_rq[87]:/zy_test_l1_32
.tg_load_avg_contrib : 101
.tg_load_avg : 203
cfs_rq[88]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[88]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[89]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[89]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[90]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[90]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[91]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[91]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[92]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[92]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[93]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[93]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[94]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[94]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
cfs_rq[95]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 397
cfs_rq[95]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 203
...
...
...
===================================Mon Mar 20 14:27:27 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 1014
.tg_load_avg : 1014
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 32
.tg_load_avg : 32
^C*************************
do usetup, exit
*************************
可以看到 msleep.sh 任务退出后,只剩下 stress 运行在 zy_test_l2_32 任务组,此时 zy_test_l1_32 的任务组总负载为 32,和设置的值一致,zy_test_l2_1024 上 msleep.sh 贡献的负载全部被正确更新移除。
下面是测试程序中触发 bug 的输出:
~/zy/fair_new# ./setup.sh
stress: info: [55205] dispatching hogs: 1 cpu, 0 io, 0 vm, 0 hdd
===================================Fri Mar 17 11:41:38 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 982
.tg_load_avg : 982
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 2369
cfs_rq[48]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[48]:/zy_test_l1_32
.tg_load_avg_contrib : 1008
.tg_load_avg : 23833
cfs_rq[49]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[49]:/zy_test_l1_32
.tg_load_avg_contrib : 1015
.tg_load_avg : 21799
cfs_rq[50]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[50]:/zy_test_l1_32
.tg_load_avg_contrib : 1019
.tg_load_avg : 21799
cfs_rq[51]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[51]:/zy_test_l1_32
.tg_load_avg_contrib : 1005
.tg_load_avg : 22806
cfs_rq[52]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[52]:/zy_test_l1_32
.tg_load_avg_contrib : 1005
.tg_load_avg : 23306
cfs_rq[53]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[53]:/zy_test_l1_32
.tg_load_avg_contrib : 1013
.tg_load_avg : 23306
cfs_rq[54]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 1024
cfs_rq[54]:/zy_test_l1_32
.tg_load_avg_contrib : 1017
.tg_load_avg : 22222
cfs_rq[55]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 1024
cfs_rq[55]:/zy_test_l1_32
.tg_load_avg_contrib : 1007
.tg_load_avg : 21723
cfs_rq[56]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[56]:/zy_test_l1_32
.tg_load_avg_contrib : 1012
.tg_load_avg : 20220
cfs_rq[57]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[57]:/zy_test_l1_32
.tg_load_avg_contrib : 1011
.tg_load_avg : 19717
cfs_rq[58]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[58]:/zy_test_l1_32
.tg_load_avg_contrib : 1015
.tg_load_avg : 18194
cfs_rq[59]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[59]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 16668
cfs_rq[60]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[60]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 15665
cfs_rq[61]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[61]:/zy_test_l1_32
.tg_load_avg_contrib : 959
.tg_load_avg : 14653
cfs_rq[62]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 2048
cfs_rq[62]:/zy_test_l1_32
.tg_load_avg_contrib : 503
.tg_load_avg : 13641
cfs_rq[63]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 3072
cfs_rq[63]:/zy_test_l1_32
.tg_load_avg_contrib : 505
.tg_load_avg : 12637
cfs_rq[64]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1024
.tg_load_avg : 3072
cfs_rq[64]:/zy_test_l1_32
.tg_load_avg_contrib : 506
.tg_load_avg : 12636
cfs_rq[65]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 4096
cfs_rq[65]:/zy_test_l1_32
.tg_load_avg_contrib : 508
.tg_load_avg : 13648
cfs_rq[66]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 4096
cfs_rq[66]:/zy_test_l1_32
.tg_load_avg_contrib : 509
.tg_load_avg : 14094
cfs_rq[67]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1024
.tg_load_avg : 4096
cfs_rq[67]:/zy_test_l1_32
.tg_load_avg_contrib : 501
.tg_load_avg : 14084
cfs_rq[68]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 4096
cfs_rq[68]:/zy_test_l1_32
.tg_load_avg_contrib : 502
.tg_load_avg : 14084
cfs_rq[69]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 4096
cfs_rq[69]:/zy_test_l1_32
.tg_load_avg_contrib : 503
.tg_load_avg : 14084
cfs_rq[70]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 4096
cfs_rq[70]:/zy_test_l1_32
.tg_load_avg_contrib : 505
.tg_load_avg : 13281
cfs_rq[71]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 4096
cfs_rq[71]:/zy_test_l1_32
.tg_load_avg_contrib : 506
.tg_load_avg : 13273
cfs_rq[78]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 3072
cfs_rq[78]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 9666
...
...
...
===================================Fri Mar 17 11:41:42 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 1017
.tg_load_avg : 1017
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 31
.tg_load_avg : 19632
cfs_rq[48]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 942
cfs_rq[48]:/zy_test_l1_32
.tg_load_avg_contrib : 1009
.tg_load_avg : 21170
cfs_rq[49]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 943
cfs_rq[49]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 22095
cfs_rq[50]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 943
cfs_rq[50]:/zy_test_l1_32
.tg_load_avg_contrib : 1010
.tg_load_avg : 22095
cfs_rq[51]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 943
cfs_rq[51]:/zy_test_l1_32
.tg_load_avg_contrib : 1004
.tg_load_avg : 22022
cfs_rq[52]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 943
cfs_rq[52]:/zy_test_l1_32
.tg_load_avg_contrib : 993
.tg_load_avg : 22022
cfs_rq[53]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 945
cfs_rq[53]:/zy_test_l1_32
.tg_load_avg_contrib : 934
.tg_load_avg : 21942
cfs_rq[54]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1
.tg_load_avg : 945
cfs_rq[54]:/zy_test_l1_32
.tg_load_avg_contrib : 918
.tg_load_avg : 21942
cfs_rq[55]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 946
cfs_rq[55]:/zy_test_l1_32
.tg_load_avg_contrib : 933
.tg_load_avg : 20925
cfs_rq[56]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 946
cfs_rq[56]:/zy_test_l1_32
.tg_load_avg_contrib : 0
.tg_load_avg : 20925
cfs_rq[57]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 946
cfs_rq[57]:/zy_test_l1_32
.tg_load_avg_contrib : 926
.tg_load_avg : 20925
cfs_rq[58]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1
.tg_load_avg : 946
cfs_rq[58]:/zy_test_l1_32
.tg_load_avg_contrib : 919
.tg_load_avg : 21864
cfs_rq[59]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 946
cfs_rq[59]:/zy_test_l1_32
.tg_load_avg_contrib : 996
.tg_load_avg : 21864
cfs_rq[60]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 947
cfs_rq[60]:/zy_test_l1_32
.tg_load_avg_contrib : 932
.tg_load_avg : 21787
cfs_rq[61]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 947
cfs_rq[61]:/zy_test_l1_32
.tg_load_avg_contrib : 932
.tg_load_avg : 21787
cfs_rq[62]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 947
cfs_rq[62]:/zy_test_l1_32
.tg_load_avg_contrib : 925
.tg_load_avg : 21787
cfs_rq[63]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 950
cfs_rq[63]:/zy_test_l1_32
.tg_load_avg_contrib : 1011
.tg_load_avg : 17577
cfs_rq[64]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 950
cfs_rq[64]:/zy_test_l1_32
.tg_load_avg_contrib : 939
.tg_load_avg : 17577
cfs_rq[65]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 950
cfs_rq[65]:/zy_test_l1_32
.tg_load_avg_contrib : 938
.tg_load_avg : 17577
cfs_rq[66]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 950
cfs_rq[66]:/zy_test_l1_32
.tg_load_avg_contrib : 932
.tg_load_avg : 17577
cfs_rq[67]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 952
cfs_rq[67]:/zy_test_l1_32
.tg_load_avg_contrib : 1010
.tg_load_avg : 17517
cfs_rq[68]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 952
cfs_rq[68]:/zy_test_l1_32
.tg_load_avg_contrib : 927
.tg_load_avg : 17517
cfs_rq[69]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1
.tg_load_avg : 952
cfs_rq[69]:/zy_test_l1_32
.tg_load_avg_contrib : 925
.tg_load_avg : 17517
cfs_rq[70]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1
.tg_load_avg : 952
cfs_rq[70]:/zy_test_l1_32
.tg_load_avg_contrib : 938
.tg_load_avg : 17517
cfs_rq[71]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 1
.tg_load_avg : 954
cfs_rq[71]:/zy_test_l1_32
.tg_load_avg_contrib : 930
.tg_load_avg : 18456
===================================Fri Mar 17 11:41:43 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 1017
.tg_load_avg : 1017
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 32
.tg_load_avg : 1028
cfs_rq[59]:/zy_test_l1_32/zy_test_l2_1024
.tg_load_avg_contrib : 0
.tg_load_avg : 0
cfs_rq[59]:/zy_test_l1_32
.tg_load_avg_contrib : 996
.tg_load_avg : 1027
===================================Fri Mar 17 11:41:44 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 1017
.tg_load_avg : 1017
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 31
.tg_load_avg : 1027
===================================Fri Mar 17 11:41:48 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 1017
.tg_load_avg : 1017
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 31
.tg_load_avg : 1027
===================================Fri Mar 17 11:41:49 CST 2023============================================
cfs_rq[10]:/zy_test_l1_32/zy_test_l2_32
.tg_load_avg_contrib : 1017
.tg_load_avg : 1017
cfs_rq[10]:/zy_test_l1_32
.tg_load_avg_contrib : 31
.tg_load_avg : 1027
可以看到在触发 bug 后,即便 zy_test_l2_1024 上的 sleep 任务退出了,zy_test_l1_32 上的 tg_load_avg 还是等于 1027,比实际 zy_test_l2_32 上贡献的 31 多了 996,多出来的负载则是 zy_test_l2_1024 上 sleep 程序贡献的,而因为触发了 bug,导致这部分负载没有被更新掉反而是被遗留在了 zy_test_l1_32 任务组上。
2 patch-2 修复问题描述
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f4795b8008415..e7c8277e3d54a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8029,7 +8029,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
- update_load_avg(cfs_rq_of(se), se, 0);
+ update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
// 对应代码如下
for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
struct sched_entity *se;
/* throttled entities do not contribute to load */
if (throttled_hierarchy(cfs_rq))
continue;
if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq)) ------------------(1)
update_tg_load_avg(cfs_rq, 0);
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se)) ----------------------------------------------(2)
update_load_avg(cfs_rq_of(se), se, 0);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
* decayed cfs_rqs linger on the list.
*/
if (cfs_rq_is_decayed(cfs_rq)) --------------------------------------------------(3)
list_del_leaf_cfs_rq(cfs_rq);
/* Don't need periodic decay once load/util_avg are null */
if (cfs_rq_has_blocked(cfs_rq))
done = false;
}
可以看到,patch 的功能是在 __update_blocked_fair
期间对 child cfs_rq
进行负载更新的同时对对应的任务组实体 se 也进行更新,添加UPDATE_TG
。
那为什么之前 update_load_avg
标记为 0呢?
(1)首先 __update_blocked_fair
中使用 for_each_leaf_cfs_rq_safe
对每个 cpu 的 cfs_rq 队列自底向上依次进行遍历,在遍历过程中更新每个 cfs_rq 的负载并且如果负载有发生变化则会将该部分负载同步到对应任务组的load_avg中。
(2)接着将cfs_rq的负载变化同步到对应的 se 中,skip_blocked_update
判断对应 se 是否有负载或者所属的cfs_rq是否有广播的负载变化,如果有则会返回false,表示需要对se调用update_load_avg
更新对应cfs_rq和se的负载。
(3)cfs_rq_is_decayed
主要是判断cfs_rq是否还有权重,负载等信息,如果没有权重或者负载,说明该cfs_rq没有任务运行,过去负载也衰减为零了,那么该cfs_rq不再需要参与负载更新,所以在这里将其移除rq list队列。
那么为什么在(2)中 update_load_avg
第三个参数为零(该参数表示是否需要对对应的任务组负载进行更新),for_each_leaf_cfs_rq_safe
属于自底向上的依次遍历,那么在更新完child cfs_rq 后,在遍历下一个cfs_rq时拿到的即是 parent cfs_rq,因为我们每次都会对cfs_rq进行负载更新,所以在对 se 负载同步和广播负载时我们不需要多余的去更新任务组负载,反正遍历下一个节点时,如果负载有更新均会更新到任务组负载。
那为什么 patch 又对这里添加了 UPDATE_TG呢,同样,根据 patch 描述,有如下场景:
__update_blocked_fair
...
for_each_leaf_cfs_rq_safe: child cfs_rq
update cfs_rq_load_avg() for child cfs_rq
...
update_load_avg(cfs_rq_of(se), se, 0)
...
update cfs_rq_load_avg() for parent cfs_rq
-propagation of child's load makes parent cfs_rq->load_sum
becoming null
-UPDATE_TG is not set so it doesn't update parent
cfs_rq->tg_load_avg_contrib
..
for_each_leaf_cfs_rq_safe: parent cfs_rq
update cfs_rq_load_avg() for parent cfs_rq
- nothing to do because parent cfs_rq has already been updated
recently so cfs_rq->tg_load_avg_contrib is not updated
...
parent cfs_rq is decayed
list_del_leaf_cfs_rq parent cfs_rq
- but it still contibutes to tg->load_avg
we must set UPDATE_TG flags when propagting pending load to the parent
意思是说,我们在 child cfs_rq 进行更新时,同时会去同步 se,此时如果底层有广播的负载向上传播,那么可能导致parent cfs_rq->load_sum 为零,而此时没有UPDATE_TG
标记我们不会向任务组更新负载;那么在对 parent cfs_rq 进行遍历时,由于 parent cfs_rq 在上一次同步 se 时已被更新,这次便不会再更新到 cfs_rq,对应的任务组负载也不会更新,cfs_rq 上残留的负载无法同步到任务组负载。而由于 cfs_rq_is_decayed
中只对 load_sum
进行了判断,而没有检测 load_avg
来判断是否需要删除cfs_rq,根据 patch 描述在向上广播负载时可能会导致 load_sum
为零对应load_avg
不为零,而 cfs_rq 由于 load_sum 为零被移除,那么会导致任务组的 load_avg 更新延迟或者出现问题,最后影响cfs公平性。添加 UPDATE_TG
后,保证在对 se 同步时会更新对应 parent cfs_rq 的任务组负载,那么即便遍历到下一次没有 cfs_rq 更新我们也可以保证对应任务组负载得到正确更新。
3 patch-2 修复逻辑
3.1 对 child cfs_rq 的更新
首先是在 child cfs_rq 的更新中有以下逻辑:
for_each_leaf_cfs_rq_safe(child cfs_rq)
-> update_cfs_rq_load_avg -----------------------(1)
-> update_tg_load_avg -------------------------(2)
-> update_load_avg ------------------------------(3)
-> if (cfs_rq_is_decayed(cfs_rq))
list_del_leaf_cfs_rq(cfs_rq); -------------(4)
(1)对 child 的 cfs_rq 进行负载更新,如果负载有变化则会调用(2) update_tg_load_avg 来对任务组负载进行更新。
接着如果 child cfs_rq 负载有广播的或者负载还存在的则会同步调用(3) update_load_avg 去更新 cfs_rq 对应的 se,在这里会对 parent cfs_rq 进行更新,但是这里没有 UPDATE_TG 标记,所以对 parent cfs_rq 负载更新不会同步到任务组 load_avg,而是在遍历 parent 时才完成更新。
接着是在 parent cfs_rq 的更新中有相同的上述逻辑,不同之处在于(1),这个时候存在一种可能update_cfs_rq_load_avg 并没有负载变化所以不会调用(2)来更新负载到任务组load_avg,而且在之后(4)逻辑中由于广播负载关系可能造成 cfs_rq->load_sum 等于零而 cfs_rq->load_avg 不等于零的情况导致该 cfs_rq 节点却被移出链表不再更新 cfs_rq 的负载,那么看起来就是上面打印输出中的 zy_test_l1_32 中 tg_load_avg 高于实际的 tg_load_avg_contrib。
根据上述描述为什么会有(1)无负载变化并且 load_sum 等于零而实际的 load_avg 还不等的情况呢?
首先看(3)中的逻辑:
update_load_avg
-> __update_load_avg_se
-> update_cfs_rq_load_avg
-> propagate_entity_load_avg
update_load_avg 分别根据条件更新调度实体 se 负载,更新 cfs_rq 负载,以及处理更新由 child cfs_rq 广播上来的负载,并且根据负载变化和 flags 来判断是否要更新任务组负载,在(3)的条件下这里不会去更新任务组负载。着重看一下 propagate_entity_load_avg
函数逻辑:
/*
 * Propagate pending load changes from a group entity's child cfs_rq
 * (gcfs_rq) up into the parent cfs_rq this se is queued on.
 *
 * Returns 1 if something was propagated, 0 if there was nothing to do.
 */
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq, *gcfs_rq;

	/* Plain tasks own no child cfs_rq; only group entities propagate. */
	if (entity_is_task(se))
		return 0;

	gcfs_rq = group_cfs_rq(se);
	/*
	 * Continue only if the cfs_rq this se represents is flagged as
	 * having load that must be propagated upward; otherwise return.
	 */
	if (!gcfs_rq->propagate)
		return 0;

	gcfs_rq->propagate = 0;

	cfs_rq = cfs_rq_of(se);

	/* Forward the pending runnable-sum delta to the parent cfs_rq. */
	add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);

	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); /* 1 */
	return 1;
}
1)update_tg_cfs_runnable
函数处理由下面广播上来的负载来更新 load_avg 和 load_sum,函数如下:
/*
 * Apply a child cfs_rq's propagated runnable-sum delta to the group se
 * and to the parent cfs_rq's load averages.
 *
 * NOTE(review): load_sum and load_avg are adjusted by independently
 * computed deltas here (see the two add_positive() calls marked 2), so
 * cfs_rq->avg.load_sum can reach 0 while load_avg is still non-zero —
 * the asymmetry at the root of the bug analysed in this article.
 */
static inline void
update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
	unsigned long runnable_load_avg, load_avg;
	u64 runnable_load_sum, load_sum = 0;
	s64 delta_sum;

	/* Nothing pending to propagate. */
	if (!runnable_sum)
		return;

	gcfs_rq->prop_runnable_sum = 0;

	if (runnable_sum >= 0) {
		/*
		 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
		 * the CPU is saturated running == runnable.
		 */
		runnable_sum += se->avg.load_sum;
		runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
	} else {
		/*
		 * Estimate the new unweighted runnable_sum of the gcfs_rq by
		 * assuming all tasks are equally runnable.
		 */
		if (scale_load_down(gcfs_rq->load.weight)) {
			load_sum = div_s64(gcfs_rq->avg.load_sum,
				scale_load_down(gcfs_rq->load.weight));
		}

		/* But make sure to not inflate se's runnable */
		runnable_sum = min(se->avg.load_sum, load_sum);
	}

	/*
	 * runnable_sum can't be lower than running_sum
	 * As running sum is scale with CPU capacity whereas the runnable sum
	 * is not we rescale running_sum 1st
	 */
	running_sum = se->avg.util_sum /
		arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
	runnable_sum = max(runnable_sum, running_sum);

	load_sum = (s64)se_weight(se) * runnable_sum;
	load_avg = div_s64(load_sum, LOAD_AVG_MAX);

	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
	delta_avg = load_avg - se->avg.load_avg;

	se->avg.load_sum = runnable_sum;
	se->avg.load_avg = load_avg;
	/*
	 * 2: apply the (separately computed) deltas to the parent cfs_rq;
	 * add_positive() clamps at 0, so load_sum and load_avg are not
	 * guaranteed to hit zero together.
	 */
	add_positive(&cfs_rq->avg.load_avg, delta_avg); /* 2 */
	add_positive(&cfs_rq->avg.load_sum, delta_sum); /* 2 */

	runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
	runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);

	delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
	delta_avg = runnable_load_avg - se->avg.runnable_load_avg;

	se->avg.runnable_load_sum = runnable_sum;
	se->avg.runnable_load_avg = runnable_load_avg;

	/* Only a queued group se contributes to the parent's runnable load. */
	if (se->on_rq) {
		add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
		add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
	}
}
根据 patch 描述在这里更新 load_sum 和 load_avg 时可能会出现 load_sum 等于 0,load_avg 不等于 0 的情况。对于这种问题的出现,社区有出 patch 想保持 load_sum 和 load_avg 的同步更新,部分 patch 如下:
@@ -3499,10 +3499,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
static inline void
update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+ long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long load_avg;
u64 load_sum = 0;
- s64 delta_sum;
u32 divider;
if (!runnable_sum)
@@ -3549,13 +3548,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
load_sum = (s64)se_weight(se) * runnable_sum;
load_avg = div_s64(load_sum, divider);
- delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
- delta_avg = load_avg - se->avg.load_avg;
+ delta = load_avg - se->avg.load_avg;
se->avg.load_sum = runnable_sum;
se->avg.load_avg = load_avg;
- add_positive(&cfs_rq->avg.load_avg, delta_avg);
- add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+ add_positive(&cfs_rq->avg.load_avg, delta);
+ cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
}
先计算 load_avg,接着 load_sum 等于 load_avg * divider。这种方式可以保证 load_sum 为零时 load_avg 也为零。具体改动原理未分析,这里展示 patch 只是说明在我们当前场景下会出现 load_sum = 0 而 load_avg != 0 的情况。
所以根据上述分析可以得出结论,当一个任务在退出后或者迁移至其他 cpu 后,任务原先节点的负载 load_sum 和 load_avg 会逐渐衰减直至为 0,而在某些时候负载衰减向上广播阶段会导致在 child cfs_rq 中调用步骤(3)会出现 parent cfs_rq->avg.load_sum 等于 0,而 cfs_rq->avg.load_avg 不等于 0。那么什么时候会触发负载广播这个操作呢?
当任务在任务组中的不同 cpu 之间迁移时会触发广播负载
任务睡眠状态下被迁移到其他 cpu,在唤醒时会触发
所以当我们让 msleep.sh 脚本能够多触发迁移操作时更容易复现 bug。
接着是 parent cfs_rq 的更新
3.2 对 parent cfs_rq 的更新
当对 parent cfs_rq 进行更新时在(1)中调用 update_cfs_rq_load_avg 更新 cfs_rq 并且有衰减会同步调用(2)更新任务组负载,此时对于触发问题的情况,这里不会有负载变化,什么条件会触发没有负载变化?首先看 update_cfs_rq_load_avg
函数:
/*
 * Fold any "removed" load (tasks that exited or migrated away) into this
 * cfs_rq's averages and advance/decay the PELT sums to @now.
 *
 * Returns 1 ("decayed") when the averages changed — the caller then
 * propagates the change to tg->load_avg — and 0 otherwise.
 */
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
	struct sched_avg *sa = &cfs_rq->avg;
	int decayed = 0;

	if (cfs_rq->removed.nr) {
		unsigned long r;
		u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

		raw_spin_lock(&cfs_rq->removed.lock);
		swap(cfs_rq->removed.util_avg, removed_util);
		swap(cfs_rq->removed.load_avg, removed_load);
		swap(cfs_rq->removed.runnable_sum, removed_runnable_sum);
		cfs_rq->removed.nr = 0;
		raw_spin_unlock(&cfs_rq->removed.lock);

		r = removed_load;
		sub_positive(&sa->load_avg, r);
		sub_positive(&sa->load_sum, r * divider);

		r = removed_util;
		sub_positive(&sa->util_avg, r);
		sub_positive(&sa->util_sum, r * divider);

		add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);

		decayed = 1; /* Marked decayed here — the bug scenario does NOT take this path. */
	}

	decayed |= __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq); /* Updating the cfs_rq may (or may not) report a decay. */

#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif

	if (decayed)
		cfs_rq_util_change(cfs_rq, 0);

	return decayed;
}
其中 __update_load_avg_cfs_rq
函数调用如下:
/*
 * Advance this cfs_rq's PELT signal to @now.
 *
 * Returns 1 when ___update_load_sum() actually accumulated time (so the
 * averages were recomputed), 0 when nothing changed.
 */
int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, cpu, &cfs_rq->avg,
			scale_load_down(cfs_rq->load.weight),
			scale_load_down(cfs_rq->runnable_weight),
			cfs_rq->curr != NULL)) {
		___update_load_avg(&cfs_rq->avg, 1, 1);
		return 1;
	}
	return 0;
}
要不触发衰减,则需要 __update_load_avg_cfs_rq
返回零,那么需要 ___update_load_sum
返回 0,___update_load_sum
函数如下:
/*
 * Accumulate the time elapsed since sa->last_update_time into the PELT
 * sums. Returns 1 when the *_avg values should be recomputed, 0 when the
 * elapsed time was negative, below 1us, or no period boundary was crossed.
 */
static __always_inline int
___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
		unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) { /* Not the early return taken in our test case. */
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta) /* In the test case @now (from clock_task) may not have advanced yet, so delta == 0 and we bail out. */
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has been already dequeued but cfs_rq->curr still points to it.
	 * This means that weight will be 0 but not running for a sched_entity
	 * but also for a cfs_rq if the latter becomes idle. As an example,
	 * this happens during idle_balance() which calls
	 * update_blocked_averages()
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
		return 0;

	return 1;
}
可以看到如果需要不触发衰减除了上述排除的两个地方剩下的就是 accumulate_sum
:
/*
 * Accumulate @delta (1us units) into the PELT sums, decaying the old sums
 * whenever one or more 1024us periods elapsed. Returns the number of full
 * periods crossed; 0 tells the caller "no decay happened this call".
 *
 * (The (1)/(2) markers correspond to the discussion in the surrounding
 * text.)
 */
static __always_inline u32
accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
	unsigned long load, unsigned long runnable, int running)
{
	unsigned long scale_freq, scale_cpu;
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	scale_freq = arch_scale_freq_capacity(cpu);
	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);

	delta += sa->period_contrib;
	periods = delta / 1024; /* (1) A period is 1024us (~1ms); periods == 0 -> return 0, "no decay". */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_load_sum =
			decay_load(sa->runnable_load_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		contrib = __accumulate_pelt_segments(periods,
				1024 - sa->period_contrib, delta);
	}
	sa->period_contrib = delta;

	contrib = cap_scale(contrib, scale_freq);
	if (load) /* (2) A task no longer contributing (exited / long asleep) adds nothing: load_sum stays 0. */
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_load_sum += runnable * contrib;
	if (running)
		sa->util_sum += contrib * scale_cpu;

	return periods;
}
(1)当本次更新距离上次更新不足 1024 us 的更新窗口时,那么本次会返回 0,对于负载更新函数,那么就是表示不需要更新负载到任务组,因为没有对负载进行衰减。
(2)对于一个不再提供负载的任务(任务已经退出,或者长时间睡眠)load_sum 还是等于 0。
所以根据上述代码逻辑可以得出,当对 parent cfs_rq 进行更新时如果在 child cfs_rq 中已经对 parent cfs_rq 进行了更新,那么这里如果调用的时间还不满足 1024us 这个衰减窗口,将不会调用到 update_tg_load_avg
来更新任务组负载。而触发 bug 的实际情况是,由于广播负载关系,load_sum 此时等于 0,load_avg 不等于 0,我们并没有实时的更新 cfs_rq->avg.load_avg
到任务组 load_avg。
接着会调用 cfs_rq_is_decayed
判断是否需要将 cfs_rq 移除 rq list 链表,cfs_rq_is_decayed
函数如下:
/*
 * A cfs_rq whose weight and all PELT sums decayed to zero has no tasks
 * and no residual load; it can be removed from the leaf list and stop
 * taking part in periodic load updates.
 *
 * NOTE(review): only the *_sum fields are checked — avg.load_avg is not.
 * Since update_tg_cfs_runnable() can leave load_sum == 0 while load_avg
 * is non-zero, a cfs_rq may be delisted here while still contributing
 * stale load to tg->load_avg (the bug analysed in this article).
 */
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load.weight)
		return false;

	if (cfs_rq->avg.load_sum) /* (1) load_avg is not consulted here. */
		return false;

	if (cfs_rq->avg.util_sum)
		return false;

	if (cfs_rq->avg.runnable_load_sum)
		return false;

	return true;
}
(1)对于我们测试的情况是由于广播负载的相关操作导致 load_sum 此时等于 0,而 load_avg 不等于 0,本来还应该继续为任务组负载更新做出贡献,而由于没有同步 load_sum 和 load_avg 此时的 cfs_rq 被移出链表不再参与更新,如果对应的任务组的对应 cpu cfs_rq 没有其他任务被入队那么该部分负载将会永久的附加在任务组负载中。
针对这个 bug,社区有通过修改广播负载过程中 update_tg_cfs_runnable
逻辑来同步 load_sum 和 load_avg 解决问题;本文的 case 则是在 for_each_leaf_cfs_rq_safe -> update_load_avg
中添加 UPDATE_TG
来解决部分问题。当添加 UPDATE_TG
后,在 child 中对 parent se 更新期间则会对 parent cfs_rq 的任务组负载进行更新,避免后续出现更新不到 cfs_rq 的情况。