01cfcde9c26d ("sched/fair: handle case of task_h_load() returning 0")
0b0695f2b34a ("sched/fair: Rework load_balance()")
fcf0553db6f4 ("sched/fair: Remove meaningless imbalance calculation")
a34983470301 ("sched/fair: Rename sg_lb_stats::sum_nr_running to sum_h_nr_running")
490ba971d8b4 ("sched/fair: Clean up asym packing")
a3df067974c5 ("sched/fair: Rename weighted_cpuload() to cpu_runnable_load()")
af75d1a9a9f7 ("sched/fair: Remove sgs->sum_weighted_load")
1c1b8a7b03ef ("sched/fair: Replace source_load() & target_load() with weighted_cpuload()")
5e83eafbfd3b ("sched/fair: Remove the rq->cpu_load[] update code")
10a35e6812aa ("sched/pelt: Skip updating util_est when utilization is higher than CPU's capacity")
4ad4e481bd02 ("sched/fair: Fix rounding bug for asym packing")
630246a06ae2 ("sched/fair: Clean-up update_sg_lb_stats parameters")
6aa140fa4508 ("sched/topology: Reference the Energy Model of CPUs when available")
fdf5f315d5cf ("sched/fair: Disable LB_BIAS by default")
7477a3504e61 ("sched/numa: Remove unused numa_stats::nr_running field")
4ad3831a9d4a ("sched/fair: Don't move tasks to lower capacity CPUs unless necessary")
757ffdd705ee ("sched/fair: Set rq->rd->overload when misfit")
e90c8fe15a3b ("sched/fair: Wrap rq->rd->overload accesses with READ/WRITE_ONCE()")
575638d1047e ("sched/core: Change root_domain->overload type to int")
dbbad719449e ("sched/fair: Change 'prefer_sibling' type to bool")
cad68e552e77 ("sched/fair: Consider misfit tasks when load-balancing")
e3d6d0cb66f2 ("sched/fair: Add sched_group per-CPU max capacity")
3b1baa6496e6 ("sched/fair: Add 'group_misfit_task' load-balance type")
287cdaac5700 ("sched/fair: Fix scale_rt_capacity() for SMT")
2d4056fafa19 ("sched/numa: Remove numa_has_capacity()")
10864a9e2220 ("sched/numa: Remove unused task_capacity from 'struct numa_stats'")
bbb62c0b024a ("sched/core: Remove the rt_avg code")
523e979d3164 ("sched/core: Use PELT for scale_rt_capacity()")
371bf4273269 ("sched/rt: Add rt_rq utilization tracking")
c079629862b2 ("sched/pelt: Move PELT related code in a dedicated file")
d519329f72a6 ("sched/fair: Update util_est only on util_avg updates")
7f65ea42eb00 ("sched/fair: Add util_est on top of PELT")
31e77c93e432 ("sched/fair: Update blocked load when newly idle")
47ea54121e46 ("sched/fair: Move idle_balance()")
dd707247abab ("sched/nohz: Merge CONFIG_NO_HZ_COMMON blocks")
af3fe03c5620 ("sched/fair: Move rebalance_domains()")
1936c53ce8c8 ("sched/fair: Reduce the periodic update duration")
f643ea220701 ("sched/nohz: Stop NOHZ stats when decayed")
00357f5ec5d6 ("sched/nohz: Clean up nohz enter/exit")
e022e0d38ad4 ("sched/fair: Update blocked load from NEWIDLE")
a4064fb614f8 ("sched/fair: Add NOHZ stats balancing")
4550487a993d ("sched/fair: Restructure nohz_balance_kick()")
b7031a02ec75 ("sched/fair: Add NOHZ_STATS_KICK")
a22e47a4e3f5 ("sched/core: Convert nohz_flags to atomic_t")
a92057e14beb ("sched/idle: Merge kernel/sched/idle.c and kernel/sched/idle_task.c")
325ea10c0809 ("sched/headers: Simplify and clean up header usage in the scheduler")
97fb7a0a8944 ("sched: Clean up and harmonize the coding style of the scheduler code base")
dcdedb24159b ("sched/nohz: Remove the 1 Hz tick code")
d84b31313ef8 ("sched/isolation: Offload residual 1Hz scheduler tick")
eeb603986391 ("sched/fair: Defer calculation of 'prev_eff_load' in wake_affine_weight() until needed")