author	Chuck Ebbert <cebbert@redhat.com>	2007-11-14 18:33:16 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2007-11-26 09:42:30 -0800
commit	23a5e6a55c8653d79783a0ebda8083999fb97054
tree	619ee4e278561006aaffd49fae557bc6eb9f83b4
parent	77675886269b9af9f88d2fcea2fe1a3d02af0f6e
Fix divide-by-zero in the 2.6.23 scheduler code
No patch in mainline as this logic has been removed from 2.6.24 so it is
not necessary.

https://bugzilla.redhat.com/show_bug.cgi?id=340161

The problem code has been removed in 2.6.24.  The below patch disables
SCHED_FEAT_PRECISE_CPU_LOAD, which causes the offending code to be
skipped, but does not prevent the user from enabling it.

The divide-by-zero is here in kernel/sched.c:

static void update_cpu_load(struct rq *this_rq)
{
	u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
	unsigned long total_load = this_rq->ls.load.weight;
	unsigned long this_load = total_load;
	struct load_stat *ls = &this_rq->ls;
	int i, scale;

	this_rq->nr_load_updates++;
	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
		goto do_avg;

	/* Update delta_fair/delta_exec fields first */
	update_curr_load(this_rq);

	fair_delta64 = ls->delta_fair + 1;
	ls->delta_fair = 0;

	exec_delta64 = ls->delta_exec + 1;
	ls->delta_exec = 0;

	sample_interval64 = this_rq->clock - ls->load_update_last;
	ls->load_update_last = this_rq->clock;

	if ((s64)sample_interval64 < (s64)TICK_NSEC)
		sample_interval64 = TICK_NSEC;

	if (exec_delta64 > sample_interval64)
		exec_delta64 = sample_interval64;

	idle_delta64 = sample_interval64 - exec_delta64;

======>	tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
	tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);

	this_load = (unsigned long)tmp64;

do_avg:

	/*
	 * Update our load:
	 */
	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
		new_load = this_load;

		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
	}
}

For stable only; the code has been removed in 2.6.24.

Signed-off-by: Chuck Ebbert <cebbert@redhat.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
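[Editor's note: a minimal user-space sketch of the fallback do_avg path quoted
above, not kernel code. Names such as cpu_load and CPU_LOAD_IDX_MAX mirror the
kernel identifiers, and the constant 5 is assumed to match the kernel's
CPU_LOAD_IDX_MAX. The point is that this path uses only shifts, never a
runtime divisor, which is why skipping the "precise" branch avoids the trap.]

/*
 * Standalone illustration of the exponentially decaying average in the
 * do_avg loop: cpu_load[i] moves toward this_load with weight 1/2^i.
 * Only shifts are involved, so no divide-by-zero is possible here.
 */
#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5	/* assumed to match the kernel constant */

static unsigned long cpu_load[CPU_LOAD_IDX_MAX];

static void update_load_avgs(unsigned long this_load)
{
	unsigned long scale;
	int i;

	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load = cpu_load[i];

		/* scale is 1 << i, so >> i divides by scale */
		cpu_load[i] = (old_load * (scale - 1) + this_load) >> i;
	}
}

int main(void)
{
	int i;

	/* feed a constant load and watch the slower indices converge */
	for (i = 0; i < 10; i++)
		update_load_avgs(1024);

	for (i = 0; i < CPU_LOAD_IDX_MAX; i++)
		printf("cpu_load[%d] = %lu\n", i, cpu_load[i]);

	return 0;
}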
 kernel/sched_fair.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 67c67a87146e..2f592fe9ed6d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -93,7 +93,7 @@ unsigned int sysctl_sched_features __read_mostly =
 		SCHED_FEAT_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
-		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
+		SCHED_FEAT_PRECISE_CPU_LOAD	*0 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_SKIP_INITIAL		*0;
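[Editor's note: for context, the *1/*0 factors select which SCHED_FEAT_* bits
end up in the default sysctl_sched_features mask. The sketch below assumes the
2.6.23 power-of-two enum values from kernel/sched_fair.c; check the tree you
are building. As the commit message notes, clearing the default bit does not
prevent a user from turning the feature back on at runtime, e.g. through the
sched_features sysctl where the kernel exposes it.]

/*
 * Sketch of how the default feature mask is composed. Enum values are
 * assumed to be the 2.6.23 constants, not taken from this patch.
 */
enum {
	SCHED_FEAT_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_SLEEPER_AVG		= 2,
	SCHED_FEAT_SLEEPER_LOAD_AVG	= 4,
	SCHED_FEAT_PRECISE_CPU_LOAD	= 8,
	SCHED_FEAT_START_DEBIT		= 16,
	SCHED_FEAT_SKIP_INITIAL		= 32,
};

/*
 * With this patch the PRECISE_CPU_LOAD term is multiplied by 0, so the
 * default mask becomes 1 | 4 | 16 = 21 instead of 1 | 4 | 8 | 16 = 29.
 */
unsigned int sysctl_sched_features =
		SCHED_FEAT_FAIR_SLEEPERS	* 1 |
		SCHED_FEAT_SLEEPER_AVG		* 0 |
		SCHED_FEAT_SLEEPER_LOAD_AVG	* 1 |
		SCHED_FEAT_PRECISE_CPU_LOAD	* 0 |
		SCHED_FEAT_START_DEBIT		* 1 |
		SCHED_FEAT_SKIP_INITIAL		* 0;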