author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-09-20 23:38:02 +0200
committer Greg Kroah-Hartman <gregkh@suse.de>        2010-04-01 15:52:21 -0700
commit    ecfb7fb9b13c617447a7f6b5925da26798c1a8a1 (patch)
tree      7242d25bc9fef033284cee5d9343484f1eacb124 /include
parent    aa7659a10684907bec7c0c6887f8755767fb8dea (diff)
sched: wakeup preempt when small overlap
commit 15afe09bf496ae10c989e1a375a6b5da7bd3e16e upstream.

Lin Ming reported a 10% OLTP regression against 2.6.27-rc4.

The difference seems to come from different preemption aggressiveness,
which affects the cache footprint of the workload and its effective
cache thrashing.

Aggressively preempt a task if its avg overlap is very small; this should
avoid the task going to sleep and finding it still running when we schedule
back to it - saving a wakeup.

Reported-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
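The heuristic itself lives in the scheduler's fair class, which is outside this include-only diff. A minimal sketch of the check described above is given here, assuming the per-entity avg_overlap bookkeeping and the sysctl_sched_migration_cost threshold present in kernels of this era; everything in it other than the sched_class hook signature changed below is an assumption, not part of this patch:

/*
 * Sketch of the "preempt on small overlap" idea from the changelog,
 * roughly as it would sit in the fair class's wakeup-preemption hook.
 * Field and symbol names (se.avg_overlap, sysctl_sched_migration_cost,
 * resched_task) are assumptions from this kernel era.
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
{
	struct task_struct *curr = rq->curr;

	/*
	 * Synchronous wakeup and both tasks show a very small average
	 * overlap: the waker is about to sleep anyway, so preempt it now
	 * and run the wakee immediately, saving a later wakeup.
	 */
	if (sync && p->se.avg_overlap < sysctl_sched_migration_cost &&
	    curr->se.avg_overlap < sysctl_sched_migration_cost) {
		resched_task(curr);
		return;
	}

	/* ... remaining wakeup-preemption checks unchanged ... */
}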
Diffstat (limited to 'include')
-rw-r--r--  include/linux/sched.h  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 20d6dd59cb0b..6d32974f3c1a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -901,7 +901,7 @@ struct sched_class {
 	void (*yield_task) (struct rq *rq);
 	int (*select_task_rq)(struct task_struct *p, int sync);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
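For context, the new sync argument is threaded through from the wakeup path by the generic dispatcher in kernel/sched.c, which is not part of this include-only diff. A hedged sketch of that call site, with the wrapper shape assumed from kernels of this era:

/*
 * Sketch only: forward the wakeup-preemption check to the current
 * task's scheduling class, now passing the sync hint along as well.
 */
static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
{
	rq->curr->sched_class->check_preempt_curr(rq, p, sync);
}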