author	Ingo Molnar <mingo@elte.hu>	2008-04-14 08:53:32 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 19:44:57 +0200
commit	018d6db4cb5bbdcd65424a16f2dcca692ed32ae4 (patch)
tree	aee5a5ae44ba3f345c26acf91d419dae1ba6d1e1	/kernel/sched_fair.c
parent	3925e6fc1f774048404fdd910b0345b06c699eb4 (diff)
sched: re-do "sched: fix fair sleepers"
re-apply:

| commit e22ecef1d2658ba54ed7d3fdb5d60829fb434c23
| Author: Ingo Molnar <mingo@elte.hu>
| Date:   Fri Mar 14 22:16:08 2008 +0100
|
|     sched: fix fair sleepers
|
|     Fair sleepers need to scale their latency target down by runqueue
|     weight. Otherwise busy systems will gain ever larger sleep bonus.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	6
1 file changed, 4 insertions, 2 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0080968d3e4a..86a93376282c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -510,8 +510,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS))
-			vruntime -= sysctl_sched_latency;
+		if (sched_feat(NEW_FAIR_SLEEPERS)) {
+			vruntime -= calc_delta_fair(sysctl_sched_latency,
+						    &cfs_rq->load);
+		}
 		/* ensure we never gain time by being placed backwards. */
 		vruntime = max_vruntime(se->vruntime, vruntime);
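
For context, calc_delta_fair() scales a time delta by the weight of a
single nice-0 task relative to the runqueue's total load, so the sleeper
credit subtracted from vruntime shrinks as the runqueue gets busier. A
minimal sketch of that arithmetic, assuming NICE_0_LOAD (1024, the load
weight of a nice-0 task) and plain integer division; the in-tree version
defers to calc_delta_mine() for overflow-safe fixed-point math, and
calc_delta_fair_sketch() is a hypothetical name used here only for
illustration:

/*
 * Sketch of the scaling used by the fix above: scale the latency
 * target down by runqueue weight, so busy runqueues grant a smaller
 * sleeper bonus.  Not the in-tree implementation.
 */
static u64 calc_delta_fair_sketch(u64 delta, struct load_weight *lw)
{
	/* delta * (nice-0 weight / total runqueue weight) */
	return delta * NICE_0_LOAD / lw->weight;
}

With two nice-0 tasks queued (lw->weight == 2 * NICE_0_LOAD), a 20 ms
sysctl_sched_latency yields a 10 ms sleeper credit, whereas the pre-fix
code granted the full 20 ms regardless of load, which is the ever-larger
sleep bonus the commit message describes.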