author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-07-19 09:58:49 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-07-21 07:42:23 +0200
commit     a0a93e3e6e1e8c35d1d6cf624f2f74a976ab43d5 (patch)
tree       6497351c755cdaab03d336bbe8b90f6d17b0f56d /kernel/sched/fair.c
parent     80495c708490eccbd5a1e8e934a674c93df1b9d8 (diff)
Revert "sched/core: Optimize SCHED_SMT"
This reverts commit 1b568f0aabf280555125bc7cefc08321ff0ebaba.

For the 4.9 kernel tree, this patch causes scheduler regressions. It is
fixed in newer kernels with a large number of individual patches, the
sum of which is too big for the stable kernel tree. Ingo recommended
just reverting the single patch for this tree, as it's much simpler.

Reported-by: Ben Guthro <ben@guthro.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
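For context on what is being backed out: the reverted commit gated the
idle-core and idle-SMT scans behind a sched_smt_present static key, so
machines without hyperthreading could skip the sibling search entirely.
Below is a minimal userspace sketch of that control flow, not kernel
code: a plain bool stands in for the jump-label static key, the scan is
a toy loop, and smt_present, cpu_idle, and scan_idle_core are
hypothetical names used only for illustration.

    /*
     * Userspace sketch of the reverted guard, NOT kernel code: a plain
     * global flag models sched_smt_present, and the sibling scan is a
     * loop over a toy idle-CPU array.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8

    static bool smt_present;            /* set once if SMT siblings exist */
    static bool cpu_idle[NR_CPUS];

    /* Analogue of select_idle_core(): the early-out this revert removes. */
    static int scan_idle_core(void)
    {
            if (!smt_present)           /* bail before touching remote state */
                    return -1;

            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    if (cpu_idle[cpu])
                            return cpu;
            return -1;
    }

    int main(void)
    {
            cpu_idle[3] = true;

            printf("no SMT: %d\n", scan_idle_core());  /* -1, scan skipped */
            smt_present = true;                        /* "key" enabled */
            printf("SMT:    %d\n", scan_idle_core());  /* 3 */
            return 0;
    }

The reason the kernel uses a static key rather than a plain bool is that
jump labels patch the disabled path into a no-op at runtime, so the
common non-SMT case pays essentially nothing for the check.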
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c242944f5cbd..15fdae7531e9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5376,7 +5376,7 @@ static inline bool test_idle_cores(int cpu, bool def)
  * Since SMT siblings share all cache levels, inspecting this limited remote
  * state should be fairly cheap.
  */
-void __update_idle_core(struct rq *rq)
+void update_idle_core(struct rq *rq)
 {
 	int core = cpu_of(rq);
 	int cpu;
@@ -5408,9 +5408,6 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int core, cpu, wrap;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return -1;
-
 	if (!test_idle_cores(target, false))
 		return -1;
 
@@ -5444,9 +5441,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 {
 	int cpu;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return -1;
-
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
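
The rename in the first hunk is the other half of the same mechanism:
in the reverted commit, update_idle_core() appears to have been a small
inline wrapper in kernel/sched/sched.h (outside this file's diffstat)
that only called __update_idle_core() when the static key was enabled;
reverting deletes the wrapper and makes update_idle_core() the real
function again. A self-contained model of that wrapper/rename
relationship, with struct rq and the scan reduced to stubs so the
translation unit compiles on its own:

    /*
     * Model of the wrapper that the revert removes, NOT the kernel
     * source: struct rq and __update_idle_core() are stubs, and a bool
     * again stands in for the static key.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct rq { int cpu; };             /* stub of the kernel runqueue */

    static bool sched_smt_present;      /* stand-in for the static key */

    static void __update_idle_core(struct rq *rq)
    {
            printf("scanning siblings of cpu %d\n", rq->cpu);
    }

    /*
     * With the optimization in place, callers see only this wrapper,
     * and the scan is skipped entirely on non-SMT machines. The revert
     * drops the wrapper, so callers invoke the function directly.
     */
    static inline void update_idle_core(struct rq *rq)
    {
            if (sched_smt_present)
                    __update_idle_core(rq);
    }

    int main(void)
    {
            struct rq rq = { .cpu = 1 };

            update_idle_core(&rq);      /* no-op: key disabled */
            sched_smt_present = true;
            update_idle_core(&rq);      /* falls through to the scan */
            return 0;
    }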