From a0a93e3e6e1e8c35d1d6cf624f2f74a976ab43d5 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Wed, 19 Jul 2017 09:58:49 +0200
Subject: Revert "sched/core: Optimize SCHED_SMT"

This reverts commit 1b568f0aabf280555125bc7cefc08321ff0ebaba.

For the 4.9 kernel tree, this patch causes scheduler regressions. It is
fixed in newer kernels with a large number of individual patches, the
sum of which is too big for the stable kernel tree. Ingo recommended
just reverting the single patch for this tree, as it's much simpler.

Reported-by: Ben Guthro
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Thomas Gleixner
Signed-off-by: Greg Kroah-Hartman
---
 kernel/sched/core.c  | 19 -------------------
 kernel/sched/fair.c  |  8 +-------
 kernel/sched/sched.h | 23 ++++++-----------------
 3 files changed, 7 insertions(+), 43 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 692c948ae333..737381d601e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7422,22 +7422,6 @@ int sched_cpu_dying(unsigned int cpu)
 }
 #endif
 
-#ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-
-static void sched_init_smt(void)
-{
-	/*
-	 * We've enumerated all CPUs and will assume that if any CPU
-	 * has SMT siblings, CPU0 will too.
-	 */
-	if (cpumask_weight(cpu_smt_mask(0)) > 1)
-		static_branch_enable(&sched_smt_present);
-}
-#else
-static inline void sched_init_smt(void) { }
-#endif
-
 void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
@@ -7467,9 +7451,6 @@ void __init sched_init_smp(void)
 
 	init_sched_rt_class();
 	init_sched_dl_class();
-
-	sched_init_smt();
-
 	sched_smp_initialized = true;
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c242944f5cbd..15fdae7531e9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5376,7 +5376,7 @@ static inline bool test_idle_cores(int cpu, bool def)
  * Since SMT siblings share all cache levels, inspecting this limited remote
  * state should be fairly cheap.
  */
-void __update_idle_core(struct rq *rq)
+void update_idle_core(struct rq *rq)
 {
 	int core = cpu_of(rq);
 	int cpu;
@@ -5408,9 +5408,6 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int core, cpu, wrap;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return -1;
-
 	if (!test_idle_cores(target, false))
 		return -1;
 
@@ -5444,9 +5441,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 {
 	int cpu;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return -1;
-
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 055f935d4421..ad77d666583c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -43,6 +43,12 @@ extern void cpu_load_update_active(struct rq *this_rq);
 static inline void cpu_load_update_active(struct rq *this_rq) { }
 #endif
 
+#ifdef CONFIG_SCHED_SMT
+extern void update_idle_core(struct rq *rq);
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
+
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
@@ -731,23 +737,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-
-#ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
-extern void __update_idle_core(struct rq *rq);
-
-static inline void update_idle_core(struct rq *rq)
-{
-	if (static_branch_unlikely(&sched_smt_present))
-		__update_idle_core(rq);
-}
-
-#else
-static inline void update_idle_core(struct rq *rq) { }
-#endif
-
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-- 
cgit v1.2.3