author     Kirill Tkhai <ktkhai@parallels.com>  2014-09-22 22:36:24 +0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2014-11-14 10:10:24 -0800
commit     74622033f1d67fa4b3054c2b0dfbb2527ec7a977 (patch)
tree       40328470a3214918d068e7f42f99cf5cc9d9e525 /kernel
parent     2743684fa1a9c4a0adfb54f8ef8316df1eb61459 (diff)
sched: Use dl_bw_of() under RCU read lock
commit 66339c31bc3978d5fff9c4b4cb590a861def4db2 upstream.

dl_bw_of() dereferences rq->rd, which has to have the RCU read lock held;
the probability of a use-after-free here is not zero. Also add a lockdep
assert into dl_bw_cpus().

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140922183624.11015.71558.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
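For context, the locking rule being enforced looks roughly like the sketch below. This is an illustrative, self-contained example, not code from kernel/sched: the names my_domain, my_dom, my_get_bw and my_read_bw are made up. The point is that the dereference of an RCU-protected pointer must stay inside an RCU-sched read-side critical section, and rcu_lockdep_assert() (the helper of this era, later renamed RCU_LOCKDEP_WARN()) turns a call made outside such a section into a lockdep warning instead of a silent use-after-free.

/* Illustrative sketch only -- hypothetical names, not part of this patch. */
#include <linux/rcupdate.h>

struct my_domain {
        int bw;
};

static struct my_domain __rcu *my_dom;

/* Accessor: callers must be inside an RCU-sched read-side section. */
static int my_get_bw(void)
{
        struct my_domain *dom;

        rcu_lockdep_assert(rcu_read_lock_sched_held(),
                           "sched RCU must be held");
        dom = rcu_dereference_sched(my_dom);
        return dom ? dom->bw : 0;
}

/* Caller: the read lock covers the whole dereference, so the domain
 * cannot be freed underneath us. */
static int my_read_bw(void)
{
        int bw;

        rcu_read_lock_sched();
        bw = my_get_bw();
        rcu_read_unlock_sched();

        return bw;
}

The patch applies the same pattern to the deadline-bandwidth code: the assertion goes into the accessors dl_bw_of() and dl_bw_cpus(), and the sysctl paths sched_dl_global_constraints() and sched_dl_do_global() take the RCU read lock around the sections that use the returned pointers.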
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/sched/core.c    10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ec1a286684a5..8388295c637f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1977,6 +1977,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
+        rcu_lockdep_assert(rcu_read_lock_sched_held(),
+                           "sched RCU must be held");
         return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -1985,6 +1987,8 @@ static inline int dl_bw_cpus(int i)
         struct root_domain *rd = cpu_rq(i)->rd;
         int cpus = 0;
 
+        rcu_lockdep_assert(rcu_read_lock_sched_held(),
+                           "sched RCU must be held");
         for_each_cpu_and(i, rd->span, cpu_active_mask)
                 cpus++;
 
@@ -7580,6 +7584,8 @@ static int sched_dl_global_constraints(void)
         int cpu, ret = 0;
         unsigned long flags;
 
+        rcu_read_lock();
+
         /*
          * Here we want to check the bandwidth not being set to some
          * value smaller than the currently allocated bandwidth in
@@ -7601,6 +7607,8 @@ static int sched_dl_global_constraints(void)
                         break;
         }
 
+        rcu_read_unlock();
+
         return ret;
 }
 
@@ -7616,6 +7624,7 @@ static void sched_dl_do_global(void)
         if (global_rt_runtime() != RUNTIME_INF)
                 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
 
+        rcu_read_lock();
         /*
          * FIXME: As above...
          */
@@ -7626,6 +7635,7 @@ static void sched_dl_do_global(void)
                 dl_b->bw = new_bw;
                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
         }
+        rcu_read_unlock();
 }
 
 static int sched_rt_global_validate(void)