author     Milton Miller <miltonm@bga.com>        2011-03-15 13:27:16 -0600
committer  Greg Kroah-Hartman <gregkh@suse.de>    2011-03-21 12:45:52 -0700
commit     9b1bd836a0acdd1d939417017535d37cfaf0e6d0 (patch)
tree       3aae90954d756557605c7bee28f2da7048a0e7fc /kernel
parent     9cedf78402729da0d3662ee7c695ac3f232c4f43 (diff)
call_function_many: fix list delete vs add race
commit e6cd1e07a185d5f9b0aa75e020df02d3c1c44940 upstream.

Peter pointed out there was nothing preventing the list_del_rcu in
smp_call_function_interrupt from running before the list_add_rcu in
smp_call_function_many.

Fix this by not setting refs until we have gotten the lock for the list.
Take advantage of the wmb in list_add_rcu to save an explicit additional
one.

I tried to force this race with a udelay before the lock & list_add and
by mixing all 64 online cpus with just 3 random cpus in the mask, but
was unsuccessful. Still, inspection shows a valid race, and the fix is
an extension of the existing protection window in the current code.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
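The ordering the patch establishes can be modelled outside the kernel. The
sketch below is a minimal userspace analogue, not the kernel code itself:
C11 atomics and a pthread mutex stand in for the kernel's barriers and
call_function.lock, a single atomic pointer stands in for the
call_function.queue list, and the names publish() and queue_head are
hypothetical. What it demonstrates is the point of the fix: refs may only
become visible after the entry's other fields are written and the entry is
on the queue.

    #include <pthread.h>
    #include <stdatomic.h>

    struct call_data {
            void (*func)(void *info);
            void *info;
            atomic_int refs;            /* stands in for data->refs */
            unsigned long cpumask;      /* one bit per "cpu" in this model */
    };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct call_data *_Atomic queue_head; /* one-slot stand-in for the queue */

    /* Writer, in the smp_call_function_many() role: fill in the block,
     * take the queue lock, publish the entry, and only then set refs.
     * The release store on refs plays the part of the wmb() inside
     * list_add_rcu(): a reader that observes refs != 0 is guaranteed to
     * also observe func, info and cpumask. */
    static void publish(struct call_data *d, void (*f)(void *), void *info,
                        unsigned long mask, int ncpus)
    {
            d->func = f;
            d->info = info;
            d->cpumask = mask;

            pthread_mutex_lock(&queue_lock);
            atomic_store(&queue_head, d);   /* models list_add_rcu() */
            atomic_store_explicit(&d->refs, ncpus, memory_order_release);
            pthread_mutex_unlock(&queue_lock);
    }

Setting refs before taking the lock, as the old code did, would reopen the
window the commit message describes: because the data block is reused
without a grace period, another cpu could observe the new refs value while
the entry has not yet been re-added to the queue.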
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/smp.c    20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index f47462b2e827..0195451c0d68 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -469,14 +469,15 @@ void smp_call_function_many(const struct cpumask *mask,
cpumask_clear_cpu(this_cpu, data->cpumask);
/*
- * To ensure the interrupt handler gets an complete view
- * we order the cpumask and refs writes and order the read
- * of them in the interrupt handler. In addition we may
- * only clear our own cpu bit from the mask.
+ * We reuse the call function data without waiting for any grace
+ * period after some other cpu removes it from the global queue.
+ * This means a cpu might find our data block as it is written.
+ * The interrupt handler waits until it sees refs filled out
+ * while its cpu mask bit is set; here we may only clear our
+ * own cpu mask bit, and must wait to set refs until we are sure
+ * previous writes are complete and we have obtained the lock to
+ * add the element to the queue.
*/
- smp_wmb();
-
- atomic_set(&data->refs, cpumask_weight(data->cpumask));
raw_spin_lock_irqsave(&call_function.lock, flags);
/*
@@ -485,6 +486,11 @@ void smp_call_function_many(const struct cpumask *mask,
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
+ /*
+ * We rely on the wmb() in list_add_rcu to order the writes
+ * to func, data, and cpumask before this write to refs.
+ */
+ atomic_set(&data->refs, cpumask_weight(data->cpumask));
raw_spin_unlock_irqrestore(&call_function.lock, flags);
/*
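The comment added by the patch pairs with a check on the interrupt-handler
side. Continuing the userspace model above (same hedges apply: try_handle()
is a hypothetical name, a plain bit test stands in for the cpumask check,
and a single acquire load stands in for the kernel's read-side barrier; the
kernel handler tests the cpumask bit first and separates the two reads with
smp_rmb(), while the model below uses the opposite order to stay race-free
in plain C11):

    /* Reader, in the interrupt-handler role: skip the entry until refs
     * is seen nonzero.  The acquire load pairs with the release store in
     * publish(), so nonzero refs implies func, info and cpumask are all
     * visible by the time we look at them. */
    static void try_handle(int my_cpu)
    {
            struct call_data *d = atomic_load(&queue_head);

            if (!d || atomic_load_explicit(&d->refs, memory_order_acquire) == 0)
                    return;                 /* writer has not finished publishing */
            if (!(d->cpumask & (1UL << my_cpu)))
                    return;                 /* entry not addressed to us */

            d->func(d->info);
            atomic_fetch_sub(&d->refs, 1);  /* last cpu out would do list_del_rcu() */
    }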