author		Wang YanQing <udknight@gmail.com>	2013-01-26 15:53:57 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-02-03 18:21:38 -0600
commit		d915fe310d9edbc349c36d0cef1b5dd0da6b067c (patch)
tree		54be4d4a1ecb40793bc65b5850ee6e79ed5c897b /kernel
parent		95675e4004fde70caaf119a9cb7ff489f3f0a069 (diff)
smp: Fix SMP function call empty cpu mask race
commit f44310b98ddb7f0d06550d73ed67df5865e3eda5 upstream.

I get the following warning every day with v3.7, once or twice a day:

  [ 2235.186027] WARNING: at /mnt/sda7/kernel/linux/arch/x86/kernel/apic/ipi.c:109 default_send_IPI_mask_logical+0x2f/0xb8()

As explained by Linus as well:

 |
 | Once we've done the "list_add_rcu()" to add it to the
 | queue, we can have (another) IPI to the target CPU that can
 | now see it and clear the mask.
 |
 | So by the time we get to actually send the IPI, the mask might
 | have been cleared by another IPI.
 |

This patch also fixes a system hang problem, if the data->cpumask
gets cleared after passing this point:

        if (WARN_ONCE(!mask, "empty IPI mask"))
                return;

then the problem in commit 83d349f35e1a ("x86: don't send an IPI to
the empty set of CPU's") will happen again.

Signed-off-by: Wang YanQing <udknight@gmail.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Jan Beulich <jbeulich@suse.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: peterz@infradead.org
Cc: mina86@mina86.org
Cc: srivatsa.bhat@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/20130126075357.GA3205@udknight
[ Tidied up the changelog and the comment in the code. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
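The race described above is purely an ordering problem: once list_add_rcu() has
published the entry, any target CPU handling an IPI for an earlier call can walk
the queue and clear its bit in data->cpumask before the sender ever reaches
arch_send_call_function_ipi_mask(). As a rough illustration only (a userspace
analogue with C11 atomics and pthreads, not kernel code; the names cpumask,
published and consumer() are made up for the example), the buggy ordering looks
like this:

/* Illustrative userspace analogue of the race -- not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

atomic_ulong cpumask;            /* stands in for data->cpumask          */
atomic_int   published;          /* "entry is visible on the call queue" */

/* Target "CPU": once the entry is visible it may clear its mask bit at
 * any time, just like a CPU that took an IPI for an earlier call.      */
static void *consumer(void *arg)
{
	long bit = (long)arg;

	while (!atomic_load(&published))
		;                                /* wait for list_add_rcu() */
	atomic_fetch_and(&cpumask, ~(1UL << bit));
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&cpumask, 1UL << 1);        /* target "CPU 1"          */
	pthread_create(&t, NULL, consumer, (void *)1L);

	atomic_store(&published, 1);             /* the list_add_rcu() step */

	/* Buggy sender: reads the live mask *after* publishing, so the
	 * bit may already be gone by the time the "IPI" is sent.        */
	unsigned long ipi_mask = atomic_load(&cpumask);
	printf("mask at send time: %#lx%s\n", ipi_mask,
	       ipi_mask ? "" : "  (empty -> the WARN in ipi.c:109)");

	pthread_join(t, NULL);
	return 0;
}

Depending on scheduling, the printed mask is either 0x2 or 0; the latter is the
empty-mask case that triggers the warning quoted above.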
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/smp.c	13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index fb67dfa8394e..38d9e033bbc3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -31,6 +31,7 @@ struct call_function_data {
 	struct call_single_data	csd;
 	atomic_t		refs;
 	cpumask_var_t		cpumask;
+	cpumask_var_t		cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -54,6 +55,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
+		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+				cpu_to_node(cpu)))
+			return notifier_from_errno(-ENOMEM);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -63,6 +67,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
+		free_cpumask_var(cfd->cpumask_ipi);
 		break;
 #endif
 	};
@@ -524,6 +529,12 @@ void smp_call_function_many(const struct cpumask *mask,
 		return;
 	}
 
+	/*
+	 * After we put an entry into the list, data->cpumask
+	 * may be cleared again when another CPU sends another IPI for
+	 * a SMP function call, so data->cpumask will be zero.
+	 */
+	cpumask_copy(data->cpumask_ipi, data->cpumask);
 	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
@@ -547,7 +558,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	smp_mb();
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(data->cpumask);
+	arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
 	/* Optionally wait for the CPUs to complete */
 	if (wait)
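Why the one-line change in the last hunk is sufficient: the new cpumask_ipi copy
is taken before the entry is added to the call queue, so the mask handed to
arch_send_call_function_ipi_mask() is a private snapshot that no other CPU can
clear from under the sender. In terms of the userspace sketch after the
changelog, the fixed ordering would look roughly as follows (again an
illustrative analogue with a hypothetical fixed_sender() and the same variables,
not the kernel implementation):

/* Illustrative analogue of the fixed ordering -- not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

extern atomic_ulong cpumask;     /* stands in for data->cpumask          */
extern atomic_int   published;   /* "entry is visible on the call queue" */

void fixed_sender(void)
{
	/* Snapshot first, mirroring cpumask_copy(data->cpumask_ipi,
	 * data->cpumask) in the hunk above ...                          */
	unsigned long ipi_mask = atomic_load(&cpumask);

	/* ... then publish the entry (the list_add_rcu() step).         */
	atomic_store(&published, 1);

	/* Other CPUs may clear bits in cpumask from here on, but
	 * ipi_mask is private, so the send step never goes out with an
	 * unexpectedly empty mask.                                      */
	printf("sending IPIs for mask %#lx\n", ipi_mask);
}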