From 8b28499a71d3431c9128abc743e2d2bfbdae3ed4 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Mon, 24 Feb 2014 16:39:58 +0100
Subject: smp: Consolidate the various smp_call_function_single() declensions

__smp_call_function_single() and smp_call_function_single() share some
code that can be factorized: execute inline when the target is local,
check if the target is online, lock the csd, call generic_exec_single().

Let's move the common parts to generic_exec_single().

Reviewed-by: Jan Kara
Cc: Andrew Morton
Cc: Christoph Hellwig
Cc: Ingo Molnar
Cc: Jan Kara
Cc: Jens Axboe
Signed-off-by: Frederic Weisbecker
Signed-off-by: Jens Axboe
---
 kernel/smp.c | 80 +++++++++++++++++++++++++++++-------------------------------
 1 file changed, 39 insertions(+), 41 deletions(-)

(limited to 'kernel/smp.c')

diff --git a/kernel/smp.c b/kernel/smp.c
index 5ff14e3739ca..64bb0d48e96f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd)
 	csd->flags &= ~CSD_FLAG_LOCK;
 }
 
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
+
 /*
  * Insert a previously allocated call_single_data element
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static int generic_exec_single(int cpu, struct call_single_data *csd,
+			       smp_call_func_t func, void *info, int wait)
 {
+	struct call_single_data csd_stack = { .flags = 0 };
+	unsigned long flags;
+
+
+	if (cpu == smp_processor_id()) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+		return 0;
+	}
+
+
+	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
+		return -ENXIO;
+
+
+	if (!csd) {
+		csd = &csd_stack;
+		if (!wait)
+			csd = &__get_cpu_var(csd_data);
+	}
+
+	csd_lock(csd);
+
+	csd->func = func;
+	csd->info = info;
+
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
@@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 
 	if (wait)
 		csd_lock_wait(csd);
+
+	return 0;
 }
 
 /*
@@ -168,8 +200,6 @@ void generic_smp_call_function_single_interrupt(void)
 	}
 }
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
-
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
@@ -181,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 			     int wait)
 {
-	struct call_single_data d = {
-		.flags = 0,
-	};
-	unsigned long flags;
 	int this_cpu;
-	int err = 0;
+	int err;
 
 	/*
 	 * prevent preemption and reschedule on another processor,
@@ -203,26 +229,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 		     && !oops_in_progress);
 
-	if (cpu == this_cpu) {
-		local_irq_save(flags);
-		func(info);
-		local_irq_restore(flags);
-	} else {
-		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-			struct call_single_data *csd = &d;
-
-			if (!wait)
-				csd = &__get_cpu_var(csd_data);
-
-			csd_lock(csd);
-
-			csd->func = func;
-			csd->info = info;
-			generic_exec_single(cpu, csd, wait);
-		} else {
-			err = -ENXIO;	/* CPU not online */
-		}
-	}
+	err = generic_exec_single(cpu, NULL, func, info, wait);
 
 	put_cpu();
 
@@ -285,9 +292,8 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
  */
 int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait)
 {
-	unsigned int this_cpu;
-	unsigned long flags;
 	int err = 0;
+	int this_cpu;
 
 	this_cpu = get_cpu();
 	/*
@@ -296,20 +302,12 @@ int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait)
 	 * send smp call function interrupt to this cpu and as such deadlocks
 	 * can't happen.
 	 */
-	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
+	WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled()
 		     && !oops_in_progress);
 
-	if (cpu == this_cpu) {
-		local_irq_save(flags);
-		csd->func(csd->info);
-		local_irq_restore(flags);
-	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-		csd_lock(csd);
-		generic_exec_single(cpu, csd, wait);
-	} else {
-		err = -ENXIO;	/* CPU not online */
-	}
+	err = generic_exec_single(cpu, csd, csd->func, csd->info, wait);
 
 	put_cpu();
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(__smp_call_function_single);
--
cgit v1.2.3
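
For readers following the series, here is a minimal, illustrative sketch of how the
two entry points that now funnel through generic_exec_single() are typically called.
The module scaffolding, the remote_hello() helper and the choice of CPU 1 are
hypothetical and only meant to show the calling convention; only
smp_call_function_single(), __smp_call_function_single() and struct call_single_data
are the interfaces touched by this patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical function executed on the target CPU. */
static void remote_hello(void *info)
{
	pr_info("hello from CPU %d (%s)\n", smp_processor_id(), (char *)info);
}

/* csd for the preallocated variant; must stay valid until the call completes. */
static struct call_single_data hello_csd = {
	.func	= remote_hello,
	.info	= "async",
};

static int __init csd_demo_init(void)
{
	int err;

	/*
	 * Synchronous path: generic_exec_single() runs remote_hello()
	 * inline if CPU 1 is the local CPU, returns -ENXIO if CPU 1 is
	 * offline, and otherwise sends an IPI and waits for completion.
	 */
	err = smp_call_function_single(1, remote_hello, "sync", 1);
	if (err)
		return err;

	/* Preallocated-csd path, same consolidated helper underneath. */
	return __smp_call_function_single(1, &hello_csd, 0);
}

static void __exit csd_demo_exit(void)
{
}

module_init(csd_demo_init);
module_exit(csd_demo_exit);
MODULE_LICENSE("GPL");

Either way the caller now gets the same online check and the same -ENXIO error
path, since both wrappers delegate those decisions to generic_exec_single().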