summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorAnson Huang <b20788@freescale.com>2011-09-27 18:20:19 +0800
committerJason Liu <r64343@freescale.com>2012-01-09 20:23:51 +0800
commitc0a5032d33a78e328ce83cce40f7a598091e9527 (patch)
tree3ddb4d71ea2070fa7c4eba9a2fff1354245c4cbe /kernel
parent4a52a0caab35032e039395838f3ee2a0d472c0f1 (diff)
ENGR00156637 [MX6]Reboot take long time on SMP
Add a workaround for the reboot issue on SMP. With SMP, all the CPUs need to do _rcu_barrier: if we enqueue an rcu callback, we need to make sure the CPU tick stays alive until we take care of those by completing the appropriate grace period. This workaround only takes effect when the reboot command is issued, so it doesn't impact normal kernel features. Signed-off-by: Anson Huang <b20788@freescale.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/rcutree.c11
1 files changed, 10 insertions, 1 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ba06207b1dd3..3585b42eb7cb 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -50,6 +50,7 @@
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
+#include <linux/tick.h>
#include "rcutree.h"
@@ -1480,6 +1481,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
rcu_needs_cpu_flush();
}
+static atomic_t rcu_barrier_cpu_count;
/*
* Wake up the current CPU's kthread. This replaces raise_softirq()
@@ -1536,6 +1538,14 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
return;
}
+ /* Work around for reboot issue, check rcu_barrier_cpu_count
+ to see whether it is in the _rcu_barrier process, do
+ tick_nohz_restart_sched_tick if yes. If we enqueue an rcu
+ callback, we need the CPU tick to stay alive until we take care
+ of those by completing the appropriate grace period. */
+ if (atomic_read(&rcu_barrier_cpu_count) != 0)
+ tick_nohz_restart_sched_tick();
+
/*
* Force the grace period if too many callbacks or too long waiting.
* Enforce hysteresis, and don't invoke force_quiescent_state()
@@ -1750,7 +1760,6 @@ static int rcu_needs_cpu_quick_check(int cpu)
}
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;