summaryrefslogtreecommitdiff
path: root/kernel/rcutree_plugin.h
diff options
context:
space:
mode:
author: Paul E. McKenney <paul.mckenney@linaro.org> 2011-11-22 14:58:03 -0800
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 2011-12-11 10:32:00 -0800
commit 433cdddcd9ac5558068edd7f8d4707a70f7710f5 (patch)
tree 7f3686eb64c9ee3a239e5920df588c70837c5637 /kernel/rcutree_plugin.h
parent 045fb9315a2129023d70a0eecf0942e18fca4fcd (diff)
rcu: Add tracing for RCU_FAST_NO_HZ
This commit adds trace_rcu_prep_idle(), which is invoked from rcu_prepare_for_idle() and rcu_wake_cpu() to trace attempts on the part of RCU to force CPUs into dyntick-idle mode.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index b70ca8cc52e1..6467f5669ab7 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2031,10 +2031,13 @@ static void rcu_prepare_for_idle(int cpu)
/* If no callbacks or in the holdoff period, enter dyntick-idle. */
if (!rcu_cpu_has_callbacks(cpu)) {
per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+ trace_rcu_prep_idle("No callbacks");
return;
}
- if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
+ if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+ trace_rcu_prep_idle("In holdoff");
return;
+ }
/* Check and update the rcu_dyntick_drain sequencing. */
if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
@@ -2044,9 +2047,11 @@ static void rcu_prepare_for_idle(int cpu)
/* We have hit the limit, so time to give up. */
per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
if (!rcu_pending(cpu)) {
+ trace_rcu_prep_idle("Dyntick with callbacks");
per_cpu(rcu_awake_at_gp_end, cpu) = 1;
return; /* Nothing to do immediately. */
}
+ trace_rcu_prep_idle("Begin holdoff");
invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
return;
}
@@ -2073,9 +2078,15 @@ static void rcu_prepare_for_idle(int cpu)
c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
}
- /* If RCU callbacks are still pending, RCU still needs this CPU. */
- if (c)
+ /*
+ * If RCU callbacks are still pending, RCU still needs this CPU.
+ * So try forcing the callbacks through the grace period.
+ */
+ if (c) {
+ trace_rcu_prep_idle("More callbacks");
invoke_rcu_core();
+ } else
+ trace_rcu_prep_idle("Callbacks drained");
}
/*
@@ -2085,6 +2096,7 @@ static void rcu_prepare_for_idle(int cpu)
*/
static void rcu_wake_cpu(void *unused)
{
+ trace_rcu_prep_idle("CPU awakened at GP end");
invoke_rcu_core();
}