path: root/include
author     Venkatesh Pallipadi <venki@google.com>    2011-02-10 10:23:27 +0100
committer  AK <andi@firstfloor.org>                  2011-03-31 11:58:00 -0700
commit     445352de175186a234c6ce1b8e000619ce1d2d1c (patch)
tree       aed5135b6c6406c20cf043bc195fe402d6c117f6 /include
parent     70501ead1f5f8760d9ec1c3fd7f69cfe62c5f35e (diff)
sched: Add IRQ_TIME_ACCOUNTING, finer accounting of irq time
Commit: b52bfee445d315549d41eacf2fa7c156e7d153d5 upstream

s390/powerpc/ia64 have support for CONFIG_VIRT_CPU_ACCOUNTING which does the
fine granularity accounting of user, system, hardirq, softirq times. Adding
that option on archs like x86 will be challenging however, given the state of
TSC reliability on various platforms and also the overhead it will add in
syscall entry exit.

Instead, add a lighter variant that only does finer accounting of hardirq and
softirq times, providing precise irq times (instead of timer tick based
samples). This accounting is added with a new config option
CONFIG_IRQ_TIME_ACCOUNTING so that there won't be any overhead for users not
interested in paying the perf penalty.

This accounting is based on sched_clock, with the code being generic. So,
other archs may find it useful as well.

This patch just adds the core logic and does not enable this logic yet.

Signed-off-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
LKML-Reference: <1286237003-12406-5-git-send-email-venki@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
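The heart of the change (in kernel/sched.c, not shown in this include-only
diff) is to timestamp hardirq/softirq entry and exit with sched_clock() and
accumulate the deltas per cpu, rather than attributing whole timer ticks. A
minimal, self-contained user-space sketch of that idea follows; the names
(local_clock_ns, irqtime_enter, irqtime_exit, per_cpu_hardirq_ns) are
illustrative, not the kernel's, and CLOCK_MONOTONIC stands in for
sched_clock():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t per_cpu_hardirq_ns;    /* one counter per cpu in the real code */
static uint64_t irq_entry_ns;

static uint64_t local_clock_ns(void)   /* stand-in for sched_clock() */
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void irqtime_enter(void)        /* analogous to the hook at irq entry */
{
	irq_entry_ns = local_clock_ns();
}

static void irqtime_exit(void)         /* analogous to the hook at irq exit */
{
	per_cpu_hardirq_ns += local_clock_ns() - irq_entry_ns;
}

int main(void)
{
	irqtime_enter();
	/* ... interrupt handler work would run here ... */
	irqtime_exit();
	printf("hardirq time: %llu ns\n",
	       (unsigned long long)per_cpu_hardirq_ns);
	return 0;
}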
Diffstat (limited to 'include')
-rw-r--r--  include/linux/hardirq.h |  2
-rw-r--r--  include/linux/sched.h   | 13
2 files changed, 14 insertions, 1 deletions
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 41367c5c3c68..ff43e9268449 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -137,7 +137,7 @@ extern void synchronize_irq(unsigned int irq);
struct task_struct;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void account_system_vtime(struct task_struct *tsk)
{
}
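This hunk only widens the guard around the empty account_system_vtime() stub:
when CONFIG_IRQ_TIME_ACCOUNTING is set, the stub must not be emitted so that
the new accounting code can supply a real definition. The hook itself is
already wired into irq entry; roughly, from the same header (quoted from
memory, details vary by kernel version):

#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)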
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0a8beefb41cd..f359bea0983e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1823,6 +1823,19 @@ extern void sched_clock_idle_wakeup_event(u64 delta_ns);
*/
extern unsigned long long cpu_clock(int cpu);
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
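When CONFIG_IRQ_TIME_ACCOUNTING is not set, the two helpers compile to empty
inlines, so callers need no #ifdef of their own. An architecture opts in at
runtime once it knows the clock behind sched_clock() is cheap and usable; the
enabling call is not part of this patch, and the sketch below uses made-up
names (my_arch_late_time_init, platform_clock_is_reliable) purely for
illustration:

static void __init my_arch_late_time_init(void)
{
	/* Opt in only if reading the clock in every irq path is cheap. */
	if (platform_clock_is_reliable())
		enable_sched_clock_irqtime();
}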