-rw-r--r--   include/trace/lockdep.h              |  9
-rw-r--r--   include/trace/lockdep_event_types.h  | 44
-rw-r--r--   include/trace/trace_event_types.h    |  1
-rw-r--r--   include/trace/trace_events.h         |  1
-rw-r--r--   kernel/lockdep.c                     | 17
-rw-r--r--   kernel/trace/trace.c                 | 14
-rw-r--r--   kernel/trace/trace_events_stage_3.h  |  4
7 files changed, 82 insertions(+), 8 deletions(-)
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
new file mode 100644
index 000000000000..5ca67df87f2a
--- /dev/null
+++ b/include/trace/lockdep.h
@@ -0,0 +1,9 @@
+#ifndef _TRACE_LOCKDEP_H
+#define _TRACE_LOCKDEP_H
+
+#include <linux/lockdep.h>
+#include <linux/tracepoint.h>
+
+#include <trace/lockdep_event_types.h>
+
+#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
new file mode 100644
index 000000000000..f713d74a82b4
--- /dev/null
+++ b/include/trace/lockdep_event_types.h
@@ -0,0 +1,44 @@
+
+#ifndef TRACE_EVENT_FORMAT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lock
+
+#ifdef CONFIG_LOCKDEP
+
+TRACE_FORMAT(lock_acquire,
+ TPPROTO(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check,
+ struct lockdep_map *next_lock, unsigned long ip),
+ TPARGS(lock, subclass, trylock, read, check, next_lock, ip),
+ TPFMT("%s%s%s", trylock ? "try " : "",
+ read ? "read " : "", lock->name)
+ );
+
+TRACE_FORMAT(lock_release,
+ TPPROTO(struct lockdep_map *lock, int nested, unsigned long ip),
+ TPARGS(lock, nested, ip),
+ TPFMT("%s", lock->name)
+ );
+
+#ifdef CONFIG_LOCK_STAT
+
+TRACE_FORMAT(lock_contended,
+ TPPROTO(struct lockdep_map *lock, unsigned long ip),
+ TPARGS(lock, ip),
+ TPFMT("%s", lock->name)
+ );
+
+TRACE_FORMAT(lock_acquired,
+ TPPROTO(struct lockdep_map *lock, unsigned long ip),
+ TPARGS(lock, ip),
+ TPFMT("%s", lock->name)
+ );
+
+#endif
+#endif
+
+#undef TRACE_SYSTEM
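Note: each TRACE_FORMAT() entry above bundles the tracepoint prototype (TPPROTO), its argument list (TPARGS) and a printk-style format (TPFMT). When this header is pulled in through the event machinery, the stage-3 macro touched at the end of this patch turns each entry into a small handler. A rough, illustrative sketch of what that expansion looks like for lock_acquire (names follow the #call pattern in trace_events_stage_3.h; this is not literal preprocessor output):

static void ftrace_event_lock_acquire(struct lockdep_map *lock,
				      unsigned int subclass, int trylock,
				      int read, int check,
				      struct lockdep_map *next_lock,
				      unsigned long ip)
{
	/* "<call>: " and the TPFMT string are adjacent literals and
	 * get concatenated by the compiler. */
	event_trace_printk(_RET_IP_, "lock_acquire: " "%s%s%s",
			   trylock ? "try " : "",
			   read ? "read " : "", lock->name);
}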
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
index 33c8ed5ccb6c..df56f5694be6 100644
--- a/include/trace/trace_event_types.h
+++ b/include/trace/trace_event_types.h
@@ -2,3 +2,4 @@
#include <trace/sched_event_types.h>
#include <trace/irq_event_types.h>
+#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index ea2ef2051762..fd13750ca4ba 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -2,3 +2,4 @@
#include <trace/sched.h>
#include <trace/irq.h>
+#include <trace/lockdep.h>
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 02014f7ccc86..cb70c1db85d0 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
+#include <trace/lockdep.h>
#include <asm/sections.h>
@@ -2913,6 +2914,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
}
EXPORT_SYMBOL_GPL(lock_set_class);
+DEFINE_TRACE(lock_acquire);
+
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
@@ -2923,6 +2926,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
{
unsigned long flags;
+ trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
if (unlikely(current->lockdep_recursion))
return;
@@ -2937,11 +2942,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
}
EXPORT_SYMBOL_GPL(lock_acquire);
+DEFINE_TRACE(lock_release);
+
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip)
{
unsigned long flags;
+ trace_lock_release(lock, nested, ip);
+
if (unlikely(current->lockdep_recursion))
return;
@@ -3090,10 +3099,14 @@ found_it:
lock->ip = ip;
}
+DEFINE_TRACE(lock_contended);
+
void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
+ trace_lock_contended(lock, ip);
+
if (unlikely(!lock_stat))
return;
@@ -3109,10 +3122,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
}
EXPORT_SYMBOL_GPL(lock_contended);
+DEFINE_TRACE(lock_acquired);
+
void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
+ trace_lock_acquired(lock, ip);
+
if (unlikely(!lock_stat))
return;
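Note: the lockdep.c hunks follow the usual tracepoint pattern: DEFINE_TRACE(name) instantiates the point and trace_<name>(...) fires it at the hook site, placed before the lockdep_recursion check so the event is still emitted when lockdep bails out on itself. A consumer can attach a probe with the register_trace_<name>() helper that the tracepoint declaration generates; the sketch below is illustrative only (probe_lock_release and the module wrapper are not part of this patch) and assumes the probe signature matches the TPPROTO of lock_release with CONFIG_LOCKDEP=y:

#include <linux/module.h>
#include <trace/lockdep.h>

/* Hypothetical probe: report every lock_release event. */
static void probe_lock_release(struct lockdep_map *lock, int nested,
			       unsigned long ip)
{
	printk(KERN_DEBUG "released %s (caller %lx)\n", lock->name, ip);
}

static int __init lock_release_probe_init(void)
{
	/* register_trace_lock_release() is generated for the
	 * lock_release tracepoint declared via trace/lockdep.h. */
	return register_trace_lock_release(probe_lock_release);
}
module_init(lock_release_probe_init);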
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 12539f72f4a5..c8abbb0c8397 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -623,7 +623,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -735,7 +735,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!spin_trylock(&trace_cmdline_lock))
+ if (!__raw_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
@@ -753,7 +753,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
- spin_unlock(&trace_cmdline_lock);
+ __raw_spin_unlock(&trace_cmdline_lock);
}
char *trace_find_cmdline(int pid)
@@ -3751,7 +3751,7 @@ static __init int tracer_init_debugfs(void)
int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
- static DEFINE_SPINLOCK(trace_buf_lock);
+ static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ring_buffer_event *event;
@@ -3773,7 +3773,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
goto out;
pause_graph_tracing();
- spin_lock_irqsave(&trace_buf_lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&trace_buf_lock);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
len = min(len, TRACE_BUF_SIZE-1);
@@ -3792,7 +3793,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
ring_buffer_unlock_commit(tr->buffer, event);
out_unlock:
- spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+ __raw_spin_unlock(&trace_buf_lock);
+ raw_local_irq_restore(irq_flags);
unpause_graph_tracing();
out:
preempt_enable_notrace();
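Note: the trace.c hunks are needed because the new tracepoints make lockdep call back into the tracer. If trace_save_cmdline() or trace_vprintk() then took an ordinary, lockdep-tracked spinlock, lockdep would be re-entered from inside its own hooks. Converting the two locks to arch-level raw spinlocks (and doing the irq disabling by hand) keeps the tracer invisible to lockdep. A minimal sketch of the resulting pattern, with an illustrative lock name that is not in the patch:

/* Illustrative only -- the pattern the two conversions above follow. */
static raw_spinlock_t example_lock = __RAW_SPIN_LOCK_UNLOCKED;

static void example_update(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* replaces spin_lock_irqsave() */
	__raw_spin_lock(&example_lock);	/* bypasses lockdep and tracepoints */

	/* ... update tracer-internal state ... */

	__raw_spin_unlock(&example_lock);
	raw_local_irq_restore(flags);
}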
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index 041789ffbac1..2c8d76c7dbed 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -5,7 +5,7 @@
*
* static void ftrace_event_<call>(proto)
* {
- * event_trace_printk(_RET_IP_, "(<call>) " <fmt>);
+ * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
* }
*
* static int ftrace_reg_event_<call>(void)
@@ -112,7 +112,7 @@
#define _TRACE_FORMAT(call, proto, args, fmt) \
static void ftrace_event_##call(proto) \
{ \
- event_trace_printk(_RET_IP_, "(" #call ") " fmt); \
+ event_trace_printk(_RET_IP_, #call ": " fmt); \
} \
\
static int ftrace_reg_event_##call(void) \
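Note: the _TRACE_FORMAT change only affects how each event is labelled in the trace buffer, switching from a parenthesised call name to a "name: " prefix. For the lock_release event defined earlier in this patch, the generated call would roughly change as follows (hypothetical expansion; adjacent string literals concatenate):

	/* before */
	event_trace_printk(_RET_IP_, "(lock_release) " "%s", lock->name);
	/* after */
	event_trace_printk(_RET_IP_, "lock_release: " "%s", lock->name);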