-rw-r--r--   kernel/lockdep.c             2
-rw-r--r--   kernel/lockdep_internals.h   4
2 files changed, 5 insertions, 1 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1b58a1bbcc87..9cf79858fd82 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2303,7 +2303,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
		 * so this is racy by nature but losing one hit
		 * in a stat is not a big deal.
		 */
-		this_cpu_inc(lockdep_stats.redundant_hardirqs_on);
+		__debug_atomic_inc(redundant_hardirqs_on);
		return;
	}
	/* we'll do an OFF -> ON transition: */
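The comment above carries the whole rationale for this hunk: trace_hardirqs_on_caller() can run with both interrupts and preemption enabled, so the per-cpu bump may race and occasionally drop a hit, which is acceptable for a debug statistic. As a loose userspace illustration of that kind of loss (a plain shared counter plus pthreads standing in for the kernel's per-cpu machinery, so this is only an analogy, not kernel code), a non-atomic increment hammered from several threads usually comes up short:

/*
 * Hypothetical userspace sketch: lost updates from a plain, non-atomic
 * increment. Build with: cc -O2 -pthread racy_inc.c
 */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4
#define NITERS   1000000L

/*
 * volatile keeps the compiler from collapsing the loop into one big add,
 * but the read-modify-write is still not atomic, so hits can be lost.
 */
static volatile unsigned long redundant_hardirqs_on;

static void *hammer(void *arg)
{
	(void)arg;
	for (long i = 0; i < NITERS; i++)
		redundant_hardirqs_on++;	/* racy, like the unchecked stat bump */
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, hammer, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	/* Typically prints somewhat less than 4000000: a few hits are lost. */
	printf("counted %lu of %ld increments\n",
	       (unsigned long)redundant_hardirqs_on, NTHREADS * NITERS);
	return 0;
}

Losing a handful of increments only skews a statistic; it never corrupts lockdep state, which is why the unchecked variant is acceptable at this call site.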
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 2b174762fa0e..7de27a80f802 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -139,6 +139,9 @@ struct lockdep_stats {
DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

+#define __debug_atomic_inc(ptr)			\
+	this_cpu_inc(lockdep_stats.ptr);
+
#define debug_atomic_inc(ptr)		{	\
	WARN_ON_ONCE(!irqs_disabled());		\
	this_cpu_inc(lockdep_stats.ptr);	\
@@ -160,6 +163,7 @@ DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
	__total;				\
})
#else
+# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
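Taken together, the header now provides two flavours of the counter bump: debug_atomic_inc(), which insists on interrupts being disabled and warns otherwise, and the new __debug_atomic_inc(), which skips the check for call sites such as the one above that legitimately run with irqs on. The following standalone sketch mocks the kernel helpers (this_cpu_inc(), WARN_ON_ONCE() and irqs_disabled() are hypothetical userspace stand-ins here, and the warn-only-once semantics are not modelled) purely to show how the two macros differ in behaviour:

/* Standalone sketch of the checked vs. unchecked stat increment. */
#include <stdio.h>

struct lockdep_stats {
	unsigned long redundant_hardirqs_on;
};

/* Stand-in for the real per-CPU variable. */
static struct lockdep_stats lockdep_stats;

/* Pretend interrupts are enabled, as in trace_hardirqs_on_caller(). */
static int irqs_disabled(void) { return 0; }

/* Simplified: warns every time, unlike the real WARN_ON_ONCE(). */
#define WARN_ON_ONCE(cond)						\
	do { if (cond) fprintf(stderr, "WARN_ON_ONCE(%s)\n", #cond); } while (0)

/* Stand-in for this_cpu_inc(): a plain, non-atomic increment. */
#define this_cpu_inc(var)	((var)++)

/* Checked variant: complains when called with interrupts enabled. */
#define debug_atomic_inc(ptr) do {				\
	WARN_ON_ONCE(!irqs_disabled());				\
	this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

/* Unchecked variant: racy by nature, but losing a hit in a stat is fine. */
#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr)

int main(void)
{
	__debug_atomic_inc(redundant_hardirqs_on);	/* silent */
	debug_atomic_inc(redundant_hardirqs_on);	/* warns: irqs are "enabled" */
	printf("redundant_hardirqs_on = %lu\n",
	       lockdep_stats.redundant_hardirqs_on);
	return 0;
}

Note that the real __debug_atomic_inc() in the hunk above keeps a trailing semicolon in its body; the sketch drops it so the macro can be used like an ordinary statement.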