author     Andy Lutomirski <luto@kernel.org>    2015-11-12 12:59:03 -0800
committer  Ingo Molnar <mingo@kernel.org>       2015-11-24 09:56:44 +0100
commit     2671c3e4fe2a34bd9bf2eecdf5d1149d4b55dbdf (patch)
tree       5b5b1a0394bf34c6747aa5c2e266a33a16862c1f /arch/x86/include/asm/jump_label.h
parent     c28454332fe0b65e22c3a2717e5bf05b5b47ca20 (diff)
x86/asm: Add asm macros for static keys/jump labels
Unfortunately, we can only do this if HAVE_JUMP_LABEL. In principle, we could do some serious surgery on the core jump label infrastructure to keep the patch infrastructure available on x86 on all builds, but that's probably not worth it.

Implementing the macros using a conditional branch as a fallback seems like a bad idea: we'd have to clobber flags.

This limitation can't cause silent failures -- trying to include asm/jump_label.h at all on a non-HAVE_JUMP_LABEL kernel will error out. The macro's users are responsible for handling this issue themselves.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/63aa45c4b692e8469e1876d6ccbb5da707972990.1447361906.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
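[Note] As a rough illustration of the guard the last paragraph asks callers to provide, a minimal sketch of how an .S file might consume these macros, assuming HAVE_JUMP_LABEL is visible to the preprocessor in that file. The names my_feature_key, my_feature_func and CALL_MY_FEATURE are made up for this sketch and are not part of the patch; without HAVE_JUMP_LABEL the sketch simply falls back to always making the call.

    #ifdef HAVE_JUMP_LABEL
    # include <asm/jump_label.h>	/* would #error on a non-jump-label build */
    #endif

    .macro CALL_MY_FEATURE
    #ifdef HAVE_JUMP_LABEL
    	/*
    	 * Emitted as a jmp.d32 over the call (the key defaults to false,
    	 * hence def=0); patched to a 5-byte NOP once the key is enabled.
    	 */
    	STATIC_JUMP_IF_FALSE .Lskip_my_feature_\@, my_feature_key, 0
    #endif
    	call	my_feature_func
    .Lskip_my_feature_\@:
    .endm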
Diffstat (limited to 'arch/x86/include/asm/jump_label.h')
-rw-r--r--   arch/x86/include/asm/jump_label.h   52
1 file changed, 44 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 96872dc96597..adc54c12cbd1 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -14,13 +14,6 @@
#error asm/jump_label.h included on a non-jump-label kernel
#endif
-#ifndef __ASSEMBLY__
-
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <asm/nops.h>
-#include <asm/asm.h>
-
#define JUMP_LABEL_NOP_SIZE 5
#ifdef CONFIG_X86_64
@@ -29,6 +22,14 @@
# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
#endif
+#include <asm/asm.h>
+#include <asm/nops.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:"
@@ -72,5 +73,40 @@ struct jump_entry {
jump_label_t key;
};
-#endif /* __ASSEMBLY__ */
+#else /* __ASSEMBLY__ */
+
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+ .if \def
+ /* Equivalent to "jmp.d32 \target" */
+ .byte 0xe9
+ .long \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+ .else
+ .byte STATIC_KEY_INIT_NOP
+ .endif
+ .pushsection __jump_table, "aw"
+ _ASM_ALIGN
+ _ASM_PTR .Lstatic_jump_\@, \target, \key
+ .popsection
+.endm
+
+.macro STATIC_JUMP_IF_FALSE target, key, def
+.Lstatic_jump_\@:
+ .if \def
+ .byte STATIC_KEY_INIT_NOP
+ .else
+ /* Equivalent to "jmp.d32 \target" */
+ .byte 0xe9
+ .long \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+ .endif
+ .pushsection __jump_table, "aw"
+ _ASM_ALIGN
+ _ASM_PTR .Lstatic_jump_\@, \target, \key + 1
+ .popsection
+.endm
+
+#endif /* __ASSEMBLY__ */
+
#endif
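[Note] For completeness, a hedged sketch of the C side such an assembly site would pair with, reusing the hypothetical my_feature_key from the note above. When the key flips, the jump label core rewrites the 5-byte slot the macro emitted, switching between STATIC_KEY_INIT_NOP and the jmp.d32.

    #include <linux/jump_label.h>

    /* Defaults to false, matching def=0 at the hypothetical assembly site. */
    struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

    void my_feature_enable(void)
    {
    	/* Asm site becomes a NOP, so the call is no longer skipped. */
    	static_key_slow_inc(&my_feature_key);
    }

    void my_feature_disable(void)
    {
    	/* Asm site becomes a jmp again, skipping the call. */
    	static_key_slow_dec(&my_feature_key);
    }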