author    Daniel Borkmann <daniel@iogearbox.net>    2019-08-16 23:59:20 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-08-25 10:51:40 +0200
commit    5124abda3060e2eab506fb14a27acadee3c3e396 (patch)
tree      acce94a18f6b90ecb28babe8166a2118bc1b420c /kernel
parent    43729e6fea58fe2c4fcc1e83547018ede540839d (diff)
bpf: get rid of pure_initcall dependency to enable jits
commit fa9dd599b4dae841924b022768354cfde9affecb upstream.

Having a pure_initcall() callback just to permanently enable BPF JITs
under CONFIG_BPF_JIT_ALWAYS_ON is unnecessary and could leave a small
race window in future where JIT is still disabled on boot. Since we know
about the setting at compilation time anyway, just initialize it properly
there. Also consolidate all the individual bpf_jit_enable variables into
a single one and move them under one location. Moreover, don't allow for
setting unspecified garbage values on them.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
[bwh: Backported to 4.9 as dependency of commit 2e4a30983b0f
 "bpf: restrict access to core bpf sysctls":
 - Drop change in arch/mips/net/ebpf_jit.c
 - Drop change to bpf_jit_kallsyms
 - Adjust filenames, context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
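The mechanism the message relies on is that IS_BUILTIN() collapses to a
compile-time 0 or 1, so bpf_jit_enable already holds the right value without
any initcall. Below is a minimal userspace sketch of that macro trick, not
part of the patch: the macro bodies mirror include/linux/kconfig.h in spirit,
and passing -DCONFIG_BPF_JIT_ALWAYS_ON=1 on the compiler command line stands
in for the Kconfig-generated define.

/* If CONFIG_BPF_JIT_ALWAYS_ON is defined to 1, the token paste selects
 * __ARG_PLACEHOLDER_1, which expands to "0," and makes the literal 1
 * become the second argument; otherwise the result is the literal 0.
 * Either way the initializer is a compile-time constant.
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_BUILTIN(option) __is_defined(option)

/* Stand-in for the new initializer in kernel/bpf/core.c. */
static int bpf_jit_enable = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);

int main(void)
{
        printf("bpf_jit_enable = %d\n", bpf_jit_enable);
        return 0;
}

Built with "gcc -DCONFIG_BPF_JIT_ALWAYS_ON=1 sketch.c" this prints 1; without
the define it prints 0, i.e. the JIT defaults to off unless
CONFIG_BPF_JIT_ALWAYS_ON is built in.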
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/bpf/core.c    15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 879ca844ba1d..da03ab4ec578 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -208,6 +208,10 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 }
 
 #ifdef CONFIG_BPF_JIT
+/* All BPF JIT sysctl knobs here. */
+int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
+int bpf_jit_harden __read_mostly;
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                      unsigned int alignment,
@@ -244,8 +248,6 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
         module_memfree(hdr);
 }
 
-int bpf_jit_harden __read_mostly;
-
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
                               const struct bpf_insn *aux,
                               struct bpf_insn *to_buff)
@@ -925,8 +927,13 @@ load_byte:
 STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */
 
 #else
-static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
+static unsigned int __bpf_prog_ret0_warn(void *ctx,
+                                         const struct bpf_insn *insn)
 {
+        /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+         * is not working properly, so warn about it!
+         */
+        WARN_ON_ONCE(1);
         return 0;
 }
 #endif
@@ -981,7 +988,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
         fp->bpf_func = (void *) __bpf_prog_run;
 #else
-        fp->bpf_func = (void *) __bpf_prog_ret0;
+        fp->bpf_func = (void *) __bpf_prog_ret0_warn;
 #endif
 
         /* eBPF JITs can rewrite the program in case constant