author     Mathias Svensson <idolf@google.com>                   2017-01-06 13:32:39 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>       2017-03-12 06:41:46 +0100
commit     d6407e10bcf540a67afbeca83e60a4323cba8b33 (patch)
tree       b580a7e2b92b7c2797edd8c8fa0cdfb427624b11 /samples
parent     d6dcec965bc53eb375e50642b3b1abb9b835c2a7 (diff)
samples/seccomp: fix 64-bit comparison macros
commit 916cafdc95843fb9af5fd5f83ca499d75473d107 upstream.

There were some bugs in the JNE64 and JLT64 comparison macros. This fixes
them, improves comments, and cleans up the file while we are at it.

Reported-by: Stephen Röttger <sroettger@google.com>
Signed-off-by: Mathias Svensson <idolf@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: James Morris <james.l.morris@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'samples')
-rw-r--r--	samples/seccomp/bpf-helper.h	125
1 file changed, 72 insertions(+), 53 deletions(-)
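As an illustrative sketch only (not part of the patch), the 64-bit jump macros below decompose one 64-bit comparison into two 32-bit comparisons on the high and low words of the syscall argument. For JLT64 ("argument < value"), the logic the fixed macro encodes looks roughly like the following C; the function and variable names are mine, purely for illustration.

#include <stdbool.h>
#include <stdint.h>

static bool arg_lt64(uint64_t arg, uint32_t lo, uint32_t hi)
{
	uint32_t arg_hi = (uint32_t)(arg >> 32);  /* high word; the macros keep it in A and M[1] */
	uint32_t arg_lo = (uint32_t)arg;          /* low word; the macros keep it in M[0] */

	if (arg_hi < hi)        /* high word already smaller: match */
		return true;
	if (arg_hi != hi)       /* high word larger: no match */
		return false;
	return arg_lo < lo;     /* high words equal: the low word decides */
}

Reading the removed lines below, the old JLT64 tested the low word with JGT and took the match branch when the test was false, which amounts to "<=" rather than "<"; the fixed macro uses JGE with the match on the false branch, matching the sketch above.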
diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h
index 38ee70f3cd5b..1d8de9edd858 100644
--- a/samples/seccomp/bpf-helper.h
+++ b/samples/seccomp/bpf-helper.h
@@ -138,7 +138,7 @@ union arg64 {
#define ARG_32(idx) \
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
-/* Loads hi into A and lo in X */
+/* Loads lo into M[0] and hi into M[1] and A */
#define ARG_64(idx) \
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
@@ -153,88 +153,107 @@ union arg64 {
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
jt
-/* Checks the lo, then swaps to check the hi. A=lo,X=hi */
+#define JA32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
+ jt
+
+#define JGE32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
+ jt
+
+#define JGT32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
+ jt
+
+#define JLE32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
+ jt
+
+#define JLT32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
+ jt
+
+/*
+ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
+ * A and M[1]. This invariant is kept by restoring A if necessary.
+ */
#define JEQ64(lo, hi, jt) \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ /* if (lo != arg.lo) goto NOMATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JNE64(lo, hi, jt) \
- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ /* if (hi != arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo != arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JA32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
- jt
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JA64(lo, hi, jt) \
+ /* if (hi & arg.hi) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo & arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
-#define JGE32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
- jt
-
-#define JLT32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
- jt
-
-/* Shortcut checking if hi > arg.hi. */
#define JGE64(lo, hi, jt) \
+ /* if (hi > arg.hi) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo >= arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
- jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JLT64(lo, hi, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
-#define JGT32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
- jt
-
-#define JLE32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
- jt
-
-/* Check hi > args.hi first, then do the GE checking */
#define JGT64(lo, hi, jt) \
+ /* if (hi > arg.hi) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo > arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JLE64(lo, hi, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \
- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ /* if (hi < arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo <= arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JLT64(lo, hi, jt) \
+ /* if (hi < arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo < arg.lo) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
#define LOAD_SYSCALL_NR \
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \