author	Linus Torvalds <torvalds@linux-foundation.org>	2014-02-28 11:43:42 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-02-28 11:43:42 -0800
commit	78d9e93440dd6a31d6175fbecb2f2b446d821f7c (patch)
tree	7fd0f6a9a55e0de1c24b003dc90c2b45ddc60807
parent	f94def76020cae105ea73711cd1a0fbcb06025ad (diff)
parent	b57fc9e80692043e2a3a74e1d2c047eb700dcd0c (diff)

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull ARM64 fixes from Catalin Marinas:
 - !CONFIG_SMP build fix
 - pte bit testing macros conversion fix (int truncates top bits of long)
 - stack unwinding PC calculation fix

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix !CONFIG_SMP kernel build
  arm64: mm: Add double logical invert to pte accessors
  ARM64: unwind: Fix PC calculation
-rw-r--r--	arch/arm64/include/asm/percpu.h	8
-rw-r--r--	arch/arm64/include/asm/pgtable.h	10
-rw-r--r--	arch/arm64/kernel/stacktrace.c	6
3 files changed, 18 insertions, 6 deletions
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 13fb0b3efc5f..453a179469a3 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#ifdef CONFIG_SMP
+
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
}
#define __my_cpu_offset __my_cpu_offset()
+#else /* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
#include <asm-generic/percpu.h>
#endif /* __ASM_PERCPU_H */
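The !CONFIG_SMP half of the fix only has to keep the generic per-CPU machinery compiling on a single-CPU build: the offset setter collapses to a no-op and asm-generic/percpu.h supplies a zero offset. Below is a minimal user-space sketch of that split; MY_CONFIG_SMP, fake_tpidr_el1 and my_cpu_offset() are illustrative stand-ins, not kernel names.

#include <stdio.h>

#define MY_CONFIG_SMP	1	/* flip to 0 to model a !CONFIG_SMP build */

#if MY_CONFIG_SMP

/* Stand-in for the tpidr_el1 system register used by the real arm64 code. */
static unsigned long fake_tpidr_el1;

static inline void set_my_cpu_offset(unsigned long off)
{
	fake_tpidr_el1 = off;
}

static inline unsigned long my_cpu_offset(void)
{
	return fake_tpidr_el1;
}

#else /* !MY_CONFIG_SMP */

/* Single CPU: nothing to program, the per-CPU offset is always 0. */
#define set_my_cpu_offset(x)	do { } while (0)
#define my_cpu_offset()		0UL

#endif

int main(void)
{
	set_my_cpu_offset(0x1000);
	printf("per-CPU offset: %#lx\n", my_cpu_offset());
	return 0;
}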
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b524dcd17243..aa3917c8b623 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -136,11 +136,11 @@ extern struct page *empty_zero_page;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & PTE_AF)
-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte) (pte_val(pte) & PTE_WRITE)
+#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_valid_user(pte) \
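The accessor change above addresses the "int truncates top bits of long" problem from the pull summary: arm64 keeps several pte status bits above bit 31 (PTE_WRITE is bit 57, for example), so a caller that stores the raw masked value in an int loses the bit, while the double logical invert normalizes the result to 0 or 1 before any narrowing conversion. A stand-alone demonstration, with FAKE_PTE_WRITE as a hypothetical stand-in bit and assuming a typical 64-bit toolchain:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for an arm64 pte bit that lives above bit 31; the exact
 * position is only illustrative here. */
#define FAKE_PTE_WRITE	(UINT64_C(1) << 57)

int main(void)
{
	uint64_t pte = FAKE_PTE_WRITE;	/* a writable pte, nothing else set */

	/* Old accessor shape: the caller receives the raw masked value.
	 * Stored in an int, the high bits are discarded and the set bit
	 * is lost (0 on a typical implementation). */
	int old_style = (int)(pte & FAKE_PTE_WRITE);

	/* New accessor shape: !! collapses the masked value to 0 or 1
	 * before any narrowing conversion can lose it. */
	int new_style = !!(pte & FAKE_PTE_WRITE);

	printf("old style: %d (bit lost)\n", old_style);
	printf("new style: %d (bit preserved)\n", new_style);
	return 0;
}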
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c3b6c63ea5fb..38f0558f0c0a 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
- frame->pc = *(unsigned long *)(fp + 8);
+ /*
+ * -4 here because we care about the PC at time of bl,
+ * not where the return will go.
+ */
+ frame->pc = *(unsigned long *)(fp + 8) - 4;
return 0;
}
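The unwinder fix relies on AArch64's fixed 4-byte instruction size: bl stores the address of the following instruction in the link register, and that value ends up in the frame record, so subtracting 4 recovers the call site itself, which is what a backtrace should report. A small user-space illustration with a made-up address:

#include <stdio.h>

#define AARCH64_INSN_SIZE	4	/* every AArch64 instruction is 4 bytes */

int main(void)
{
	/* Hypothetical address of a bl instruction in some caller. */
	unsigned long bl_addr  = 0x400568UL;

	/* bl saves the address of the *next* instruction in the link
	 * register, and that is what gets written to the frame record. */
	unsigned long saved_lr = bl_addr + AARCH64_INSN_SIZE;

	/* What unwind_frame() now reports: step back one instruction so
	 * the backtrace points at the call, not at the return target. */
	unsigned long frame_pc = saved_lr - AARCH64_INSN_SIZE;

	printf("saved lr: %#lx (return address)\n", saved_lr);
	printf("frame pc: %#lx (call site)\n", frame_pc);
	return 0;
}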