path: root/arch/powerpc/include/asm/paca.h
author     Michael Neuling <mikey@neuling.org>                 2013-08-13 15:54:52 +1000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2013-08-14 15:33:18 +1000
commit     bc2e6c6ac21183a6102a926f83186d9cac6713f8 (patch)
tree       ff42417bf9a51e94058fce8aa2aa2339415d5d43 /arch/powerpc/include/asm/paca.h
parent     15863ff3b8dae4cacd831ce10aa34992e9ababb0 (diff)
powerpc: Avoid link stack corruption for MMU on exceptions
When we have MMU-on exceptions (POWER8) and a relocatable kernel, we need to branch from the initial exception vectors at 0x0 up to wherever the kernel has been relocated. Currently we do this using the link register.

Unfortunately this corrupts the link stack, and we should use the count register instead. We did this for the syscall entry path in:

    6a40480 powerpc: Avoid link stack corruption in MMU on syscall entry path

but I stupidly forgot to do the same for the other exceptions.

This patch changes the initial exception vectors to use the count register instead of the link register when we need to branch up to the relocated kernel.

I have a dodgy userspace test which loops calling a function that reads the PVR (mfpvr in userspace is emulated by the kernel via the program check exception). On POWER8 with CONFIG_RELOCATABLE=y, this patch gives a ~10% performance improvement on that test.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
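For readers unfamiliar with the link-stack issue, the change boils down to which special-purpose register carries the high handler address for the indirect branch. The sketch below is illustrative only, not the exact macro from exception-64s.h; the register choice (r12) and handler_label are placeholders, while PACAKBASE and LOAD_HANDLER are the existing helpers for forming a relocated handler address.

	/* Old pattern (illustrative): indirect branch via the link register.
	 * A blr with no matching bl unbalances the hardware link stack,
	 * so subsequent function returns mispredict.
	 */
	ld	r12,PACAKBASE(r13)		/* r13 = paca; high part of kernel base */
	LOAD_HANDLER(r12, handler_label)	/* handler_label is a placeholder */
	mtlr	r12
	blr

	/* New pattern (illustrative): the same branch via the count register,
	 * which the return-address predictor does not track.
	 */
	ld	r12,PACAKBASE(r13)
	LOAD_HANDLER(r12, handler_label)
	mtctr	r12
	bctr

Because CTR is now clobbered this early in exception entry, its previous value has to be preserved somewhere, which is presumably what the paca.h hunk below makes room for.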
Diffstat (limited to 'arch/powerpc/include/asm/paca.h')
-rw-r--r--   arch/powerpc/include/asm/paca.h   6   +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 77c91e74b612..17d40a2a00c4 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -93,9 +93,9 @@ struct paca_struct {
* Now, starting in cacheline 2, the exception save areas
*/
/* used for most interrupts/exceptions */
- u64 exgen[12] __attribute__((aligned(0x80)));
- u64 exmc[12]; /* used for machine checks */
- u64 exslb[12]; /* used for SLB/segment table misses
+ u64 exgen[13] __attribute__((aligned(0x80)));
+ u64 exmc[13]; /* used for machine checks */
+ u64 exslb[13]; /* used for SLB/segment table misses
* on the linear mapping */
/* SLB related definitions */
u16 vmalloc_sllp;
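Each exception save area grows from 12 to 13 u64 slots, presumably to give the entry code somewhere to stash the interrupted context's CTR now that the count register is clobbered by the branch to the relocated handler. A minimal sketch of how such a slot could be used, assuming a hypothetical EX_CTR offset naming the new 13th doubleword (96 bytes into the area) and r10 as a free scratch register:

	mfctr	r10
	std	r10,PACA_EXGEN+EX_CTR(r13)	/* save the old CTR into the new slot */
	/* ... load the handler address and branch to it via mtctr/bctr ... */
	ld	r10,PACA_EXGEN+EX_CTR(r13)	/* handler restores CTR before returning */
	mtctr	r10

The aligned(0x80) attribute on exgen is unchanged, so the areas still start on a 128-byte cache-line boundary; only their length changes.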