author    Russell King <rmk+kernel@arm.linux.org.uk>    2011-05-07 08:34:36 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2011-05-07 08:34:40 +0100
commit    362607df9faa01fea6ae437b1a50645f33f393c0 (patch)
tree      3575f6e29099a5c199ce003d5cbdcc0ecba49d2d /arch
parent    52fe116376129b29572f55acc9c73ebd485052c9 (diff)
parent    ea9df3b168e641e87dbf889afae16390119e4179 (diff)
Merge first four commits of 'zImage_fixes' of git://git.linaro.org/people/nico/linux into fixes
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/compressed/Makefile         2
-rw-r--r--  arch/arm/boot/compressed/head.S          35
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.in   1
3 files changed, 26 insertions, 12 deletions
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 8ebbb511c783..0c6852d93506 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -74,7 +74,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT)
ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS)
else
ZTEXTADDR := 0
-ZBSSADDR := ALIGN(4)
+ZBSSADDR := ALIGN(8)
endif
SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index adf583cd0c35..49f5b2eaaa87 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -179,15 +179,14 @@ not_angel:
bl cache_on
restart: adr r0, LC0
- ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12}
- ldr sp, [r0, #32]
+ ldmia r0, {r1, r2, r3, r6, r9, r11, r12}
+ ldr sp, [r0, #28]
/*
* We might be running at a different address. We need
* to fix up various pointers.
*/
sub r0, r0, r1 @ calculate the delta offset
- add r5, r5, r0 @ _start
add r6, r6, r0 @ _edata
#ifndef CONFIG_ZBOOT_ROM
@@ -206,31 +205,40 @@ restart: adr r0, LC0
/*
* Check to see if we will overwrite ourselves.
* r4 = final kernel address
- * r5 = start of this image
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want:
- * r4 >= r10 -> OK
- * r4 + image length <= r5 -> OK
+ * r4 - 16k page directory >= r10 -> OK
+ * r4 + image length <= current position (pc) -> OK
*/
+ add r10, r10, #16384
cmp r4, r10
bhs wont_overwrite
add r10, r4, r9
- cmp r10, r5
+ ARM( cmp r10, pc )
+ THUMB( mov lr, pc )
+ THUMB( cmp r10, lr )
bls wont_overwrite
/*
* Relocate ourselves past the end of the decompressed kernel.
- * r5 = start of this image
* r6 = _edata
* r10 = end of the decompressed kernel
* Because we always copy ahead, we need to do it from the end and go
* backward in case the source and destination overlap.
*/
- /* Round up to next 256-byte boundary. */
- add r10, r10, #256
+ /*
+ * Bump to the next 256-byte boundary with the size of
+ * the relocation code added. This avoids overwriting
+ * ourself when the offset is small.
+ */
+ add r10, r10, #((reloc_code_end - restart + 256) & ~255)
bic r10, r10, #255
+ /* Get start of code we want to copy and align it down. */
+ adr r5, restart
+ bic r5, r5, #31
+
sub r9, r6, r5 @ size to copy
add r9, r9, #31 @ rounded up to a multiple
bic r9, r9, #31 @ ... of 32 bytes
@@ -245,6 +253,11 @@ restart: adr r0, LC0
/* Preserve offset to relocated code. */
sub r6, r9, r6
+#ifndef CONFIG_ZBOOT_ROM
+ /* cache_clean_flush may use the stack, so relocate it */
+ add sp, sp, r6
+#endif
+
bl cache_clean_flush
adr r0, BSYM(restart)
@@ -333,7 +346,6 @@ not_relocated: mov r0, #0
LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
- .word _start @ r5
.word _edata @ r6
.word _image_size @ r9
.word _got_start @ r11
@@ -1062,6 +1074,7 @@ memdump: mov r12, r0
#endif
.ltorg
+reloc_code_end:
.align
.section ".stack", "aw", %nobits
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index 5309909d7282..ea80abe78844 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -54,6 +54,7 @@ SECTIONS
.bss : { *(.bss) }
_end = .;
+ . = ALIGN(8); /* the stack must be 64-bit aligned */
.stack : { *(.stack) }
.stab 0 : { *(.stab) }
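
For reference, the rounding that ALIGN(8) performs on the location counter before .stack is sketched below in C; this is illustration only, since the linker's built-in ALIGN() is what the script actually uses. The comment in the hunk gives the motivation: the stack must be 64-bit aligned, matching what AAPCS expects of the stack pointer.

#include <stdint.h>

/* Round an address up to the next 8-byte boundary, as ALIGN(8) does to
 * the location counter before the .stack output section. */
static inline uintptr_t align_up_8(uintptr_t addr)
{
	return (addr + 7) & ~(uintptr_t)7;
}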