author    Simon Horman <horms@verge.net.au>    2011-09-15 20:13:00 +0900
committer Paul Mundt <lethal@linux-sh.org>     2011-10-28 15:03:43 +0900
commit    e66ac3f26aef131f5ca60350d25fba95f43acd0d (patch)
tree      480337b09468bb5e8d1660c218d04a1a63b940e3 /arch/sh
parent    d11584a0449f881181dc94dd697d3f3896c15c73 (diff)
sh: kexec: Add PHYSICAL_START
Add a PHYSICAL_START kernel configuration parameter to set the address at which the kernel should be loaded.

It has been observed on an sh7757lcr that simply modifying MEMORY_START does not achieve this goal for 32-bit sh. This is due to MEMORY_OFFSET in arch/sh/kernel/vmlinux.lds.S not being based on MEMORY_START on such systems.

Signed-off-by: Simon Horman <horms@verge.net.au>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
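In short, the patch threads one new quantity through the build: PHYSICAL_OFFSET = PHYSICAL_START - MEMORY_START, and shifts the link address and the early memory reservations by that amount. A minimal sketch of the arithmetic, using made-up values rather than anything from a real board configuration:

#include <stdio.h>
#include <stdint.h>

/* Made-up values for illustration; the real ones come from Kconfig. */
#define MEMORY_START    0x0c000000u  /* CONFIG_MEMORY_START: base of RAM           */
#define PHYSICAL_START  0x0d000000u  /* CONFIG_PHYSICAL_START: kernel load address */

int main(void)
{
	/* Same expression as the PHYSICAL_OFFSET macro added to asm/page.h below. */
	uint32_t physical_offset = PHYSICAL_START - MEMORY_START;

	printf("PHYSICAL_OFFSET = 0x%08x\n", physical_offset);
	return 0;
}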
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig                13
-rw-r--r--  arch/sh/boot/Makefile           6
-rw-r--r--  arch/sh/include/asm/page.h     10
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S    2
-rw-r--r--  arch/sh/mm/init.c               8
5 files changed, 31 insertions(+), 8 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2d39594bcdd6..5629e2099130 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -645,7 +645,7 @@ config CRASH_DUMP
a specially reserved region and then later executed after
a crash by kdump/kexec. The crash dump kernel must be compiled
to a memory address not used by the main kernel using
- MEMORY_START.
+ PHYSICAL_START.
For more details see Documentation/kdump/kdump.txt
@@ -656,6 +656,17 @@ config KEXEC_JUMP
Jump between original kernel and kexeced kernel and invoke
code via KEXEC
+config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
+ default MEMORY_START
+ ---help---
+ This gives the physical address where the kernel is loaded
+ and is ordinarily the same as MEMORY_START.
+
+ Different values are primarily used in the case of kexec on panic
+ where the fail safe kernel needs to run at a different address
+ than the panic-ed kernel.
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
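As the new help text notes, the kdump case is the one where PHYSICAL_START normally diverges from MEMORY_START: the capture kernel is built to run inside the region the primary kernel reserved with crashkernel=. The following is a hypothetical host-side sanity check, not part of the patch; the crashkernel base and size are invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical numbers for illustration only. */
#define MEMORY_START          0x0c000000u  /* base of RAM on the board                    */
#define CRASHKERNEL_BASE      0x0d000000u  /* base of the crashkernel= reservation        */
#define CRASHKERNEL_SIZE      0x01000000u  /* 16 MiB reserved for the capture kernel      */
#define CRASH_PHYSICAL_START  0x0d000000u  /* CONFIG_PHYSICAL_START of the capture kernel */

int main(void)
{
	/* The capture kernel must load inside the reserved window, not on
	 * top of the primary kernel that sits at the bottom of RAM. */
	if (CRASH_PHYSICAL_START < CRASHKERNEL_BASE ||
	    CRASH_PHYSICAL_START >= CRASHKERNEL_BASE + CRASHKERNEL_SIZE) {
		fprintf(stderr, "PHYSICAL_START is outside the crashkernel region\n");
		return 1;
	}
	printf("capture kernel offset from RAM base: 0x%08x\n",
	       CRASH_PHYSICAL_START - MEMORY_START);
	return 0;
}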
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index ba515d800245..e4ea31a62c55 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -19,6 +19,7 @@ CONFIG_MEMORY_START ?= 0x0c000000
CONFIG_BOOT_LINK_OFFSET ?= 0x00800000
CONFIG_ZERO_PAGE_OFFSET ?= 0x00001000
CONFIG_ENTRY_OFFSET ?= 0x00001000
+CONFIG_PHYSICAL_START ?= $(CONFIG_MEMORY_START)
suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
@@ -48,7 +49,7 @@ $(obj)/romimage/vmlinux: $(obj)/zImage FORCE
$(Q)$(MAKE) $(build)=$(obj)/romimage $@
KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
- $$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
+ $$[$(CONFIG_PHYSICAL_START) & 0x1fffffff]')
KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_PAGE_OFFSET) + \
@@ -114,4 +115,5 @@ $(obj)/uImage: $(obj)/uImage.$(suffix-y)
@echo ' Image $@ is ready'
export CONFIG_PAGE_OFFSET CONFIG_MEMORY_START CONFIG_BOOT_LINK_OFFSET \
- CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET KERNEL_MEMORY suffix-y
+ CONFIG_PHYSICAL_START CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET \
+ KERNEL_MEMORY suffix-y
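The boot wrapper computes KERNEL_MEMORY by masking the configured address with 0x1fffffff, which keeps the low 29 bits; in the traditional 29-bit SH address map that turns a P1/P2 segment address into the matching physical address. Switching the input from CONFIG_MEMORY_START to CONFIG_PHYSICAL_START simply makes the wrapper follow the new load address. A C rendering of the same masking, with an illustrative input value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative value; in the Makefile this comes from CONFIG_PHYSICAL_START. */
	uint32_t physical_start = 0x8c000000u;

	/* Same arithmetic as the $$[... & 0x1fffffff] shell expression above. */
	uint32_t kernel_memory = physical_start & 0x1fffffff;

	printf("KERNEL_MEMORY := 0x%08x\n", kernel_memory);
	return 0;
}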
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 822d6084195b..0dca9a5c6be6 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -113,6 +113,16 @@ typedef struct page *pgtable_t;
#define __MEMORY_SIZE CONFIG_MEMORY_SIZE
/*
+ * PHYSICAL_OFFSET is the offset in physical memory where the base
+ * of the kernel is loaded.
+ */
+#ifdef CONFIG_PHYSICAL_START
+#define PHYSICAL_OFFSET (CONFIG_PHYSICAL_START - __MEMORY_START)
+#else
+#define PHYSICAL_OFFSET 0
+#endif
+
+/*
* PAGE_OFFSET is the virtual address of the start of kernel address
* space.
*/
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 731c10ce67b5..c98905f71e28 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -23,7 +23,7 @@ OUTPUT_ARCH(sh)
ENTRY(_start)
SECTIONS
{
- . = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+ . = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
_text = .; /* Text and read-only data */
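With the extra term, the link start address becomes PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET, so the whole image shifts by the distance between PHYSICAL_START and MEMORY_START. A worked example with illustrative values only; the real numbers depend on the board, and on whether MEMORY_OFFSET tracks MEMORY_START at all (it does not on the 32-bit builds this patch targets):

#include <stdio.h>
#include <stdint.h>

/* Illustrative values, not taken from any particular board file. */
#define PAGE_OFFSET       0x80000000u
#define MEMORY_OFFSET     0x0c000000u
#define PHYSICAL_OFFSET   0x01000000u  /* PHYSICAL_START - MEMORY_START */
#define ZERO_PAGE_OFFSET  0x00001000u  /* CONFIG_ZERO_PAGE_OFFSET       */

int main(void)
{
	uint32_t link_start = PAGE_OFFSET + MEMORY_OFFSET +
			      PHYSICAL_OFFSET + ZERO_PAGE_OFFSET;

	printf("_text links at 0x%08x\n", link_start);  /* 0x8d001000 with these values */
	return 0;
}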
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 58a93fb3d965..c9dbace35b16 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -287,6 +287,8 @@ static void __init do_init_bootmem(void)
static void __init early_reserve_mem(void)
{
unsigned long start_pfn;
+ u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
+ u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
/*
* Partially used pages are not usable - thus
@@ -300,15 +302,13 @@ static void __init early_reserve_mem(void)
* this catches the (definitely buggy) case of us accidentally
* initializing the bootmem allocator with an invalid RAM area.
*/
- memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
- (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
- (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+ memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
/*
* Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
*/
if (CONFIG_ZERO_PAGE_OFFSET != 0)
- memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+ memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
/*
* Handle additional early reservations
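The early reservations now follow the load address as well: the zero page is reserved at __MEMORY_START + PHYSICAL_OFFSET rather than at the RAM base, and the kernel-image reservation starts CONFIG_ZERO_PAGE_OFFSET bytes above that. A small sketch of the resulting ranges, again with illustrative numbers rather than values from a real configuration:

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only. */
#define __MEMORY_START    0x0c000000u
#define PHYSICAL_OFFSET   0x01000000u
#define ZERO_PAGE_OFFSET  0x00001000u
#define KERNEL_END        0x0d400000u  /* stand-in for PFN_PHYS(start_pfn), page aligned */

int main(void)
{
	uint32_t zero_base = __MEMORY_START + PHYSICAL_OFFSET;
	uint32_t start     = zero_base + ZERO_PAGE_OFFSET;

	/* Mirrors the two memblock_reserve() calls in early_reserve_mem(). */
	printf("reserve zero page:    [0x%08x, 0x%08x)\n",
	       zero_base, zero_base + ZERO_PAGE_OFFSET);
	printf("reserve kernel image: [0x%08x, 0x%08x)\n",
	       start, KERNEL_END);
	return 0;
}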