Diffstat:
 -rw-r--r--  arch/arm/include/asm/elf.h      68
 -rw-r--r--  arch/arm/include/asm/page.h      5
 -rw-r--r--  arch/arm/include/asm/pgtable.h  47
 -rw-r--r--  arch/arm/kernel/Makefile         2
 -rw-r--r--  arch/arm/kernel/elf.c           79
 -rw-r--r--  arch/arm/kernel/module.c         2
 6 files changed, 145 insertions(+), 58 deletions(-)
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 7ea302c14a59..5be016980c19 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -18,9 +18,32 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fp elf_fpregset_t;
#define EM_ARM 40
-#define EF_ARM_APCS26 0x08
-#define EF_ARM_SOFT_FLOAT 0x200
-#define EF_ARM_EABI_MASK 0xFF000000
+
+#define EF_ARM_EABI_MASK 0xff000000
+#define EF_ARM_EABI_UNKNOWN 0x00000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */
+#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */
+#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */
+#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */
+#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */
+#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */
+#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */
+#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */
+#define EF_ARM_PIC 0x00000020 /* ABI 0 */
+#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */
+#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */
+#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */
+#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */
+#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */
+#define EF_ARM_HASENTRY 0x00000002 /* All */
+#define EF_ARM_RELEXEC 0x00000001 /* All */
#define R_ARM_NONE 0
#define R_ARM_PC24 1
@@ -57,23 +80,16 @@ typedef struct user_fp elf_fpregset_t;
extern char elf_platform[];
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x))
+struct elf32_hdr;
/*
- * 32-bit code is always OK. Some cpus can do 26-bit, some can't.
+ * This is used to ensure we don't load something for the wrong architecture.
*/
-#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
-
-#define ELF_THUMB_OK(x) \
- ((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \
- ((x)->e_entry & 3) == 0)
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
-#define ELF_26BIT_OK(x) \
- ((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \
- ((x)->e_flags & EF_ARM_APCS26) == 0)
+extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
+#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
@@ -90,23 +106,7 @@ extern char elf_platform[];
have no such handler. */
#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0
-/*
- * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
- * and CP1, we only enable access to the iWMMXt coprocessor if the
- * binary is EABI or softfloat (and thus, guaranteed not to use
- * FPA instructions.)
- */
-#define SET_PERSONALITY(ex, ibcs2) \
- do { \
- if ((ex).e_flags & EF_ARM_APCS26) { \
- set_personality(PER_LINUX); \
- } else { \
- set_personality(PER_LINUX_32BIT); \
- if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \
- set_thread_flag(TIF_USING_IWMMXT); \
- else \
- clear_thread_flag(TIF_USING_IWMMXT); \
- } \
- } while (0)
+extern void elf_set_personality(const struct elf32_hdr *);
+#define SET_PERSONALITY(ex, ibcs2) elf_set_personality(&(ex))
#endif
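
Note: the EABI version is carried in the top byte of e_flags, so it can be decoded with the masks defined above. An illustrative helper (not part of this patch):

static inline unsigned int ef_arm_eabi_version(unsigned int e_flags)
{
	/* 0 means EF_ARM_EABI_UNKNOWN (old ABI); 1..5 select the EABI version,
	 * in which case most of the low-order EF_ARM_* bits do not apply. */
	return (e_flags & EF_ARM_EABI_MASK) >> 24;
}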
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index cf2e2680daaa..bed1c0a00368 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -184,8 +184,9 @@ typedef struct page *pgtable_t;
#endif /* !__ASSEMBLY__ */
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
/*
* With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
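
Note: whether a process's data/bss mappings pick up VM_EXEC now follows the READ_IMPLIES_EXEC personality bit, which arm_elf_read_implies_exec() (added below) decides at exec time. A minimal userspace sketch, illustrative only, for observing the bit in a running process:

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
	/* personality(0xffffffff) queries the current persona without changing it */
	unsigned long persona = personality(0xffffffff);

	printf("READ_IMPLIES_EXEC is %s\n",
	       (persona & READ_IMPLIES_EXEC) ? "set" : "clear");
	return 0;
}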
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index e5054b026c24..b02be6c55aef 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -197,22 +197,29 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
* shared mapping bits.
*/
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
-#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
-#define PAGE_NONE pgprot_user
-#define PAGE_COPY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
-#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \
- L_PTE_WRITE)
-#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
-#define PAGE_KERNEL pgprot_kernel
-
-#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
-#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
-#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
-#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
+#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
+
+#define PAGE_NONE pgprot_user
+#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
+#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
+#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
+#define PAGE_KERNEL pgprot_kernel
+#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC)
+
+#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
+#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
+#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
+#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
+#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#endif /* __ASSEMBLY__ */
@@ -228,19 +235,19 @@ extern pgprot_t pgprot_kernel;
#define __P001 __PAGE_READONLY
#define __P010 __PAGE_COPY
#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY
-#define __P101 __PAGE_READONLY
-#define __P110 __PAGE_COPY
-#define __P111 __PAGE_COPY
+#define __P100 __PAGE_READONLY_EXEC
+#define __P101 __PAGE_READONLY_EXEC
+#define __P110 __PAGE_COPY_EXEC
+#define __P111 __PAGE_COPY_EXEC
#define __S000 __PAGE_NONE
#define __S001 __PAGE_READONLY
#define __S010 __PAGE_SHARED
#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY
-#define __S101 __PAGE_READONLY
-#define __S110 __PAGE_SHARED
-#define __S111 __PAGE_SHARED
+#define __S100 __PAGE_READONLY_EXEC
+#define __S101 __PAGE_READONLY_EXEC
+#define __S110 __PAGE_SHARED_EXEC
+#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
/*
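
Note: the __Pxxx/__Sxxx entries populate the generic protection_map[] table, which the mm code indexes with the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a mapping, so only mappings created with PROT_EXEC (or running with READ_IMPLIES_EXEC, see VM_DATA_DEFAULT_FLAGS above) now receive L_PTE_EXEC. Roughly how mm/mmap.c consumes the table (simplified sketch, not part of this patch):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* indices 0-7 select __P000..__P111, indices 8-15 select __S000..__S111 */
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}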
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 1d296fc8494e..4305345987d3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -10,7 +10,7 @@ endif
# Object file lists.
-obj-y := compat.o entry-armv.o entry-common.o irq.o \
+obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \
process.o ptrace.o setup.o signal.o \
sys_arm.o stacktrace.o time.o traps.o
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
new file mode 100644
index 000000000000..513f332f040d
--- /dev/null
+++ b/arch/arm/kernel/elf.c
@@ -0,0 +1,79 @@
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+ unsigned int eflags;
+
+ /* Make sure it's an ARM executable */
+ if (x->e_machine != EM_ARM)
+ return 0;
+
+ /* Make sure the entry address is reasonable */
+ if (x->e_entry & 1) {
+ if (!(elf_hwcap & HWCAP_THUMB))
+ return 0;
+ } else if (x->e_entry & 3)
+ return 0;
+
+ eflags = x->e_flags;
+ if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+ /* APCS26 is only allowed if the CPU supports it */
+ if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
+ return 0;
+
+ /* VFP requires the supporting code */
+ if ((eflags & EF_ARM_VFP_FLOAT) && !(elf_hwcap & HWCAP_VFP))
+ return 0;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
+
+void elf_set_personality(const struct elf32_hdr *x)
+{
+ unsigned int eflags = x->e_flags;
+ unsigned int personality = PER_LINUX_32BIT;
+
+ /*
+ * APCS-26 is only valid for OABI executables
+ */
+ if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+ if (eflags & EF_ARM_APCS_26)
+ personality = PER_LINUX;
+ }
+
+ set_personality(personality);
+
+ /*
+ * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
+ * and CP1, we only enable access to the iWMMXt coprocessor if the
+ * binary is EABI or softfloat (and thus, guaranteed not to use
+ * FPA instructions.)
+ */
+ if (elf_hwcap & HWCAP_IWMMXT &&
+ eflags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) {
+ set_thread_flag(TIF_USING_IWMMXT);
+ } else {
+ clear_thread_flag(TIF_USING_IWMMXT);
+ }
+}
+EXPORT_SYMBOL(elf_set_personality);
+
+/*
+ * Set READ_IMPLIES_EXEC if:
+ * - the binary requires an executable stack
+ * - we're running on a CPU which doesn't support NX.
+ */
+int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
+{
+ if (executable_stack != EXSTACK_ENABLE_X)
+ return 1;
+ if (cpu_architecture() <= CPU_ARCH_ARMv6)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(arm_elf_read_implies_exec);
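
Note: to see what these functions will find in a given binary, here is a standalone userspace sketch (illustrative only; the hard-coded flag values mirror the EF_ARM_* constants added to asm/elf.h above, and the low-order flags are only meaningful for old-ABI binaries, i.e. EABI version 0):

#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	Elf32_Ehdr eh;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <arm-elf-file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	if (fread(&eh, sizeof(eh), 1, f) != 1) {
		fprintf(stderr, "%s: short read\n", argv[1]);
		fclose(f);
		return 1;
	}
	fclose(f);

	if (memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 || eh.e_machine != EM_ARM) {
		fprintf(stderr, "%s: not an ARM ELF file\n", argv[1]);
		return 1;
	}

	printf("entry point : %#x (%s entry)\n", (unsigned int)eh.e_entry,
	       (eh.e_entry & 1) ? "Thumb" : "ARM");
	printf("EABI version: %u\n", (eh.e_flags & 0xff000000) >> 24);     /* EF_ARM_EABI_MASK */
	printf("soft-float  : %s\n", (eh.e_flags & 0x00000200) ? "yes" : "no"); /* EF_ARM_SOFT_FLOAT */
	printf("APCS-26     : %s\n", (eh.e_flags & 0x00000008) ? "yes" : "no"); /* EF_ARM_APCS_26 */
	return 0;
}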
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index a68259a0cccd..9203ba7d58ee 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -47,7 +47,7 @@ void *module_alloc(unsigned long size)
if (!area)
return NULL;
- return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+ return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
}
#else /* CONFIG_MMU */
void *module_alloc(unsigned long size)
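
Note: with PAGE_KERNEL no longer implying execute permission, module text must be mapped with PAGE_KERNEL_EXEC, and the same applies to any other kernel code that places instructions into vmalloc space. A minimal sketch of the pattern, using a hypothetical helper name and the three-argument __vmalloc() of this kernel era:

#include <linux/gfp.h>
#include <linux/vmalloc.h>

/* Hypothetical example, not part of this patch: allocate memory the CPU
 * may execute from, now that kernel mappings default to non-executable. */
static void *alloc_exec_buffer(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
}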