Diffstat (limited to 'arch/sh/kernel/traps.c')
-rw-r--r--  arch/sh/kernel/traps.c | 396
1 file changed, 273 insertions(+), 123 deletions(-)
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index c2c597e09482..ec110157992d 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -1,43 +1,32 @@
-/* $Id: traps.c,v 1.17 2004/05/02 01:46:30 sugioka Exp $
- *
- * linux/arch/sh/traps.c
+/*
+ * 'traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 David Howells
- * Copyright (C) 2002, 2003 Paul Mundt
- */
-
-/*
- * 'Traps.c' handles hardware traps and faults after we have saved some
- * state in 'entry.S'.
+ * Copyright (C) 2002 - 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
-#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
#include <linux/ptrace.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
-#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
-
+#include <linux/io.h>
+#include <linux/debug_locks.h>
+#include <linux/limits.h>
#include <asm/system.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/sections.h>
#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs) \
-{ \
+#define CHK_REMOTE_DEBUG(regs) \
+{ \
if (kgdb_debug_hook && !user_mode(regs))\
(*kgdb_debug_hook)(regs); \
}
@@ -46,20 +35,44 @@
#endif
#ifdef CONFIG_CPU_SH2
-#define TRAP_RESERVED_INST 4
-#define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_RESERVED_INST 4
+# define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_ADDRESS_ERROR 9
+# ifdef CONFIG_CPU_SH2A
+# define TRAP_DIVZERO_ERROR 17
+# define TRAP_DIVOVF_ERROR 18
+# endif
#else
#define TRAP_RESERVED_INST 12
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
-/*
- * These constants are for searching for possible module text
- * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
- * a guess of how much space is likely to be vmalloced.
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define MODULE_RANGE (8*1024*1024)
+static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+{
+ unsigned long p;
+ int i;
+
+ printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+
+ for (p = bottom & ~31; p < top; ) {
+ printk("%04lx: ", p & 0xffff);
+
+ for (i = 0; i < 8; i++, p += 4) {
+ unsigned int val;
+
+ if (p < bottom || p >= top)
+ printk(" ");
+ else {
+ if (__get_user(val, (unsigned int __user *)p)) {
+ printk("\n");
+ return;
+ }
+ printk("%08x ", val);
+ }
+ }
+ printk("\n");
+ }
+}
DEFINE_SPINLOCK(die_lock);
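
The new dump_mem() rounds its start address down to a 32-byte boundary, prints eight 32-bit words per line, blanks out cells that fall outside [bottom, top), and stops quietly if __get_user() faults. A minimal userspace sketch of the same loop, illustration only (not part of the patch), with plain loads standing in for __get_user():

#include <stdio.h>

/* Userspace analogue of dump_mem(); direct loads instead of __get_user(). */
static void dump_mem_demo(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printf("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	for (p = bottom & ~31UL; p < top; ) {
		printf("%04lx: ", p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			if (p < bottom || p >= top)
				printf("         ");	/* pad cells outside the range */
			else
				printf("%08x ", *(unsigned int *)p);
		}
		printf("\n");
	}
}

int main(void)
{
	unsigned int buf[16] = { 0xdeadbeef, 1, 2, 3, 4, 5, 6, 7 };

	dump_mem_demo("Stack: ", (unsigned long)&buf[1], (unsigned long)&buf[13]);
	return 0;
}
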
@@ -69,21 +82,33 @@ void die(const char * str, struct pt_regs * regs, long err)
console_verbose();
spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+
CHK_REMOTE_DEBUG(regs);
+ print_modules();
show_regs(regs);
+
+ printk("Process: %s (pid: %d, stack limit = %p)\n",
+ current->comm, current->pid, task_stack_page(current) + 1);
+
+ if (!user_mode(regs) || in_interrupt())
+ dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
+ (unsigned long)task_stack_page(current));
+
+ bust_spinlocks(0);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
}
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+static inline void die_if_kernel(const char *str, struct pt_regs *regs,
+ long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
-static int handle_unaligned_notify_count = 10;
-
/*
* try and fix up kernelspace address errors
* - userspace errors just cause EFAULT to be returned, resulting in SEGV
@@ -93,8 +118,7 @@ static int handle_unaligned_notify_count = 10;
*/
static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
- if (!user_mode(regs))
- {
+ if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
@@ -106,6 +130,40 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
return -EFAULT;
}
+#ifdef CONFIG_BUG
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+static inline void do_bug_verbose(struct pt_regs *regs)
+{
+ struct bug_frame f;
+ long len;
+
+ if (__copy_from_user(&f, (const void __user *)regs->pc,
+ sizeof(struct bug_frame)))
+ return;
+
+ len = __strnlen_user(f.file, PATH_MAX) - 1;
+ if (unlikely(len < 0 || len >= PATH_MAX))
+ f.file = "<bad filename>";
+ len = __strnlen_user(f.func, PATH_MAX) - 1;
+ if (unlikely(len < 0 || len >= PATH_MAX))
+ f.func = "<bad function>";
+
+ printk(KERN_ALERT "kernel BUG in %s() at %s:%d!\n",
+ f.func, f.file, f.line);
+}
+#else
+static inline void do_bug_verbose(struct pt_regs *regs)
+{
+}
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+#endif /* CONFIG_BUG */
+
+void handle_BUG(struct pt_regs *regs)
+{
+ do_bug_verbose(regs);
+ die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
+}
+
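
handle_BUG() copies a struct bug_frame from the trapping PC with __copy_from_user() and bounds-checks the file and function strings with __strnlen_user() before printing, substituting placeholders when either string is unreadable or longer than PATH_MAX. Below is a plain-C sketch of that validate-then-print pattern, illustration only; the frame layout is assumed from the fields do_bug_verbose() reads (file, func, line), not taken from this patch:

#include <stdio.h>
#include <string.h>
#include <limits.h>

/* Assumed layout, inferred from the fields do_bug_verbose() uses. */
struct bug_frame_demo {
	const char *file;
	const char *func;
	int line;
};

static void report_bug_demo(const struct bug_frame_demo *f)
{
	const char *file = f->file;
	const char *func = f->func;

	/* Refuse to print strings that are missing or unterminated. */
	if (!file || strnlen(file, PATH_MAX) >= PATH_MAX)
		file = "<bad filename>";
	if (!func || strnlen(func, PATH_MAX) >= PATH_MAX)
		func = "<bad function>";

	printf("kernel BUG in %s() at %s:%d!\n", func, file, f->line);
}

int main(void)
{
	struct bug_frame_demo f = { __FILE__, __func__, __LINE__ };

	report_bug_demo(&f);
	return 0;
}
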
/*
* handle an instruction that does an unaligned memory access by emulating the
* desired behaviour
@@ -179,7 +237,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
if (copy_to_user(dst,src,4))
goto fetch_fault;
ret = 0;
- break;
+ break;
case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
if (instruction & 4)
@@ -203,7 +261,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
if (copy_from_user(dst,src,4))
goto fetch_fault;
ret = 0;
- break;
+ break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
src = (unsigned char*) *rm;
@@ -211,7 +269,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
*rm += count;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
-
+
#ifdef __LITTLE_ENDIAN__
if (copy_from_user(dst, src, count))
goto fetch_fault;
@@ -222,7 +280,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
}
#else
dst += 4-count;
-
+
if (copy_from_user(dst, src, count))
goto fetch_fault;
@@ -301,7 +359,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
return -EFAULT;
/* kernel */
- die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+ die("delay-slot-insn faulting in handle_unaligned_delayslot",
+ regs, 0);
}
return handle_unaligned_ins(instruction,regs);
@@ -323,6 +382,13 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
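
The 12-bit form shifts the displacement into the top of a signed 16-bit value and arithmetically shifts back down, which sign-extends and multiplies by the 2-byte instruction size in one step; both forms add 4 because the branch target is computed relative to PC + 4. A small self-checking example of the arithmetic, illustration only, relying on the same arithmetic-right-shift behaviour the kernel assumes:

#include <assert.h>
#include <stdio.h>

#define SH_PC_8BIT_OFFSET(instr)  ((((signed char)(instr)) * 2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)((instr) << 4)) >> 3) + 4)

int main(void)
{
	/* bra with displacement +5: offset = 5*2 + 4 */
	assert(SH_PC_12BIT_OFFSET(0xA005) == 14);
	/* bra with displacement -1 (0xFFF): offset = -2 + 4 */
	assert(SH_PC_12BIT_OFFSET(0xAFFF) == 2);
	/* bt with an 8-bit displacement of -4 (0xFC): offset = -8 + 4 */
	assert(SH_PC_8BIT_OFFSET(0x89FC) == -4);

	printf("branch offset macros check out\n");
	return 0;
}
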
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
u_int rm;
@@ -335,7 +401,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
if (user_mode(regs) && handle_unaligned_notify_count>0) {
handle_unaligned_notify_count--;
- printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ printk(KERN_NOTICE "Fixing up unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
current->comm,current->pid,(u16*)regs->pc,instruction);
}
@@ -459,32 +526,58 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
regs->pc += 2;
return ret;
}
+#endif /* CONFIG_CPU_SH2A */
+
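
handle_unaligned_access() (now compiled out on SH-2A until its mixed 16/32-bit opcodes are handled) emulates misaligned loads and stores that trap on SuperH, printing the "Fixing up unaligned userspace access" notice for the first ten userspace offenders. For illustration only, here is the kind of userspace access that reaches this path, next to the byte-wise alternative that never traps:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned int val;

	/* buf + 1 is not 4-byte aligned: this load traps on SH and is
	 * either emulated by handle_unaligned_access() or signalled. */
	val = *(unsigned int *)(buf + 1);
	printf("direct load: %08x\n", val);

	/* Byte-wise copy never generates an address error. */
	memcpy(&val, buf + 1, sizeof(val));
	printf("memcpy load: %08x\n", val);
	return 0;
}
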
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x) \
+ __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x) \
+ __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
/*
- * Handle various address error exceptions
+ * Handle various address error exceptions:
+ * - instruction address error:
+ * misaligned PC
+ * PC >= 0x80000000 in user mode
+ * - data address error (read and write)
+ * misaligned data access
+ * access to >= 0x80000000 is user mode
+ * Unfortuntaly we can't distinguish between instruction address error
+ * and data address errors caused by read acceses.
*/
-asmlinkage void do_address_error(struct pt_regs *regs,
+asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
- unsigned long error_code;
+ unsigned long error_code = 0;
mm_segment_t oldfs;
+ siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
u16 instruction;
int tmp;
+#endif
- asm volatile("stc r2_bank,%0": "=r" (error_code));
+ /* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+ lookup_exception_vector(error_code);
+#endif
oldfs = get_fs();
if (user_mode(regs)) {
+ int si_code = BUS_ADRERR;
+
local_irq_enable();
- current->thread.error_code = error_code;
- current->thread.trap_no = (writeaccess) ? 8 : 7;
/* bad PC is not something we can fix */
- if (regs->pc & 1)
+ if (regs->pc & 1) {
+ si_code = BUS_ADRALN;
goto uspace_segv;
+ }
+#ifndef CONFIG_CPU_SH2A
set_fs(USER_DS);
if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
/* Argh. Fault on the instruction itself.
@@ -499,14 +592,23 @@ asmlinkage void do_address_error(struct pt_regs *regs,
if (tmp==0)
return; /* sorted */
+#endif
- uspace_segv:
- printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
- force_sig(SIGSEGV, current);
+uspace_segv:
+ printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+ "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+ regs->pr);
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void *) address;
+ force_sig_info(SIGBUS, &info, current);
} else {
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
+#ifndef CONFIG_CPU_SH2A
set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
/* Argh. Fault on the instruction itself.
@@ -518,6 +620,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
handle_unaligned_access(instruction, regs);
set_fs(oldfs);
+#else
+ printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+ "access\n", current->comm);
+
+ force_sig(SIGSEGV, current);
+#endif
}
}
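
Userspace now receives SIGBUS (with si_code BUS_ADRALN for a misaligned PC, BUS_ADRERR otherwise, and si_addr set to the faulting address) instead of the old unconditional SIGSEGV. A minimal sketch, illustration only, of how a process can observe that:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void bus_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* Async-signal-safety is ignored here for brevity. */
	fprintf(stderr, "SIGBUS: si_code=%d (%s), si_addr=%p\n",
		info->si_code,
		info->si_code == BUS_ADRALN ? "alignment" : "other",
		info->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = bus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	/* ... trigger an unaligned or out-of-range access here ... */
	return 0;
}
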
@@ -529,7 +637,7 @@ int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst;
- /*
+ /*
* Safe guard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
@@ -550,24 +658,49 @@ int is_dsp_inst(struct pt_regs *regs)
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
-extern int do_fpu_inst(unsigned short, struct pt_regs*);
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ siginfo_t info;
+
+ switch (r4) {
+ case TRAP_DIVZERO_ERROR:
+ info.si_code = FPE_INTDIV;
+ break;
+ case TRAP_DIVOVF_ERROR:
+ info.si_code = FPE_INTOVF;
+ break;
+ }
+
+ force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
+/* arch/sh/kernel/cpu/sh4/fpu.c */
+extern int do_fpu_inst(unsigned short, struct pt_regs *);
+extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7, struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst;
+ unsigned short inst = 0;
int err;
- get_user(inst, (unsigned short*)regs.pc);
+ get_user(inst, (unsigned short*)regs->pc);
- err = do_fpu_inst(inst, &regs);
+ err = do_fpu_inst(inst, regs);
if (!err) {
- regs.pc += 2;
+ regs->pc += 2;
return;
}
/* not a FPU inst. */
@@ -575,20 +708,19 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
#ifdef CONFIG_SH_DSP
/* Check if it's a DSP instruction */
- if (is_dsp_inst(&regs)) {
+ if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
- regs.sr |= SR_DSP;
+ regs->sr |= SR_DSP;
return;
}
#endif
- asm volatile("stc r2_bank, %0": "=r" (error_code));
+ lookup_exception_vector(error_code);
+
local_irq_enable();
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = TRAP_RESERVED_INST;
- CHK_REMOTE_DEBUG(&regs);
+ CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
- die_if_no_fixup("reserved instruction", &regs, error_code);
+ die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
@@ -636,39 +768,41 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs)
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst;
+ unsigned short inst = 0;
- get_user(inst, (unsigned short *)regs.pc + 1);
- if (!do_fpu_inst(inst, &regs)) {
- get_user(inst, (unsigned short *)regs.pc);
- if (!emulate_branch(inst, &regs))
+ get_user(inst, (unsigned short *)regs->pc + 1);
+ if (!do_fpu_inst(inst, regs)) {
+ get_user(inst, (unsigned short *)regs->pc);
+ if (!emulate_branch(inst, regs))
return;
/* fault in branch.*/
}
/* not a FPU inst. */
#endif
- asm volatile("stc r2_bank, %0": "=r" (error_code));
+ lookup_exception_vector(error_code);
+
local_irq_enable();
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = TRAP_RESERVED_INST;
- CHK_REMOTE_DEBUG(&regs);
+ CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
- die_if_no_fixup("illegal slot instruction", &regs, error_code);
+ die_if_no_fixup("illegal slot instruction", regs, error_code);
}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;
- asm volatile("stc r2_bank, %0" : "=r" (ex));
- die_if_kernel("exception", &regs, ex);
+
+ lookup_exception_vector(ex);
+ die_if_kernel("exception", regs, ex);
}
#if defined(CONFIG_SH_STANDARD_BIOS)
@@ -709,14 +843,24 @@ void __init per_cpu_trap_init(void)
: "memory");
}
-void __init trap_init(void)
+void *set_exception_table_vec(unsigned int vec, void *handler)
{
extern void *exception_handling_table[];
+ void *old_handler;
+
+ old_handler = exception_handling_table[vec];
+ exception_handling_table[vec] = handler;
+ return old_handler;
+}
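
Returning the previous entry lets a caller install a handler temporarily and put the original back afterwards; trap_init() below uses the same helper (and the EXPEVT-based set_exception_table_evt() wrapper) to populate the table at boot. A short kernel-context sketch of that save/restore pattern, illustration only; my_debug_handler is a hypothetical handler, not something from this patch:

extern void *set_exception_table_vec(unsigned int vec, void *handler);
extern void my_debug_handler(void);	/* hypothetical replacement handler */

static void *saved_handler;

static void install_debug_hook(unsigned int vec)
{
	/* Swap in the hook and remember whatever was installed before. */
	saved_handler = set_exception_table_vec(vec, my_debug_handler);
}

static void remove_debug_hook(unsigned int vec)
{
	/* Restore the original handler. */
	set_exception_table_vec(vec, saved_handler);
}
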
- exception_handling_table[TRAP_RESERVED_INST]
- = (void *)do_reserved_inst;
- exception_handling_table[TRAP_ILLEGAL_SLOT_INST]
- = (void *)do_illegal_slot_inst;
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+
+void __init trap_init(void)
+{
+ set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
+ set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
defined(CONFIG_SH_FPU_EMU)
@@ -725,61 +869,67 @@ void __init trap_init(void)
* reserved. They'll be handled in the math-emu case, or faulted on
* otherwise.
*/
- /* entry 64 corresponds to EXPEVT=0x800 */
- exception_handling_table[64] = (void *)do_reserved_inst;
- exception_handling_table[65] = (void *)do_illegal_slot_inst;
+ set_exception_table_evt(0x800, do_reserved_inst);
+ set_exception_table_evt(0x820, do_illegal_slot_inst);
+#elif defined(CONFIG_SH_FPU)
+ set_exception_table_evt(0x800, do_fpu_state_restore);
+ set_exception_table_evt(0x820, do_fpu_state_restore);
#endif
-
+
+#ifdef CONFIG_CPU_SH2
+ set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+ set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+ set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
/* Setup VBR for boot cpu */
per_cpu_trap_init();
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+ struct pt_regs *regs)
{
- unsigned long *stack, addr;
- unsigned long module_start = VMALLOC_START;
- unsigned long module_end = VMALLOC_END;
- int i = 1;
+ unsigned long addr;
- if (!tsk)
- tsk = current;
- if (tsk == current)
- sp = (unsigned long *)current_stack_pointer;
- else
- sp = (unsigned long *)tsk->thread.sp;
-
- stack = sp;
+ if (regs && user_mode(regs))
+ return;
printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
- while (!kstack_end(stack)) {
- addr = *stack++;
- if (((addr >= (unsigned long)_text) &&
- (addr <= (unsigned long)_etext)) ||
- ((addr >= module_start) && (addr <= module_end))) {
- /*
- * For 80-columns display, 6 entry is maximum.
- * NOTE: '[<8c00abcd>] ' consumes 13 columns .
- */
-#ifndef CONFIG_KALLSYMS
- if (i && ((i % 6) == 0))
- printk("\n ");
-#endif
- printk("[<%08lx>] ", addr);
- print_symbol("%s\n", addr);
- i++;
- }
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (kernel_text_address(addr))
+ print_ip_sym(addr);
}
printk("\n");
+
+ if (!tsk)
+ tsk = current;
+
+ debug_show_held_locks(tsk);
}
-void show_task(unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp)
{
- show_stack(NULL, sp);
+ unsigned long stack;
+
+ if (!tsk)
+ tsk = current;
+ if (tsk == current)
+ sp = (unsigned long *)current_stack_pointer;
+ else
+ sp = (unsigned long *)tsk->thread.sp;
+
+ stack = (unsigned long)sp;
+ dump_mem("Stack: ", stack, THREAD_SIZE +
+ (unsigned long)task_stack_page(tsk));
+ show_trace(tsk, sp, NULL);
}
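
show_trace() replaces the old hand-maintained text/module range check with a conservative word-by-word scan: anything on the stack that kernel_text_address() recognises is printed as a possible return address, so traces may include stale entries but need no frame pointers. A userspace miniature of the same heuristic, illustration only, assuming the GNU linker's __executable_start and etext symbols:

#include <stdio.h>

extern char __executable_start[];	/* start of text, from the GNU linker */
extern char etext[];			/* end of text, from the C library */

static int looks_like_text(unsigned long addr)
{
	return addr >= (unsigned long)__executable_start &&
	       addr <  (unsigned long)etext;
}

/* Print every stack slot that could plausibly be a return address. */
static void scan_stack(const unsigned long *sp, const unsigned long *end)
{
	while (sp < end) {
		unsigned long addr = *sp++;

		if (looks_like_text(addr))
			printf("[<%08lx>]\n", addr);
	}
}

int main(void)
{
	unsigned long fake_stack[] = {
		0x12345678,			/* plain data, ignored */
		(unsigned long)main,		/* looks like a return address */
		0,
	};

	scan_stack(fake_stack, fake_stack + sizeof(fake_stack) / sizeof(fake_stack[0]));
	return 0;
}
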
void dump_stack(void)