author     Michal Marek <mmarek@suse.cz>  2010-08-04 13:59:13 +0200
committer  Michal Marek <mmarek@suse.cz>  2010-08-04 13:59:13 +0200
commit     772320e84588dcbe1600ffb83e5f328f2209ac2a (patch)
tree       a7de21b79340aeaa17c58126f6b801b82c77b53a /arch/m32r
parent     1ce53adf13a54375d2a5c7cdbe341b2558389615 (diff)
parent     9fe6206f400646a2322096b56c59891d530e8d51 (diff)
Merge commit 'v2.6.35' into kbuild/kbuild
Conflicts: arch/powerpc/Makefile
Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/include/asm/atomic.h       |   2
-rw-r--r--  arch/m32r/include/asm/local.h        |  25
-rw-r--r--  arch/m32r/include/asm/ptrace.h       |   2
-rw-r--r--  arch/m32r/include/asm/scatterlist.h  |  15
-rw-r--r--  arch/m32r/include/asm/thread_info.h  |   2
-rw-r--r--  arch/m32r/include/asm/tlbflush.h     |   2
-rw-r--r--  arch/m32r/include/asm/unistd.h       |   1
-rw-r--r--  arch/m32r/kernel/process.c           |   2
-rw-r--r--  arch/m32r/kernel/ptrace.c            |  97
-rw-r--r--  arch/m32r/kernel/sys_m32r.c          |  92
-rw-r--r--  arch/m32r/kernel/time.c              |  47
-rw-r--r--  arch/m32r/mm/fault-nommu.c           |   2
-rw-r--r--  arch/m32r/mm/fault.c                 |  20
-rw-r--r--  arch/m32r/mm/init.c                  |   1
14 files changed, 53 insertions(+), 257 deletions(-)
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 63f0cf0f50dd..d44a51e5271b 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -26,7 +26,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/**
* atomic_set - set atomic variable
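The atomic_read() change above matters because the old macro let the compiler cache v->counter in a register, so a loop polling an atomic_t could spin on a stale value; the cast through a volatile pointer forces a fresh load on every use. A minimal user-space sketch of the same idiom (plain C; the struct and macro names are illustrative, not kernel code):

#include <stdio.h>

struct my_atomic { int counter; };               /* stand-in for atomic_t */

/* Old style: nothing stops the compiler from caching the load. */
#define plain_read(v)    ((v)->counter)

/* New style: the volatile cast forces a re-read of the memory
 * location every time the macro is evaluated. */
#define volatile_read(v) (*(volatile int *)&(v)->counter)

int main(void)
{
    struct my_atomic a = { .counter = 42 };

    /* In a loop such as  while (!volatile_read(&a)) ;  the volatile cast
     * guarantees the value is re-read each iteration; with plain_read()
     * the compiler may hoist the load out of the loop entirely. */
    printf("%d %d\n", plain_read(&a), volatile_read(&a));
    return 0;
}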
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 22256d138630..734bca87018a 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
* a variable, not an address.
*/
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* __M32R_LOCAL_H */
diff --git a/arch/m32r/include/asm/ptrace.h b/arch/m32r/include/asm/ptrace.h
index a0755b982028..840a1231edeb 100644
--- a/arch/m32r/include/asm/ptrace.h
+++ b/arch/m32r/include/asm/ptrace.h
@@ -120,6 +120,8 @@ struct pt_regs {
#include <asm/m32r.h> /* M32R_PSW_BSM, M32R_PSW_BPM */
+#define arch_has_single_step() (1)
+
struct task_struct;
extern void init_debug_traps(struct task_struct *);
#define arch_ptrace_attach(child) \
diff --git a/arch/m32r/include/asm/scatterlist.h b/arch/m32r/include/asm/scatterlist.h
index 1ed372c73d0b..aeeddd8dac17 100644
--- a/arch/m32r/include/asm/scatterlist.h
+++ b/arch/m32r/include/asm/scatterlist.h
@@ -1,20 +1,7 @@
#ifndef _ASM_M32R_SCATTERLIST_H
#define _ASM_M32R_SCATTERLIST_H
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- char * address; /* Location data is to be transferred to, NULL for
- * highmem page */
- unsigned long page_link;
- unsigned int offset;/* for highmem, page offset */
-
- dma_addr_t dma_address;
- unsigned int length;
-};
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x1fffffff)
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index ed240b6e8e77..71faff5bcc27 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -142,7 +142,7 @@ static inline unsigned int get_thread_fault_code(void)
#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 18 /* OOM killer killed process */
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
diff --git a/arch/m32r/include/asm/tlbflush.h b/arch/m32r/include/asm/tlbflush.h
index 0ef95307784e..92614b0ccf17 100644
--- a/arch/m32r/include/asm/tlbflush.h
+++ b/arch/m32r/include/asm/tlbflush.h
@@ -92,6 +92,6 @@ static __inline__ void __flush_tlb_all(void)
);
}
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#endif /* _ASM_M32R_TLBFLUSH_H */
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index cf701c933249..76125777483c 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -339,6 +339,7 @@
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 67a01e1e4283..bc8c8c1511b2 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -21,10 +21,10 @@
*/
#include <linux/fs.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
-#include <linux/slab.h>
#include <linux/hardirq.h>
#include <asm/io.h>
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98682bba0ed9..e555091eb97c 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -580,6 +580,35 @@ init_debug_traps(struct task_struct *child)
}
}
+void user_enable_single_step(struct task_struct *child)
+{
+ unsigned long next_pc;
+ unsigned long pc, insn;
+
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
+ /* Compute next pc. */
+ pc = get_stack_long(child, PT_BPC);
+
+ if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+ != sizeof(insn))
+ return;
+
+ compute_next_pc(insn, pc, &next_pc, child);
+ if (next_pc & 0x80000000)
+ return;
+
+ if (embed_debug_trap(child, next_pc))
+ return;
+
+ invalidate_cache();
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ unregister_all_debug_traps(child);
+ invalidate_cache();
+}
/*
* Called by kernel/ptrace.c when detaching..
@@ -630,74 +659,6 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = ptrace_write_user(child, addr, data);
break;
- /*
- * continue/restart and stop at next (return from) syscall
- */
- case PTRACE_SYSCALL:
- case PTRACE_CONT:
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
- break;
-
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL: {
- ret = 0;
- unregister_all_debug_traps(child);
- invalidate_cache();
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
- break;
- }
-
- /*
- * execute single instruction.
- */
- case PTRACE_SINGLESTEP: {
- unsigned long next_pc;
- unsigned long pc, insn;
-
- ret = -EIO;
- if (!valid_signal(data))
- break;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-
- /* Compute next pc. */
- pc = get_stack_long(child, PT_BPC);
-
- if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
- != sizeof(insn))
- break;
-
- compute_next_pc(insn, pc, &next_pc, child);
- if (next_pc & 0x80000000)
- break;
-
- if (embed_debug_trap(child, next_pc))
- break;
-
- invalidate_cache();
- child->exit_code = data;
-
- /* give it a chance to run. */
- wake_up_process(child);
- ret = 0;
- break;
- }
-
case PTRACE_GETREGS:
ret = ptrace_getregs(child, (void __user *)data);
break;
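The ptrace.c hunks above drop the open-coded PTRACE_SYSCALL/PTRACE_CONT/PTRACE_KILL/PTRACE_SINGLESTEP cases in favour of the generic resume code: arch_has_single_step() (added in ptrace.h) advertises the capability, and the core ptrace layer now calls the new user_enable_single_step()/user_disable_single_step() hooks. A self-contained user-space tracer that exercises this path, offered only as a sketch (the traced program, /bin/true, is an arbitrary choice):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();
    if (pid < 0) { perror("fork"); return 1; }

    if (pid == 0) {
        /* Child: ask to be traced, then exec a short-lived program. */
        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
        execl("/bin/true", "true", (char *)NULL);
        _exit(127);
    }

    int status;
    long steps = 0;
    waitpid(pid, &status, 0);        /* child stops after execve() */

    while (WIFSTOPPED(status)) {
        /* Each PTRACE_SINGLESTEP request ends up in the architecture's
         * user_enable_single_step() hook. */
        if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) == -1) {
            perror("PTRACE_SINGLESTEP");
            break;
        }
        steps++;
        waitpid(pid, &status, 0);
    }

    printf("traced roughly %ld single-stepped instructions\n", steps);
    return 0;
}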
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index d3c865c5a6ba..0a00f467edfa 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -76,98 +76,6 @@ asmlinkage int sys_tas(int __user *addr)
return oldval;
}
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc(uint call, int first, int second,
- int third, void __user *ptr, long fifth)
-{
- int version, ret;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- switch (call) {
- case SEMOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- second, NULL);
- case SEMTIMEDOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- second, (const struct timespec __user *)fifth);
- case SEMGET:
- return sys_semget (first, second, third);
- case SEMCTL: {
- union semun fourth;
- if (!ptr)
- return -EINVAL;
- if (get_user(fourth.__pad, (void __user * __user *) ptr))
- return -EFAULT;
- return sys_semctl (first, second, third, fourth);
- }
-
- case MSGSND:
- return sys_msgsnd (first, (struct msgbuf __user *) ptr,
- second, third);
- case MSGRCV:
- switch (version) {
- case 0: {
- struct ipc_kludge tmp;
- if (!ptr)
- return -EINVAL;
-
- if (copy_from_user(&tmp,
- (struct ipc_kludge __user *) ptr,
- sizeof (tmp)))
- return -EFAULT;
- return sys_msgrcv (first, tmp.msgp, second,
- tmp.msgtyp, third);
- }
- default:
- return sys_msgrcv (first,
- (struct msgbuf __user *) ptr,
- second, fifth, third);
- }
- case MSGGET:
- return sys_msgget ((key_t) first, second);
- case MSGCTL:
- return sys_msgctl (first, second,
- (struct msqid_ds __user *) ptr);
- case SHMAT: {
- ulong raddr;
-
- if (!access_ok(VERIFY_WRITE, (ulong __user *) third,
- sizeof(ulong)))
- return -EFAULT;
- ret = do_shmat (first, (char __user *) ptr, second, &raddr);
- if (ret)
- return ret;
- return put_user (raddr, (ulong __user *) third);
- }
- case SHMDT:
- return sys_shmdt ((char __user *)ptr);
- case SHMGET:
- return sys_shmget (first, second, third);
- case SHMCTL:
- return sys_shmctl (first, second,
- (struct shmid_ds __user *) ptr);
- default:
- return -ENOSYS;
- }
-}
-
-asmlinkage int sys_uname(struct old_utsname __user * name)
-{
- int err;
- if (!name)
- return -EFAULT;
- down_read(&uts_sem);
- err = copy_to_user(name, utsname(), sizeof (*name));
- up_read(&uts_sem);
- return err?-EFAULT:0;
-}
-
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
/* This should flush more selectively ... */
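Removing the arch-private sys_ipc() works because the __ARCH_WANT_SYS_IPC define added to unistd.h selects the common SysV IPC demultiplexer instead, so the multiplexed ipc(2) entry point that libc uses on this architecture keeps behaving the same. A small user-space sketch of a caller that ultimately goes through that path (key, permissions and values are arbitrary):

#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* glibc leaves this union for the caller to define. */
union semun { int val; struct semid_ds *buf; unsigned short *array; };

int main(void)
{
    /* semget()/semctl() correspond to the SEMGET/SEMCTL cases of the
     * ipc(2) demultiplexer on architectures using the multiplexed call. */
    int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
    if (semid == -1) { perror("semget"); return 1; }

    union semun arg = { .val = 1 };
    if (semctl(semid, 0, SETVAL, arg) == -1)
        perror("semctl SETVAL");

    printf("semaphore value: %d\n", semctl(semid, 0, GETVAL));

    semctl(semid, 0, IPC_RMID);      /* clean up */
    return 0;
}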
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 9cedcef11575..bda86820bffd 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -106,24 +106,6 @@ u32 arch_gettimeoffset(void)
}
/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you won't notice until after reboot!
- */
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
- return 0;
-}
-
-/* last time the cmos clock got updated */
-static long last_rtc_update = 0;
-
-/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
@@ -138,23 +120,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- write_seqlock(&xtime_lock);
- if (ntp_synced()
- && xtime.tv_sec > last_rtc_update + 660
- && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
- && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
- {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else /* do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- write_sequnlock(&xtime_lock);
/* As we return to user mode fire off the other CPU schedulers..
this is basically because we don't yet share IRQ's around.
This message is rigged to be safe on the 386 - basically it's
@@ -174,7 +139,7 @@ static struct irqaction irq0 = {
.name = "MFT2",
};
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
unsigned int epoch, year, mon, day, hour, min, sec;
@@ -194,11 +159,13 @@ void __init time_init(void)
epoch = 1952;
year += epoch;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+}
+
+void __init time_init(void)
+{
#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
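The time.c changes above stop the architecture from maintaining xtime and the CMOS write-back itself; the boot-time RTC reading is now handed to the generic timekeeping core through read_persistent_clock(), which still converts the broken-down date with mktime(). The standalone sketch below mirrors that Gauss-style date-to-epoch calculation (the function name is illustrative; in the kernel the helper is mktime()):

#include <stdio.h>

/* Convert a Gregorian date/time to seconds since 1970-01-01 00:00:00 UTC.
 * January and February are shifted to the end of the previous year so the
 * leap-day correction stays simple. */
static unsigned long civil_to_epoch(unsigned int year, unsigned int mon,
                                    unsigned int day, unsigned int hour,
                                    unsigned int min, unsigned int sec)
{
    if ((int)(mon -= 2) <= 0) {      /* treat Jan/Feb as months 11/12 of prev. year */
        mon += 12;
        year -= 1;
    }
    return ((((unsigned long)
              (year / 4 - year / 100 + year / 400 + 367 * mon / 12 + day) +
              year * 365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void)
{
    /* Example: the kind of broken-down fields an RTC might report. */
    printf("%lu\n", civil_to_epoch(2010, 8, 4, 13, 59, 13));
    return 0;
}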
diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c
index 88469178ea6b..888aab1157ed 100644
--- a/arch/m32r/mm/fault-nommu.c
+++ b/arch/m32r/mm/fault-nommu.c
@@ -95,7 +95,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
* update_mmu_cache()
*======================================================================*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte)
+ pte_t *ptep)
{
BUG();
}
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 7274b47f4c22..b8ec002aef8e 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -188,7 +188,6 @@ good_area:
if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
goto bad_area;
-survive:
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
@@ -271,15 +270,10 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(tsk)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (error_code & ACE_USERMODE)
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!(error_code & ACE_USERMODE))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
do_sigbus:
up_read(&mm->mmap_sem);
@@ -336,7 +330,7 @@ vmalloc_fault:
addr = (address & PAGE_MASK);
set_thread_fault_code(error_code);
- update_mmu_cache(NULL, addr, *pte_k);
+ update_mmu_cache(NULL, addr, pte_k);
set_thread_fault_code(0);
return;
}
@@ -349,7 +343,7 @@ vmalloc_fault:
#define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
- pte_t pte)
+ pte_t *ptep)
{
volatile unsigned long *entry1, *entry2;
unsigned long pte_data, flags;
@@ -365,7 +359,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
vaddr = (vaddr & PAGE_MASK) | get_asid();
- pte_data = pte_val(pte);
+ pte_data = pte_val(*ptep);
#ifdef CONFIG_CHIP_OPSP
entry1 = (unsigned long *)ITLB_BASE;
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 9f581df3952b..73e2205ebf5a 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/bitops.h>
#include <linux/nodemask.h>
#include <linux/pfn.h>
+#include <linux/gfp.h>
#include <asm/types.h>
#include <asm/processor.h>
#include <asm/page.h>