Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/cpu/clock-cpg.c            2
-rw-r--r--  arch/sh/kernel/cpu/irq/imask.c            4
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c         2
-rw-r--r--  arch/sh/kernel/cpu/irq/ipr.c              6
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c    2
-rw-r--r--  arch/sh/kernel/crash_dump.c              22
-rw-r--r--  arch/sh/kernel/irq.c                      2
-rw-r--r--  arch/sh/kernel/process.c                 18
-rw-r--r--  arch/sh/kernel/ptrace_32.c                8
-rw-r--r--  arch/sh/kernel/ptrace_64.c                6
-rw-r--r--  arch/sh/kernel/setup.c                    2
-rw-r--r--  arch/sh/kernel/syscalls_32.S              1
-rw-r--r--  arch/sh/kernel/syscalls_64.S              1
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c        6
14 files changed, 39 insertions, 43 deletions
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
index dd0e0f211359..8f63a264a842 100644
--- a/arch/sh/kernel/cpu/clock-cpg.c
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -67,7 +67,7 @@ int __init __deprecated cpg_clk_init(void)
}
/*
- * Placeholder for compatability, until the lazy CPUs do this
+ * Placeholder for compatibility, until the lazy CPUs do this
* on their own.
*/
int __init __weak arch_clk_init(void)
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index 32c825c9488e..39b6a24c159d 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -80,6 +80,6 @@ static struct irq_chip imask_irq_chip = {
void make_imask_irq(unsigned int irq)
{
- set_irq_chip_and_handler_name(irq, &imask_irq_chip,
- handle_level_irq, "level");
+ irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq,
+ "level");
}
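
The hunk above is part of the tree-wide genirq rename from the set_irq_* helpers to their irq_set_* counterparts. A minimal sketch of the new-style registration, assuming a hypothetical chip (example_irq_chip and example_map_irq are illustrative names, not taken from this diff):

#include <linux/irq.h>

/* Illustrative chip; mirrors the pattern of imask_irq_chip above. */
static struct irq_chip example_irq_chip = {
	.name = "example",
};

static void example_map_irq(unsigned int irq)
{
	/* Chip, flow handler and handler name are set in a single call. */
	irq_set_chip_and_handler_name(irq, &example_irq_chip,
				      handle_level_irq, "level");
}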
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 5af48f8357e5..9e056a3a0c73 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -135,7 +135,7 @@ void __init plat_irq_setup(void)
/* Set default: per-line enable/disable, priority driven ack/eoi */
for (i = 0; i < NR_INTC_IRQS; i++)
- set_irq_chip_and_handler(i, &intc_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
/* Disable all interrupts and set all priorities to 0 to avoid trouble */
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 7516c35ee514..5de6dff5c21b 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -74,9 +74,9 @@ void register_ipr_controller(struct ipr_desc *desc)
}
disable_irq_nosync(p->irq);
- set_irq_chip_and_handler_name(p->irq, &desc->chip,
- handle_level_irq, "level");
- set_irq_chip_data(p->irq, p);
+ irq_set_chip_and_handler_name(p->irq, &desc->chip,
+ handle_level_irq, "level");
+ irq_set_chip_data(p->irq, p);
disable_ipr_irq(irq_get_irq_data(p->irq));
}
}
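
The ipr.c hunk also switches to irq_set_chip_data(); the stored pointer comes back through the struct irq_data handed to the chip callbacks. A hedged sketch of that round trip (example_priv, example_mask and the register write are illustrative, not from ipr.c):

#include <linux/io.h>
#include <linux/irq.h>

struct example_priv {
	void __iomem *reg;	/* illustrative per-IRQ register */
};

static void example_mask(struct irq_data *data)
{
	/* Retrieve what irq_set_chip_data() stored for this line. */
	struct example_priv *p = irq_data_get_irq_chip_data(data);

	__raw_writeb(0, p->reg);
}

static void example_register(unsigned int irq, struct example_priv *p)
{
	irq_set_chip_data(irq, p);
}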
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 1656b8c91faf..beba32beb6d9 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -648,7 +648,7 @@ static void __init sh7786_usb_setup(void)
* The following settings are necessary
* for using the USB modules.
*
- * see "USB Inital Settings" for detail
+ * see "USB Initial Settings" for detail
*/
__raw_writel(USBINITVAL1, USBINITREG1);
__raw_writel(USBINITVAL2, USBINITREG2);
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 37c97d444576..569e7b171c01 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -9,28 +9,6 @@
#include <linux/io.h>
#include <asm/uaccess.h>
-/* Stores the physical address of elf header of crash image. */
-unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- *
- * elfcorehdr= specifies the location of elf core header
- * stored by the crashed kernel.
- */
-static int __init parse_elfcorehdr(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- elfcorehdr_addr = memparse(arg, &arg);
-
- return 0;
-}
-early_param("elfcorehdr", parse_elfcorehdr);
-
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 64ea0b165399..91971103b62b 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -183,7 +183,7 @@ asmlinkage void do_softirq(void)
);
/*
- * Shouldnt happen, we returned above if in_interrupt():
+ * Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index dcb126dc76fd..325f98b1736d 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -32,16 +32,16 @@ void free_thread_xstate(struct task_struct *tsk)
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
struct thread_info *ti;
-
- ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
- if (unlikely(ti == NULL))
- return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
- memset(ti, 0, THREAD_SIZE);
+ gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+ gfp_t mask = GFP_KERNEL;
#endif
+
+ ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
return ti;
}
@@ -57,14 +57,16 @@ void thread_info_cache_init(void)
THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
- return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+ struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
+
+ return page ? page_address(page) : NULL;
}
void free_thread_info(struct thread_info *ti)
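
The process.c change makes the thread_info allocators NUMA-aware: both paths now take a node and allocate there instead of always on the local node. A minimal sketch of the same pattern, assuming THREAD_SIZE_ORDER from the architecture headers (alloc_stack_on_node is an illustrative helper, not the kernel's API):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/thread_info.h>

static void *alloc_stack_on_node(int node)
{
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;	/* zeroing is conditional in the real code */
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	/* page_address() is valid here because GFP_KERNEL pages are mapped. */
	return page ? page_address(page) : NULL;
}

In kernels of this era a negative node id passed to alloc_pages_node() falls back to the local node, so callers with no placement preference need no special case.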
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 90a15d29feeb..2130ca674e9b 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -101,6 +101,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)
attr = bp->attr;
attr.bp_addr = addr;
+ /* reenable breakpoint */
+ attr.disabled = false;
err = modify_user_hw_breakpoint(bp, &attr);
if (unlikely(err))
return err;
@@ -392,6 +394,9 @@ long arch_ptrace(struct task_struct *child, long request,
tmp = 0;
} else {
unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
index = addr - offsetof(struct user, fpu);
tmp = ((unsigned long *)child->thread.xstate)
[index >> 2];
@@ -423,6 +428,9 @@ long arch_ptrace(struct task_struct *child, long request,
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
((unsigned long *)child->thread.xstate)
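
The ptrace hunks guard the xstate accesses with init_fpu(), which allocates and initializes the child's FPU state area on first use and returns a negative errno on failure. A hedged sketch of that guard pattern as a helper (fpu_peek and the header choice are illustrative):

#include <linux/sched.h>
#include <asm/fpu.h>	/* declares init_fpu() on sh; header name assumed */

static int fpu_peek(struct task_struct *child, unsigned long index,
		    unsigned long *val)
{
	int ret = init_fpu(child);	/* make sure thread.xstate exists */

	if (ret)
		return ret;
	*val = ((unsigned long *)child->thread.xstate)[index >> 2];
	return 0;
}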
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 4436eacddb15..c8f97649f354 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -403,6 +403,9 @@ long arch_ptrace(struct task_struct *child, long request,
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
index = addr - offsetof(struct user, fpu);
tmp = get_fpu_long(child, index);
} else if (addr == offsetof(struct user, u_fpvalid)) {
@@ -442,6 +445,9 @@ long arch_ptrace(struct task_struct *child, long request,
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
index = addr - offsetof(struct user, fpu);
ret = put_fpu_long(child, index, data);
}
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 4f267160c515..58bff45d1156 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -150,7 +150,7 @@ void __init check_for_initrd(void)
}
/*
- * If we got this far inspite of the boot loader's best efforts
+ * If we got this far in spite of the boot loader's best efforts
* to the contrary, assume we actually have a valid initrd and
* fix up the root dev.
*/
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 768fb33fdd35..030966a9305c 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -379,3 +379,4 @@ ENTRY(sys_call_table)
.long sys_name_to_handle_at
.long sys_open_by_handle_at /* 360 */
.long sys_clock_adjtime
+ .long sys_syncfs
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 44e7b00c8067..ca0a6142ab63 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -399,3 +399,4 @@ sys_call_table:
.long sys_name_to_handle_at /* 370 */
.long sys_open_by_handle_at
.long sys_clock_adjtime
+ .long sys_syncfs
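
Both syscall tables gain the new syncfs() entry at the end. Until the C library provides a wrapper, userspace can reach it through the raw syscall number; a hedged sketch, assuming the installed kernel headers define __NR_syncfs:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fd = open("/", O_RDONLY);	/* any fd on the filesystem to sync */

	if (fd < 0)
		return 1;
	if (syscall(__NR_syncfs, fd) < 0)
		perror("syncfs");
	close(fd);
	return 0;
}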
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 242117cbad67..1d6d51a1ce79 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -94,17 +94,17 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return NULL;
}
-struct vm_area_struct *get_gate_vma(struct task_struct *task)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
-int in_gate_area(struct task_struct *task, unsigned long address)
+int in_gate_area(struct mm_struct *mm, unsigned long address)
{
return 0;
}
-int in_gate_area_no_task(unsigned long address)
+int in_gate_area_no_mm(unsigned long address)
{
return 0;
}
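
The vsyscall stubs follow the cross-arch change that switched the gate-area helpers from a task_struct to an mm_struct argument, with in_gate_area_no_task() becoming in_gate_area_no_mm(). A hedged sketch of a caller under the new signatures (addr_is_in_gate is an illustrative helper):

#include <linux/mm.h>

static int addr_is_in_gate(struct mm_struct *mm, unsigned long addr)
{
	/* With no mm at hand (e.g. kernel threads), use the _no_mm variant. */
	if (!mm)
		return in_gate_area_no_mm(addr);
	return in_gate_area(mm, addr);
}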