From 84fe98ea6866625a19969892ff3e83d8e94dc976 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 27 May 2011 20:30:48 +0100 Subject: MIPS: NILE4: Remove useless inclusion of GT64120 header. Signed-off-by: Ralf Baechle --- arch/mips/pci/ops-nile4.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/mips/pci/ops-nile4.c b/arch/mips/pci/ops-nile4.c index b7f0fb0210f4..99929cf88419 100644 --- a/arch/mips/pci/ops-nile4.c +++ b/arch/mips/pci/ops-nile4.c @@ -4,7 +4,6 @@ #include #include -#include #include #define PCI_ACCESS_READ 0 -- cgit v1.2.3 From 1bed3b9c71fdf241da2c09a91d39b5302145f7c6 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 27 May 2011 20:32:17 +0100 Subject: MIPS: GT64120: Remove useless inclusion of clocksource.h. Signed-off-by: Ralf Baechle --- arch/mips/include/asm/gt64120.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/mips/include/asm/gt64120.h b/arch/mips/include/asm/gt64120.h index e64b41093c49..0aa44abc77fe 100644 --- a/arch/mips/include/asm/gt64120.h +++ b/arch/mips/include/asm/gt64120.h @@ -21,8 +21,6 @@ #ifndef _ASM_GT64120_H #define _ASM_GT64120_H -#include - #include #include -- cgit v1.2.3 From 34ed9506aecdf2ab9d73497e11b9d160920973f5 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sat, 28 May 2011 00:57:13 +0100 Subject: MIPS: Malta SMTC: Fix build. Commit a561b02a2577aec51277ba39c82bd192a79c0267 (lmo) rsp. 7c8d948f1633da5ff81e4f5b31ef237d74c40127 (kernel.org) ["MIPS: i8259: Convert to new irq_chip functions"] missed one location to modify resulting in build breakage. Signed-off-by: Ralf Baechle --- arch/mips/mti-malta/malta-smtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index 49a38b09a488..1efc8c394486 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c @@ -152,7 +152,7 @@ int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, * runtime code can anyway deal with the null set */ printk(KERN_WARNING - "IRQ affinity leaves no legal CPU for IRQ %d\n", irq); + "IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq); /* Do any generic SMTC IRQ affinity setup */ smtc_set_irq_affinity(d->irq, tmask); -- cgit v1.2.3 From 731f90fae9a94313916a51ef80c4d906c85d9f64 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sat, 28 May 2011 01:09:02 +0100 Subject: MIPS: SMTC: Fix build. Commit ee6114202e48dc282c6d04741e9c5d7171eb8bb8 (lmo) rsp. 1685f3b158a244d4f6e205e67c84483fffcb2d9f (kernel.org) ["MIPS: SMTC: Move declaration of smtc_init_secondary to ."] didn't quite do that - it rather lost the declaration of smtc_init_secondary resulting in a build error. Signed-off-by: Ralf Baechle --- arch/mips/include/asm/smtc.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h index ea60bf08dcb0..c9736fc06325 100644 --- a/arch/mips/include/asm/smtc.h +++ b/arch/mips/include/asm/smtc.h @@ -46,6 +46,7 @@ extern void smtc_prepare_cpus(int cpus); extern void smtc_smp_finish(void); extern void smtc_boot_secondary(int cpu, struct task_struct *t); extern void smtc_cpus_done(void); +extern void smtc_init_secondary(void); /* -- cgit v1.2.3 From b12acf163f6e52ff7d41aca51382dde17c506068 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sat, 28 May 2011 13:22:58 +0100 Subject: MIPS: Wire up sendmmsg and renumber setns syscall. 
Renumbering was necessary because I had already wired up setns(2) in the linux-mips.org tree in commit c3fce54644cabbb90700cc3acc040718a377f609 [MIPS: Wire up new sendmmsg syscall.] but the same syscall numbers were used by 7b21fddd087678a70ad64afc0f632e0f1071b092 [ns: Wire up the setns system call] resulting in a conflict. Signed-off-by: Ralf Baechle --- arch/mips/include/asm/unistd.h | 21 ++++++++++++--------- arch/mips/kernel/scall32-o32.S | 1 + arch/mips/kernel/scall64-64.S | 1 + arch/mips/kernel/scall64-n32.S | 1 + arch/mips/kernel/scall64-o32.S | 1 + 5 files changed, 16 insertions(+), 9 deletions(-) diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index 6fcfc480e9d0..ecea7871dec2 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -363,17 +363,18 @@ #define __NR_open_by_handle_at (__NR_Linux + 340) #define __NR_clock_adjtime (__NR_Linux + 341) #define __NR_syncfs (__NR_Linux + 342) -#define __NR_setns (__NR_Linux + 343) +#define __NR_sendmmsg (__NR_Linux + 343) +#define __NR_setns (__NR_Linux + 344) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 343 +#define __NR_Linux_syscalls 344 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 343 +#define __NR_O32_Linux_syscalls 344 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -683,17 +684,18 @@ #define __NR_open_by_handle_at (__NR_Linux + 299) #define __NR_clock_adjtime (__NR_Linux + 300) #define __NR_syncfs (__NR_Linux + 301) -#define __NR_setns (__NR_Linux + 302) +#define __NR_sendmmsg (__NR_Linux + 302) +#define __NR_setns (__NR_Linux + 303) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 302 +#define __NR_Linux_syscalls 303 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 302 +#define __NR_64_Linux_syscalls 303 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1008,17 +1010,18 @@ #define __NR_open_by_handle_at (__NR_Linux + 304) #define __NR_clock_adjtime (__NR_Linux + 305) #define __NR_syncfs (__NR_Linux + 306) -#define __NR_setns (__NR_Linux + 307) +#define __NR_sendmmsg (__NR_Linux + 307) +#define __NR_setns (__NR_Linux + 308) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 307 +#define __NR_Linux_syscalls 308 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 307 +#define __NR_N32_Linux_syscalls 308 #ifdef __KERNEL__ diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 99e656e425f3..e521420a45a5 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -589,6 +589,7 @@ einval: li v0, -ENOSYS sys sys_open_by_handle_at 3 /* 4340 */ sys sys_clock_adjtime 2 sys sys_syncfs 1 + sys sys_sendmmsg 4 sys sys_setns 2 .endm diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index fb0575f47f3d..85874d6a8a70 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -428,5 +428,6 @@ sys_call_table: PTR sys_open_by_handle_at PTR sys_clock_adjtime /* 5300 */ PTR sys_syncfs + PTR sys_sendmmsg PTR sys_setns .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 4de0c5534e73..b85842fc87ae 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -428,5 +428,6 @@ EXPORT(sysn32_call_table) PTR sys_open_by_handle_at PTR compat_sys_clock_adjtime /* 6305 */ PTR sys_syncfs 
+ PTR compat_sys_sendmmsg PTR sys_setns .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 4a387de08bfa..46c4763edf21 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -546,5 +546,6 @@ sys_call_table: PTR compat_sys_open_by_handle_at /* 4340 */ PTR compat_sys_clock_adjtime PTR sys_syncfs + PTR compat_sys_sendmmsg PTR sys_setns .size sys_call_table,.-sys_call_table -- cgit v1.2.3 From 852fe3105e94ca26d1b3df7e2cb6878ebdd67608 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sat, 28 May 2011 15:27:59 +0100 Subject: MIPS: Malta: Fix crash in SMP kernel on non-CMP systems. Since 6be63bbbdab66b9185dc6f67c8b1bacb6f37f946 (lmo) rsp. af3a1f6f4813907e143f87030cde67a9971db533 (kernel.org) the Malta code does no longer probe for presence of GCMP if CMP is not configured. This means that the variable gcmp_present well be left at its default value of -1 which normally is meant to indicate that GCMP has not yet been mmapped. This non-zero value is now interpreted as GCMP being present resulting in a write attempt to a GCMP register resulting in a crash. Reported and a build fix on top of my fix by Rob Landley . Reported-by: Rob Landley Signed-off-by: Ralf Baechle Patchwork: https://patchwork.linux-mips.org/patch/2413/ --- arch/mips/include/asm/smp-ops.h | 41 +++++++++++++++++++++++++++++--- arch/mips/mipssim/sim_setup.c | 17 +++++++------ arch/mips/mti-malta/malta-init.c | 14 +++++------ arch/mips/pmc-sierra/msp71xx/msp_setup.c | 8 +++---- 4 files changed, 56 insertions(+), 24 deletions(-) diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index 9e09af34c8a8..48b03fff31e5 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -56,8 +56,43 @@ static inline void register_smp_ops(struct plat_smp_ops *ops) #endif /* !CONFIG_SMP */ -extern struct plat_smp_ops up_smp_ops; -extern struct plat_smp_ops cmp_smp_ops; -extern struct plat_smp_ops vsmp_smp_ops; +static inline int register_up_smp_ops(void) +{ +#ifdef CONFIG_SMP_UP + extern struct plat_smp_ops up_smp_ops; + + register_smp_ops(&up_smp_ops); + + return 0; +#else + return -ENODEV; +#endif +} + +static inline int register_cmp_smp_ops(void) +{ +#ifdef CONFIG_MIPS_CMP + extern struct plat_smp_ops cmp_smp_ops; + + register_smp_ops(&cmp_smp_ops); + + return 0; +#else + return -ENODEV; +#endif +} + +static inline int register_vsmp_smp_ops(void) +{ +#ifdef CONFIG_MIPS_MT_SMP + extern struct plat_smp_ops vsmp_smp_ops; + + register_smp_ops(&vsmp_smp_ops); + + return 0; +#else + return -ENODEV; +#endif +} #endif /* __ASM_SMP_OPS_H */ diff --git a/arch/mips/mipssim/sim_setup.c b/arch/mips/mipssim/sim_setup.c index 55f22a3afe61..19700696a847 100644 --- a/arch/mips/mipssim/sim_setup.c +++ b/arch/mips/mipssim/sim_setup.c @@ -59,18 +59,17 @@ void __init prom_init(void) prom_meminit(); -#ifdef CONFIG_MIPS_MT_SMP - if (cpu_has_mipsmt) - register_smp_ops(&vsmp_smp_ops); - else - register_smp_ops(&up_smp_ops); -#endif + if (cpu_has_mipsmt) { + if (!register_vsmp_smp_ops()) + return; + #ifdef CONFIG_MIPS_MT_SMTC - if (cpu_has_mipsmt) register_smp_ops(&ssmtc_smp_ops); - else - register_smp_ops(&up_smp_ops); + return; #endif + } + + register_up_smp_ops(); } static void __init serial_init(void) diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 31180c321a1a..4b988b9a30d5 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -28,6 +28,7 @@ 
#include #include #include +#include #include #include @@ -358,15 +359,14 @@ void __init prom_init(void) #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif -#ifdef CONFIG_MIPS_CMP /* Early detection of CMP support */ if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) - register_smp_ops(&cmp_smp_ops); - else -#endif -#ifdef CONFIG_MIPS_MT_SMP - register_smp_ops(&vsmp_smp_ops); -#endif + if (!register_cmp_smp_ops()) + return; + + if (!register_vsmp_smp_ops()) + return; + #ifdef CONFIG_MIPS_MT_SMTC register_smp_ops(&msmtc_smp_ops); #endif diff --git a/arch/mips/pmc-sierra/msp71xx/msp_setup.c b/arch/mips/pmc-sierra/msp71xx/msp_setup.c index 2413ea67877e..0abfbe04ffc9 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_setup.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_setup.c @@ -228,13 +228,11 @@ void __init prom_init(void) */ msp_serial_setup(); -#ifdef CONFIG_MIPS_MT_SMP - register_smp_ops(&vsmp_smp_ops); -#endif - + if (register_vsmp_smp_ops()) { #ifdef CONFIG_MIPS_MT_SMTC - register_smp_ops(&msp_smtc_smp_ops); + register_smp_ops(&msp_smtc_smp_ops); #endif + } #ifdef CONFIG_PMCTWILED /* -- cgit v1.2.3 From bf45e583715eae15e0adecb6f55d97878e127d7d Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 30 May 2011 14:59:47 +0100 Subject: MIPS: XLR, XLS: Move makefile bits to were they belong. This patch combines linux-mips.org patches 637d69600fb1773da56487271ec2a79c33d237ed [MIPS: Netlogic: Yank out crap.] and 5e3c263b9658a4b1c6c5577793e9347efb44854e [MIPS: XLR, XLS: Add Kbuild files for platform.] Signed-off-by: Ralf Baechle Signed-off-by: Jayachandran C Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2415/ Signed-off-by: Ralf Baechle --- arch/mips/Kbuild.platforms | 1 + arch/mips/Makefile | 12 ------------ arch/mips/netlogic/Platform | 11 +++++++++++ 3 files changed, 12 insertions(+), 12 deletions(-) create mode 100644 arch/mips/netlogic/Platform diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index aef6c917b45a..5ce8029f558b 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -16,6 +16,7 @@ platforms += lasat platforms += loongson platforms += mipssim platforms += mti-malta +platforms += netlogic platforms += pmc-sierra platforms += pnx833x platforms += pnx8550 diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 884819cd0607..53e3514ba10e 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -191,18 +191,6 @@ endif # include $(srctree)/arch/mips/Kbuild.platforms -# -# NETLOGIC SOC Common (common) -# -cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic -cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic - -# -# NETLOGIC XLR/XLS SoC, Simulator and boards -# -core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/ -load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000 - cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic drivers-$(CONFIG_PCI) += arch/mips/pci/ diff --git a/arch/mips/netlogic/Platform b/arch/mips/netlogic/Platform new file mode 100644 index 000000000000..f87c1640abb5 --- /dev/null +++ b/arch/mips/netlogic/Platform @@ -0,0 +1,11 @@ +# +# NETLOGIC includes +# +cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic +cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic + +# +# NETLOGIC XLR/XLS SoC, Simulator and boards +# +core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/ +load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000 -- cgit v1.2.3 From 4f55fd752fb4770a354dd2401d5043c98236cc29 Mon Sep 17 
00:00:00 2001 From: Ralf Baechle Date: Wed, 1 Jun 2011 14:20:09 +0100 Subject: MIPS: ARC: Fix build of firmware library on uniprocessor. Signed-off-by: Ralf Baechle --- arch/mips/include/asm/smp-ops.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index 48b03fff31e5..ef2a8041e78b 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -11,6 +11,8 @@ #ifndef __ASM_SMP_OPS_H #define __ASM_SMP_OPS_H +#include + #ifdef CONFIG_SMP #include -- cgit v1.2.3 From 56eccc36dd8888f44c1de24c52b150adbf8ec427 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 6 Jun 2011 01:17:25 +0100 Subject: MIPS: MIPSsim: Fix uniprocessor build. Signed-off-by: Ralf Baechle --- arch/mips/mipssim/sim_setup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/mips/mipssim/sim_setup.c b/arch/mips/mipssim/sim_setup.c index 19700696a847..256e0cdaa499 100644 --- a/arch/mips/mipssim/sim_setup.c +++ b/arch/mips/mipssim/sim_setup.c @@ -34,6 +34,7 @@ #include #include #include +#include static void __init serial_init(void); -- cgit v1.2.3 From 1544129da2de9fa276429deed8fac3fbc45634be Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 11:53:01 +0200 Subject: MIPS: SB1250: Restore dropped irq_mask function Commit d6d5d5c4a (MIPS: Sibyte: Convert to new irq_chip functions) removed the mask function which breaks irq_shutdown(). Restore it. Reported-by: Matt Turner Signed-off-by: Thomas Gleixner Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2460/ Tested-by: Matt Turner Signed-off-by: Ralf Baechle --- arch/mips/sibyte/sb1250/irq.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index be4460a5f6a8..76ee045e2ce4 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c @@ -123,6 +123,13 @@ static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask, } #endif +static void disable_sb1250_irq(struct irq_data *d) +{ + unsigned int irq = d->irq; + + sb1250_mask_irq(sb1250_irq_owner[irq], irq); +} + static void enable_sb1250_irq(struct irq_data *d) { unsigned int irq = d->irq; @@ -180,6 +187,7 @@ static struct irq_chip sb1250_irq_type = { .name = "SB1250-IMR", .irq_mask_ack = ack_sb1250_irq, .irq_unmask = enable_sb1250_irq, + .irq_mask = disable_sb1250_irq, #ifdef CONFIG_SMP .irq_set_affinity = sb1250_set_affinity #endif -- cgit v1.2.3 From 39263eeb44308a5d6ea6117376721a6091d2b622 Mon Sep 17 00:00:00 2001 From: Jayachandran C Date: Tue, 7 Jun 2011 03:14:12 +0530 Subject: MIPS: Netlogic: SMP fixes for XLR/XLS platform code. 
Fix few issues in the Netlogic code: - Use handle_percpu_irq to handle per-cpu interrupts - Remove unused function nlm_common_ipi_handler() - Call scheduler_ipi() on SMP_RESCHEDULE_YOURSELF - Enable interrupts in nlm_smp_finish() Signed-off-by: Jayachandran C Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2460/ Signed-off-by: Ralf Baechle --- arch/mips/netlogic/xlr/irq.c | 2 +- arch/mips/netlogic/xlr/smp.c | 13 ++----------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/arch/mips/netlogic/xlr/irq.c b/arch/mips/netlogic/xlr/irq.c index 1446d58e364c..521bb7377eb0 100644 --- a/arch/mips/netlogic/xlr/irq.c +++ b/arch/mips/netlogic/xlr/irq.c @@ -209,7 +209,7 @@ void __init init_xlr_irqs(void) irq_set_chip_and_handler(i, &xlr_pic, handle_level_irq); else irq_set_chip_and_handler(i, &nlm_cpu_intr, - handle_level_irq); + handle_percpu_irq); } #ifdef CONFIG_SMP irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr, diff --git a/arch/mips/netlogic/xlr/smp.c b/arch/mips/netlogic/xlr/smp.c index b495a7f1433b..d842bce5c940 100644 --- a/arch/mips/netlogic/xlr/smp.c +++ b/arch/mips/netlogic/xlr/smp.c @@ -87,17 +87,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) /* IRQ_IPI_SMP_RESCHEDULE handler */ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc) { - set_need_resched(); -} - -void nlm_common_ipi_handler(int irq, struct pt_regs *regs) -{ - if (irq == IRQ_IPI_SMP_FUNCTION) { - smp_call_function_interrupt(); - } else { - /* Announce that we are for reschduling */ - set_need_resched(); - } + scheduler_ipi(); } /* @@ -122,6 +112,7 @@ void nlm_smp_finish(void) #ifdef notyet nlm_common_msgring_cpu_init(); #endif + local_irq_enable(); } void nlm_cpus_done(void) -- cgit v1.2.3 From b29af676a2e11126e9bf9205c792faa81f775df0 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Wed, 8 Jun 2011 12:03:02 +0200 Subject: MIPS: BCM63xx: Remove duplicate PERF_IRQSTAT_REG definition Signed-off-by: Jonas Gorski Cc: linux-mips@linux-mips.org Acked-by: Florian Fainelli Patchwork: https://patchwork.linux-mips.org/patch/2461/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 85fd27509aac..0ed5230243c9 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -89,7 +89,6 @@ /* Interrupt Mask register */ #define PERF_IRQMASK_REG 0xc -#define PERF_IRQSTAT_REG 0x10 /* Interrupt Status register */ #define PERF_IRQSTAT_REG 0x10 -- cgit v1.2.3 From 6097050d26458ccd6ffcba2accc55d0edfc9a4cd Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 9 Jun 2011 10:32:22 +0100 Subject: MAINTAINERS: Update MIPS entry. o Add entry for MIPS patchworks o Reorder entries for readability. 
Signed-off-by: Ralf Baechle --- MAINTAINERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 187282da9213..020f4fbb66ef 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4193,9 +4193,10 @@ F: drivers/usb/image/microtek.* MIPS M: Ralf Baechle -W: http://www.linux-mips.org/ L: linux-mips@linux-mips.org +W: http://www.linux-mips.org/ T: git git://git.linux-mips.org/pub/scm/linux.git +Q: http://patchwork.linux-mips.org/project/linux-mips/list/ S: Supported F: Documentation/mips/ F: arch/mips/ -- cgit v1.2.3 From de8839728742a1745370b643f3ac771126dff12f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 12 Jun 2011 20:57:17 +0200 Subject: MIPS: AR7: Fix trailing semicolon bug in clock.c Signed-off-by: Florian Fainelli To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2489/ Signed-off-by: Ralf Baechle --- arch/mips/ar7/clock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c index 2ca4ada1c291..2460f9d23f1b 100644 --- a/arch/mips/ar7/clock.c +++ b/arch/mips/ar7/clock.c @@ -443,7 +443,7 @@ struct clk *clk_get(struct device *dev, const char *id) return &vbus_clk; if (!strcmp(id, "cpu")) return &cpu_clk; - if (!strcmp(id, "dsp")); + if (!strcmp(id, "dsp")) return &dsp_clk; if (!strcmp(id, "vbus")) return &vbus_clk; -- cgit v1.2.3 From 744120aadd60bc64a79b49681786b5e217dafbc1 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Thu, 9 Jun 2011 20:15:21 +0200 Subject: MIPS: Lantiq: Add missing clk_enable and clk_disable functions. Signed-of-by: John Crispin Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2465/ Signed-off-by: Ralf Baechle --- arch/mips/lantiq/clk.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index 94560899d13e..7e9c0ffc11a5 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -100,6 +100,19 @@ void clk_put(struct clk *clk) } EXPORT_SYMBOL(clk_put); +int clk_enable(struct clk *clk) +{ + /* not used */ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ + /* not used */ +} +EXPORT_SYMBOL(clk_disable); + static inline u32 ltq_get_counter_resolution(void) { u32 res; -- cgit v1.2.3 From 11454100f41f43f128da02c0235ac85636a2cf70 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 12 Jun 2011 20:57:18 +0200 Subject: MIPS: AR7: Remove 'space before tabs' in platform.c Signed-off-by: Florian Fainelli To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2490/ Signed-off-by: Ralf Baechle --- arch/mips/ar7/platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 7d2fab392327..33ffecf6a6d6 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -229,7 +229,7 @@ static struct resource cpmac_low_res[] = { .name = "irq", .flags = IORESOURCE_IRQ, .start = 27, - .end = 27, + .end = 27, }, }; -- cgit v1.2.3 From c4a50541611d097c92f9e599f53d19d438074b12 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 12 Jun 2011 20:57:19 +0200 Subject: MIPS: AR7: Replace __attribute__((__packed__)) with __packed Signed-off-by: Florian Fainelli To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2491/ Signed-off-by: Ralf Baechle Date: Fri, 10 Jun 2011 15:10:04 +0200 Subject: MIPS: Remove redundant addr_limit assignment on exec. 
The address limit is already set in flush_old_exec() via set_fs(USER_DS) so this assignment is redundant. [ralf@linux-mips.org: also see dac853ae89043f1b7752875300faf614de43c74b for further explanation.] Signed-off-by: Mathias Krause Cc: Andrew Morton Cc: Linus Torvalds Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/2466/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/process.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index d2112d3cf115..a8d53e508bb7 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -103,7 +103,6 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) __init_dsp(); regs->cp0_epc = pc; regs->regs[29] = sp; - current_thread_info()->addr_limit = USER_DS; } void exit_thread(void) -- cgit v1.2.3 From d0be89f6c2570a63ac44ccdd12473a54243cd296 Mon Sep 17 00:00:00 2001 From: Jian Peng Date: Tue, 17 May 2011 12:27:49 -0700 Subject: MIPS: topdown mmap support This patch introduced topdown mmap support in user process address space allocation policy. Recently, we ran some large applications that use mmap heavily and lead to OOM due to inflexible mmap allocation policy on MIPS32. Since most other major archs supported it for years, it is reasonable to follow the trend and reduce the pain of porting applications. Due to cache aliasing concern, arch_get_unmapped_area_topdown() and other helper functions are implemented in arch/mips/kernel/syscall.c. Signed-off-by: Jian Peng Cc: David Daney Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2389/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/pgtable.h | 1 + arch/mips/mm/mmap.c | 193 +++++++++++++++++++++++++++++++++++----- 2 files changed, 171 insertions(+), 23 deletions(-) diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 7e40f3778179..b2202a68cf0f 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -414,6 +414,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, * constraints placed on us by the cache architecture. 
*/ #define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN /* * No page table caches to initialise diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index ae3c20a9556e..9ff5d0fac556 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -17,21 +18,65 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); +/* gap between mmap and stack */ +#define MIN_GAP (128*1024*1024UL) +#define MAX_GAP ((TASK_SIZE)/6*5) + +static int mmap_is_legacy(void) +{ + if (current->personality & ADDR_COMPAT_LAYOUT) + return 1; + + if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) + return 1; + + return sysctl_legacy_va_layout; +} + +static unsigned long mmap_base(unsigned long rnd) +{ + unsigned long gap = rlimit(RLIMIT_STACK); + + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + + return PAGE_ALIGN(TASK_SIZE - gap - rnd); +} + +static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, + unsigned long pgoff) +{ + unsigned long base = addr & ~shm_align_mask; + unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask; + + if (base + off <= addr) + return base + off; + + return base - off; +} + #define COLOUR_ALIGN(addr,pgoff) \ ((((addr) + shm_align_mask) & ~shm_align_mask) + \ (((pgoff) << PAGE_SHIFT) & shm_align_mask)) -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) +enum mmap_allocation_direction {UP, DOWN}; + +static unsigned long arch_get_unmapped_area_foo(struct file *filp, + unsigned long addr0, unsigned long len, unsigned long pgoff, + unsigned long flags, enum mmap_allocation_direction dir) { - struct vm_area_struct * vmm; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long addr = addr0; int do_color_align; - if (len > TASK_SIZE) + if (unlikely(len > TASK_SIZE)) return -ENOMEM; if (flags & MAP_FIXED) { - /* Even MAP_FIXED mappings must reside within TASK_SIZE. */ + /* Even MAP_FIXED mappings must reside within TASK_SIZE */ if (TASK_SIZE - len < addr) return -EINVAL; @@ -48,34 +93,130 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; + + /* requesting a specific address */ if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); - vmm = find_vma(current->mm, addr); + + vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vmm || addr + len <= vmm->vm_start)) + (!vma || addr + len <= vma->vm_start)) return addr; } - addr = current->mm->mmap_base; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); - for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { - /* At this point: (!vmm || addr < vmm->vm_end). */ - if (TASK_SIZE - len < addr) - return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) - return addr; - addr = vmm->vm_end; + if (dir == UP) { + addr = mm->mmap_base; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; + if (!vma || addr + len <= vma->vm_start) + return addr; + addr = vma->vm_end; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + } + } else { + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache; + if (do_color_align) { + unsigned long base = + COLOUR_ALIGN_DOWN(addr - len, pgoff); + + addr = base + len; + } + + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr - len); + if (!vma || addr <= vma->vm_start) { + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr-len; + } + } + + if (unlikely(mm->mmap_base < len)) + goto bottomup; + + addr = mm->mmap_base-len; if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); + addr = COLOUR_ALIGN_DOWN(addr, pgoff); + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (likely(!vma || addr+len <= vma->vm_start)) { + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr; + } + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = vma->vm_start-len; + if (do_color_align) + addr = COLOUR_ALIGN_DOWN(addr, pgoff); + } while (likely(len < vma->vm_start)); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + mm->cached_hole_size = ~0UL; + mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + + return addr; } } +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + return arch_get_unmapped_area_foo(filp, + addr0, len, pgoff, flags, UP); +} + +/* + * There is no need to export this but sched.h declares the function as + * extern so making it static here results in an error. 
+ */ +unsigned long arch_get_unmapped_area_topdown(struct file *filp, + unsigned long addr0, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + return arch_get_unmapped_area_foo(filp, + addr0, len, pgoff, flags, DOWN); +} + void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; @@ -89,9 +230,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm) random_factor &= 0xffffffful; } - mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(random_factor); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } } static inline unsigned long brk_rnd(void) -- cgit v1.2.3 From e36863a550da44595b155c6b86ff46b50cbff5c0 Mon Sep 17 00:00:00 2001 From: Dezhong Diao Date: Wed, 13 Oct 2010 16:57:35 -0700 Subject: MIPS: HIGHMEM DMA on noncoherent MIPS32 processors [v4: Patch applies to linux-queue.git with kmap_atomic patches: https://patchwork.kernel.org/patch/189932/ https://patchwork.kernel.org/patch/194552/ https://patchwork.kernel.org/patch/189912/ ] The MIPS DMA coherency functions do not work properly (i.e. kernel oops) when HIGHMEM pages are passed in as arguments. Use kmap_atomic() to temporarily map high pages for cache maintenance operations. Tested on a 2.6.36-rc7 1GB HIGHMEM SMP no-alias system. Signed-off-by: Dezhong Diao Signed-off-by: Kevin Cernekee Cc: Dezhong Diao Cc: David Daney Cc: David VomLehn Cc: Sergei Shtylyov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/1695/ Signed-off-by: Ralf Baechle --- arch/mips/mm/dma-default.c | 114 +++++++++++++++++++++++++++------------------ 1 file changed, 68 insertions(+), 46 deletions(-) diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 21ea14efb837..46084912e588 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -15,18 +15,18 @@ #include #include #include +#include #include #include #include -static inline unsigned long dma_addr_to_virt(struct device *dev, +static inline struct page *dma_addr_to_page(struct device *dev, dma_addr_t dma_addr) { - unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr); - - return (unsigned long)phys_to_virt(addr); + return pfn_to_page( + plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT); } /* @@ -148,20 +148,20 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, free_pages(addr, get_order(size)); } -static inline void __dma_sync(unsigned long addr, size_t size, +static inline void __dma_sync_virtual(void *addr, size_t size, enum dma_data_direction direction) { switch (direction) { case DMA_TO_DEVICE: - dma_cache_wback(addr, size); + dma_cache_wback((unsigned long)addr, size); break; case DMA_FROM_DEVICE: - dma_cache_inv(addr, size); + dma_cache_inv((unsigned long)addr, size); break; case DMA_BIDIRECTIONAL: - dma_cache_wback_inv(addr, size); + dma_cache_wback_inv((unsigned long)addr, size); break; default: @@ -169,12 +169,49 @@ static inline void __dma_sync(unsigned long addr, size_t size, } } +/* + * A single sg entry may refer to multiple physically contiguous + * pages. But we still need to process highmem pages individually. 
+ * If highmem is not configured then the bulk of this loop gets + * optimized out. + */ +static inline void __dma_sync(struct page *page, + unsigned long offset, size_t size, enum dma_data_direction direction) +{ + size_t left = size; + + do { + size_t len = left; + + if (PageHighMem(page)) { + void *addr; + + if (offset + len > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + } + len = PAGE_SIZE - offset; + } + + addr = kmap_atomic(page); + __dma_sync_virtual(addr + offset, len, direction); + kunmap_atomic(addr); + } else + __dma_sync_virtual(page_address(page) + offset, + size, direction); + offset = 0; + page++; + left -= len; + } while (left); +} + static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { if (cpu_is_noncoherent_r10000(dev)) - __dma_sync(dma_addr_to_virt(dev, dma_addr), size, - direction); + __dma_sync(dma_addr_to_page(dev, dma_addr), + dma_addr & ~PAGE_MASK, size, direction); plat_unmap_dma_mem(dev, dma_addr, size, direction); } @@ -185,13 +222,11 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg, int i; for (i = 0; i < nents; i++, sg++) { - unsigned long addr; - - addr = (unsigned long) sg_virt(sg); - if (!plat_device_is_coherent(dev) && addr) - __dma_sync(addr, sg->length, direction); - sg->dma_address = plat_map_dma_mem(dev, - (void *)addr, sg->length); + if (!plat_device_is_coherent(dev)) + __dma_sync(sg_page(sg), sg->offset, sg->length, + direction); + sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) + + sg->offset; } return nents; @@ -201,30 +236,23 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { - unsigned long addr; - - addr = (unsigned long) page_address(page) + offset; - if (!plat_device_is_coherent(dev)) - __dma_sync(addr, size, direction); + __dma_sync(page, offset, size, direction); - return plat_map_dma_mem(dev, (void *)addr, size); + return plat_map_dma_mem_page(dev, page) + offset; } static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, enum dma_data_direction direction, struct dma_attrs *attrs) { - unsigned long addr; int i; for (i = 0; i < nhwentries; i++, sg++) { if (!plat_device_is_coherent(dev) && - direction != DMA_TO_DEVICE) { - addr = (unsigned long) sg_virt(sg); - if (addr) - __dma_sync(addr, sg->length, direction); - } + direction != DMA_TO_DEVICE) + __dma_sync(sg_page(sg), sg->offset, sg->length, + direction); plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction); } } @@ -232,24 +260,18 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg, static void mips_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { - if (cpu_is_noncoherent_r10000(dev)) { - unsigned long addr; - - addr = dma_addr_to_virt(dev, dma_handle); - __dma_sync(addr, size, direction); - } + if (cpu_is_noncoherent_r10000(dev)) + __dma_sync(dma_addr_to_page(dev, dma_handle), + dma_handle & ~PAGE_MASK, size, direction); } static void mips_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { plat_extra_sync_for_device(dev); - if (!plat_device_is_coherent(dev)) { - unsigned long addr; - - addr = dma_addr_to_virt(dev, dma_handle); - __dma_sync(addr, size, direction); - } + if 
(!plat_device_is_coherent(dev)) + __dma_sync(dma_addr_to_page(dev, dma_handle), + dma_handle & ~PAGE_MASK, size, direction); } static void mips_dma_sync_sg_for_cpu(struct device *dev, @@ -260,8 +282,8 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev, /* Make sure that gcc doesn't leave the empty loop body. */ for (i = 0; i < nelems; i++, sg++) { if (cpu_is_noncoherent_r10000(dev)) - __dma_sync((unsigned long)page_address(sg_page(sg)), - sg->length, direction); + __dma_sync(sg_page(sg), sg->offset, sg->length, + direction); } } @@ -273,8 +295,8 @@ static void mips_dma_sync_sg_for_device(struct device *dev, /* Make sure that gcc doesn't leave the empty loop body. */ for (i = 0; i < nelems; i++, sg++) { if (!plat_device_is_coherent(dev)) - __dma_sync((unsigned long)page_address(sg_page(sg)), - sg->length, direction); + __dma_sync(sg_page(sg), sg->offset, sg->length, + direction); } } @@ -295,7 +317,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, plat_extra_sync_for_device(dev); if (!plat_device_is_coherent(dev)) - __dma_sync((unsigned long)vaddr, size, direction); + __dma_sync_virtual(vaddr, size, direction); } EXPORT_SYMBOL(dma_cache_sync); -- cgit v1.2.3 From b6da0ffb09ad4468e6749488909f04f1efac5de3 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Sun, 30 May 2010 00:32:51 -0700 Subject: MIPS: pfn_valid() is broken on low memory HIGHMEM systems pfn_valid() compares the PFN to max_mapnr: __pfn >= min_low_pfn && __pfn < max_mapnr; On HIGHMEM kernels, highend_pfn is used to set the value of max_mapnr. Unfortunately, highend_pfn is left at zero if the system does not actually have enough RAM to reach into the HIGHMEM range. This causes pfn_valid() to always return false, and when debug checks are enabled the kernel will fail catastrophically: Memory: 22432k/32768k available (2249k kernel code, 10336k reserved, 653k data, 1352k init, 0k highmem) NR_IRQS:128 kfree_debugcheck: out of range ptr 81c02900h. Kernel bug detected[#1]: Cpu 0 $ 0 : 00000000 10008400 00000034 00000000 $ 4 : 8003e160 802a0000 8003e160 00000000 $ 8 : 00000000 0000003e 00000747 00000747 ... On such a configuration, max_low_pfn should be used to set max_mapnr. This was seen on 2.6.34. Signed-off-by: Kevin Cernekee To: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/1992/ Signed-off-by: Ralf Baechle --- arch/mips/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 1aadeb42c5a5..11689e1a2924 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -368,7 +368,7 @@ void __init mem_init(void) #ifdef CONFIG_DISCONTIGMEM #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet" #endif - max_mapnr = highend_pfn; + max_mapnr = highend_pfn ? highend_pfn : max_low_pfn; #else max_mapnr = max_low_pfn; #endif -- cgit v1.2.3 From d0023c4a0af1ff16fe183257682025bfcc068e85 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Mon, 6 Sep 2010 21:03:46 -0700 Subject: MIPS: Add SYNC after cacheflush On processors with deep write buffers, it is likely that many cycles will pass between a CACHE instruction and the time the data actually gets written out to DRAM. Add a SYNC instruction to ensure that the buffers get emptied before the flush functions return. 
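
For illustration only -- this is not code from the patch -- here is a minimal sketch of the ordering the added __sync() enforces: push dirty lines out with CACHE ops, then drain the write buffer with SYNC before anything touches the same memory through an uncached alias. flush_dcache_line() and cpu_dcache_line_size() are the usual <asm/r4kcache.h> / <asm/cpu-features.h> helpers; the numbered failure recipe from the commit message follows right after this sketch.

	#include <asm/r4kcache.h>
	#include <asm/cpu-features.h>

	/* Sketch: write back a buffer for DMA and make the writebacks visible. */
	static void wback_buffer_for_device(unsigned long addr, unsigned long size)
	{
		unsigned long line, end = addr + size;

		/* Step 3 of the recipe below: CACHE Hit_Writeback_Inv_D over the buffer. */
		for (line = addr; line < end; line += cpu_dcache_line_size())
			flush_dcache_line(line);

		/*
		 * A deep write buffer may still hold the evicted lines at this
		 * point; SYNC drains it so steps 5 and 6 of the recipe below
		 * cannot race.
		 */
		__asm__ __volatile__("sync" : : : "memory");
	}
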
Actual problem seen in the wild: 1) dma_alloc_coherent() allocates cached memory 2) memset() is called to clear the new pages 3) dma_cache_wback_inv() is called to flush the zero data out to memory 4) dma_alloc_coherent() returns an uncached (kseg1) pointer to the freshly allocated pages 5) Caller writes data through the kseg1 pointer 6) Buffered writeback data finally gets flushed out to DRAM 7) Part of caller's data is inexplicably zeroed out This patch adds SYNC between steps 3 and 4, which fixed the problem. Signed-off-by: Kevin Cernekee Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: Signed-off-by: Ralf Baechle --- arch/mips/mm/c-r4k.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index eeb642e4066e..b9aabb998a32 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -604,6 +604,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) r4k_blast_scache(); else blast_scache_range(addr, addr + size); + __sync(); return; } @@ -620,6 +621,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) } bc_wback_inv(addr, size); + __sync(); } static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) @@ -647,6 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) (addr + size - 1) & almask); blast_inv_scache_range(addr, addr + size); } + __sync(); return; } @@ -663,6 +666,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) } bc_inv(addr, size); + __sync(); } #endif /* CONFIG_DMA_NONCOHERENT */ -- cgit v1.2.3 From 565b60de325070ccc54b18346a1238d4fae17954 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Tue, 7 Sep 2010 12:59:15 -0700 Subject: MIPS: Move FIXADDR_TOP into spaces.h Memory maps and addressing quirks are normally defined in . There are already three targets that need to override FIXADDR_TOP, and others exist. This will be a cleaner approach than adding lots of ifdefs in fixmap.h . Signed-off-by: Kevin Cernekee Cc: Atsushi Nemoto Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/1573/ Acked-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- arch/mips/include/asm/fixmap.h | 10 +--------- arch/mips/include/asm/mach-bcm63xx/spaces.h | 17 +++++++++++++++++ arch/mips/include/asm/mach-generic/spaces.h | 4 ++++ arch/mips/include/asm/mach-tx39xx/spaces.h | 17 +++++++++++++++++ arch/mips/include/asm/mach-tx49xx/spaces.h | 17 +++++++++++++++++ 5 files changed, 56 insertions(+), 9 deletions(-) create mode 100644 arch/mips/include/asm/mach-bcm63xx/spaces.h create mode 100644 arch/mips/include/asm/mach-tx39xx/spaces.h create mode 100644 arch/mips/include/asm/mach-tx49xx/spaces.h diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h index 0b89b83e2055..98bcc98cf29b 100644 --- a/arch/mips/include/asm/fixmap.h +++ b/arch/mips/include/asm/fixmap.h @@ -14,6 +14,7 @@ #define _ASM_FIXMAP_H #include +#include #ifdef CONFIG_HIGHMEM #include #include @@ -67,15 +68,6 @@ enum fixed_addresses { * the start of the fixmap, and leave one page empty * at the top of mem.. 
*/ -#ifdef CONFIG_BCM63XX -#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000) -#else -#if defined(CONFIG_CPU_TX39XX) || defined(CONFIG_CPU_TX49XX) -#define FIXADDR_TOP ((unsigned long)(long)(int)(0xff000000 - 0x20000)) -#else -#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) -#endif -#endif #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) diff --git a/arch/mips/include/asm/mach-bcm63xx/spaces.h b/arch/mips/include/asm/mach-bcm63xx/spaces.h new file mode 100644 index 000000000000..61e750fb4653 --- /dev/null +++ b/arch/mips/include/asm/mach-bcm63xx/spaces.h @@ -0,0 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_BCM63XX_SPACES_H +#define _ASM_BCM63XX_SPACES_H + +#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000) + +#include + +#endif /* __ASM_BCM63XX_SPACES_H */ diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h index c9fa4b14968d..d7a9efd3a5ce 100644 --- a/arch/mips/include/asm/mach-generic/spaces.h +++ b/arch/mips/include/asm/mach-generic/spaces.h @@ -82,4 +82,8 @@ #define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET) #endif +#ifndef FIXADDR_TOP +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) +#endif + #endif /* __ASM_MACH_GENERIC_SPACES_H */ diff --git a/arch/mips/include/asm/mach-tx39xx/spaces.h b/arch/mips/include/asm/mach-tx39xx/spaces.h new file mode 100644 index 000000000000..151fe7a1cf1d --- /dev/null +++ b/arch/mips/include/asm/mach-tx39xx/spaces.h @@ -0,0 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_TX39XX_SPACES_H +#define _ASM_TX39XX_SPACES_H + +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000) + +#include + +#endif /* __ASM_TX39XX_SPACES_H */ diff --git a/arch/mips/include/asm/mach-tx49xx/spaces.h b/arch/mips/include/asm/mach-tx49xx/spaces.h new file mode 100644 index 000000000000..0cb10a6f489e --- /dev/null +++ b/arch/mips/include/asm/mach-tx49xx/spaces.h @@ -0,0 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_TX49XX_SPACES_H +#define _ASM_TX49XX_SPACES_H + +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000) + +#include + +#endif /* __ASM_TX49XX_SPACES_H */ -- cgit v1.2.3 From 273f2d7e64f9fd22192b4cd31e7408284a721e69 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Sat, 16 Oct 2010 14:22:33 -0700 Subject: MIPS: Install handlers for software IRQs BMIPS4350/4380/5000 CMT/SMT all use SW INT0/INT1 for inter-thread signaling. 
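
As a rough sketch of the inter-thread signalling these handlers serve (illustrative, not part of the patch): a MIPS software interrupt is raised by setting IP0/IP1 in the CP0 Cause register and acknowledged by clearing it again. set_c0_cause()/clear_c0_cause() and CAUSEF_IP0 are the standard <asm/mipsregs.h> accessors; how a BMIPS CMT/SMT thread pokes its sibling's Cause register is chip-specific and not shown here.

	#include <linux/interrupt.h>
	#include <linux/sched.h>
	#include <asm/mipsregs.h>

	/* Raise SW INT0 on the local thread; the sibling-thread case is chip-specific. */
	static void raise_sw0_ipi_local(void)
	{
		set_c0_cause(CAUSEF_IP0);
	}

	/* Action handler hung off the per-CPU SW INT0 line that the irq_cpu.c change below registers. */
	static irqreturn_t sw0_ipi_handler(int irq, void *dev_id)
	{
		clear_c0_cause(CAUSEF_IP0);	/* ack the software interrupt */
		scheduler_ipi();		/* e.g. a reschedule IPI, as in the
						   Netlogic patch earlier in this log */
		return IRQ_HANDLED;
	}
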
Signed-off-by: Kevin Cernekee Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/1709/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/irq_cpu.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 6e71b284f6c9..191eb52228c4 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -103,14 +103,12 @@ void __init mips_cpu_irq_init(void) clear_c0_status(ST0_IM); clear_c0_cause(CAUSEF_IP); - /* - * Only MT is using the software interrupts currently, so we just - * leave them uninitialized for other processors. - */ - if (cpu_has_mipsmt) - for (i = irq_base; i < irq_base + 2; i++) - irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller, - handle_percpu_irq); + /* Software interrupts are used for MT/CMT IPI */ + for (i = irq_base; i < irq_base + 2; i++) + irq_set_chip_and_handler(i, cpu_has_mipsmt ? + &mips_mt_cpu_irq_controller : + &mips_cpu_irq_controller, + handle_percpu_irq); for (i = irq_base + 2; i < irq_base + 8; i++) irq_set_chip_and_handler(i, &mips_cpu_irq_controller, -- cgit v1.2.3 From 464fd83e841a16f4ea1325b33eb08170ef5cd1f4 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Wed, 5 Jan 2011 23:31:30 -0800 Subject: MIPS: Limit fixrange_init() to the FIXMAP region fixrange_init() allocates page tables for all addresses higher than FIXADDR_TOP. On processors that override the default FIXADDR_TOP address of 0xfffe_0000, this can consume up to 4 pages (1 page per 4MB) for pgd's that are never used. Signed-off-by: Kevin Cernekee Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/1980/ Signed-off-by: Ralf Baechle --- arch/mips/mm/init.c | 6 +++--- arch/mips/mm/pgtable-32.c | 2 +- arch/mips/mm/pgtable-64.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 11689e1a2924..b7ebc4fa89bc 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -277,11 +277,11 @@ void __init fixrange_init(unsigned long start, unsigned long end, k = __pmd_offset(vaddr); pgd = pgd_base + i; - for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { + for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) { pud = (pud_t *)pgd; - for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { + for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) { pmd = (pmd_t *)pud; - for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { + for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) { if (pmd_none(*pmd)) { pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); set_pmd(pmd, __pmd((unsigned long)pte)); diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index 575e4019227b..adc6911ba748 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -52,7 +52,7 @@ void __init pagetable_init(void) * Fixed mappings: */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, 0, pgd_base); + fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); #ifdef CONFIG_HIGHMEM /* diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c index 78eaa4f0b0ec..cda4e300eb0a 100644 --- a/arch/mips/mm/pgtable-64.c +++ b/arch/mips/mm/pgtable-64.c @@ -76,5 +76,5 @@ void __init pagetable_init(void) * Fixed mappings: */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, 0, pgd_base); + fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); } 
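
To make the "up to 4 pages" figure above concrete, here is a small stand-alone arithmetic sketch. The 0xff000000 value is the BCM63xx FIXADDR_TOP override introduced two patches earlier in this log; the 64 KiB FIXADDR_SIZE is an assumed, illustrative value, and one pgd slot is taken to span 4 MiB (4 KiB pages, 32-bit two-level tables).

	#include <stdio.h>

	int main(void)
	{
		unsigned long long fixaddr_top = 0xff000000ULL;	/* BCM63xx override */
		unsigned long long pgd_span    = 4ULL << 20;	/* 4 MiB per pgd slot */
		unsigned long long wasted_span = 0x100000000ULL - fixaddr_top;	/* region above FIXADDR_TOP */
		unsigned long long fixmap_span = 64ULL << 10;	/* assumed FIXADDR_SIZE */

		printf("pte pages for the never-used region above FIXADDR_TOP: %llu\n",
		       (wasted_span + pgd_span - 1) / pgd_span);	/* 16 MiB -> 4 */
		printf("pte pages the fixmap itself needs:                    %llu\n",
		       (fixmap_span + pgd_span - 1) / pgd_span);	/* -> 1 */
		return 0;
	}

With the default FIXADDR_TOP of 0xfffe0000 the region above it is only 128 KiB and shares the single pgd slot the fixmap needs anyway, which is why the waste only showed up on platforms that override FIXADDR_TOP.
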
-- cgit v1.2.3 From 98f4a2c27c76e7eaf75c2f3f25487fabca62ef3d Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 25 Jul 2011 17:26:55 +0100 Subject: MIPS: Remove pointless return statement from empty void functions. Signed-off-by: Ralf Baechle To: Sergei Shtylyov Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2391/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/irq.h | 1 - arch/mips/include/asm/mach-generic/dma-coherence.h | 1 - arch/mips/include/asm/mach-ip27/dma-coherence.h | 1 - arch/mips/include/asm/mach-jazz/dma-coherence.h | 1 - arch/mips/include/asm/mach-loongson/dma-coherence.h | 1 - arch/mips/include/asm/mach-powertv/dma-coherence.h | 1 - arch/mips/kernel/cpu-probe.c | 1 - arch/mips/kernel/perf_event.c | 2 -- arch/mips/loongson/lemote-2f/ec_kb3310b.c | 2 -- arch/mips/nxp/pnx8550/common/setup.c | 2 -- arch/mips/pnx8550/common/setup.c | 2 -- 11 files changed, 15 deletions(-) diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 0ec01294b063..2354c870a63a 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -18,7 +18,6 @@ static inline void irq_dispose_mapping(unsigned int virq) { - return; } #ifdef CONFIG_I8259 diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h index 8da98073e952..9c95177f7a7e 100644 --- a/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -49,7 +49,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h index 016d0989b141..06c441968e6e 100644 --- a/arch/mips/include/asm/mach-ip27/dma-coherence.h +++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h @@ -60,7 +60,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h index 302101b54acb..9fc1e9ad7038 100644 --- a/arch/mips/include/asm/mach-jazz/dma-coherence.h +++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h @@ -50,7 +50,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h index 981c75f91a7d..e1433055fe98 100644 --- a/arch/mips/include/asm/mach-loongson/dma-coherence.h +++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h @@ -55,7 +55,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h index a8e72cf12142..62c094085947 100644 --- a/arch/mips/include/asm/mach-powertv/dma-coherence.h +++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h @@ -102,7 +102,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask) 
static inline void plat_extra_sync_for_device(struct device *dev) { - return; } static inline int plat_dma_mapping_error(struct device *dev, diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index bb133d10b145..ebc0cd20b35d 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -71,7 +71,6 @@ void r4k_wait_irqoff(void) local_irq_enable(); __asm__(" .globl __pastwait \n" "__pastwait: \n"); - return; } /* diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index a8244854d3dc..ff1840f8e764 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -192,8 +192,6 @@ again: local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); - - return; } static void mipspmu_start(struct perf_event *event, int flags) diff --git a/arch/mips/loongson/lemote-2f/ec_kb3310b.c b/arch/mips/loongson/lemote-2f/ec_kb3310b.c index 64057244eec5..2b666d3a3947 100644 --- a/arch/mips/loongson/lemote-2f/ec_kb3310b.c +++ b/arch/mips/loongson/lemote-2f/ec_kb3310b.c @@ -45,8 +45,6 @@ void ec_write(unsigned short addr, unsigned char val) /* flush the write action */ inb(EC_IO_PORT_DATA); spin_unlock_irqrestore(&index_access_lock, flags); - - return; } EXPORT_SYMBOL_GPL(ec_write); diff --git a/arch/mips/nxp/pnx8550/common/setup.c b/arch/mips/nxp/pnx8550/common/setup.c index 64246c9c875c..71adac323323 100644 --- a/arch/mips/nxp/pnx8550/common/setup.c +++ b/arch/mips/nxp/pnx8550/common/setup.c @@ -140,6 +140,4 @@ void __init plat_mem_setup(void) PNX8XXX_UART_LCR_8BIT; ip3106_baud(UART_BASE, pnx8550_console_port) = 5; } - - return; } diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c index 43cb3945fdbf..fccd6b0c6d3f 100644 --- a/arch/mips/pnx8550/common/setup.c +++ b/arch/mips/pnx8550/common/setup.c @@ -139,6 +139,4 @@ void __init plat_mem_setup(void) PNX8XXX_UART_LCR_8BIT; ip3106_baud(UART_BASE, pnx8550_console_port) = 5; } - - return; } -- cgit v1.2.3 From c2b78c8e82f2b8aa0e8703f9931d42ad4f041260 Mon Sep 17 00:00:00 2001 From: David VomLehn Date: Tue, 13 Jul 2010 00:41:12 +0900 Subject: MIPS: PowerTV: Provide cpu-feature-overrides.h This will optimize fls() and __fls() to use CLZ throughout the kernel, and any other optimizations that depend on constant cpu_has_* values will also be used. Signed-off-by: David VomLehn Signed-off-by: Shinya Kuribayashi To: David VomLehn To: macro@linux-mips.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/1452/ Signed-off-by: Ralf Baechle --- .../asm/mach-powertv/cpu-feature-overrides.h | 59 ++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h diff --git a/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h b/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h new file mode 100644 index 000000000000..f751e3ec56fb --- /dev/null +++ b/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2010 Cisco Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_ +#define _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_ +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 +#define cpu_has_vce 0 +#define cpu_has_cache_cdex_p 0 +#define cpu_has_cache_cdex_s 0 +#define cpu_has_mcheck 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 +#define cpu_has_mips16 0 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 +#define cpu_has_vtag_icache 0 +#define cpu_has_dc_aliases 0 +#define cpu_has_ic_fills_f_dc 0 +#define cpu_has_mips32r1 0 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 +#define cpu_has_dsp 0 +#define cpu_has_mipsmt 0 +#define cpu_has_userlocal 0 +#define cpu_has_nofpuex 0 +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_vint 1 +#define cpu_has_veic 1 +#define cpu_has_inclusive_pcaches 0 + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 +#endif -- cgit v1.2.3 From 5fba0960809534f008260a621046f38a5f46f417 Mon Sep 17 00:00:00 2001 From: Shinya Kuribayashi Date: Tue, 13 Jul 2010 00:41:51 +0900 Subject: MIPS: Enable cpu_has_clo_clz for MIPS Technologies' platforms Enable cpu_has_clo_clz only when CONFIG_CPU_MIPS32 or CONFIG_CPU_MIPS64 is selected. This will optimize fls() and __fls() to use CLZ insn, and eventually ffs() and __ffs() as well (a short fls() illustration follows the last patch below). Malta and MIPSSim are development platforms, and need to take care of various processor configurations, release revisions and so on, even across different MIPS ISAs. For such platforms we have to be careful, for instance, with turning on cpu_has_mips{32,64}r[12] features. As for CLZ, all MIPS32/64 processors support it, regardless of release revisions. Signed-off-by: Shinya Kuribayashi To: David VomLehn To: macro@linux-mips.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/1453/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/mach-malta/cpu-feature-overrides.h | 2 ++ arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h index 2848cea42bce..37e3583a9fdd 100644 --- a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h @@ -32,6 +32,7 @@ /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ +#define cpu_has_clo_clz 1 #define cpu_has_nofpuex 0 /* #define cpu_has_64bits ? */ /* #define cpu_has_64bit_zero_reg ? */ @@ -58,6 +59,7 @@ /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ +#define cpu_has_clo_clz 1 #define cpu_has_nofpuex 0 /* #define cpu_has_64bits ? */ /* #define cpu_has_64bit_zero_reg ?
*/ diff --git a/arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h b/arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h index 779b02205737..27aaaa5d925e 100644 --- a/arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-mipssim/cpu-feature-overrides.h @@ -31,6 +31,7 @@ /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ +#define cpu_has_clo_clz 1 #define cpu_has_nofpuex 0 /* #define cpu_has_64bits ? */ /* #define cpu_has_64bit_zero_reg ? */ @@ -56,6 +57,7 @@ /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ +#define cpu_has_clo_clz 1 #define cpu_has_nofpuex 0 /* #define cpu_has_64bits ? */ /* #define cpu_has_64bit_zero_reg ? */ -- cgit v1.2.3 From c8e58856f2f288bfe076ad9205d9a10623498c96 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 11 Sep 2010 16:33:29 +0300 Subject: MIPS: RB532: Use hex_to_bin() Remove custom implementation of hex_to_bin(). Signed-off-by: Andy Shevchenko Cc: Ralf Baechle Cc: linux-mips@linux-mips.org To: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/1580/ Acked-by: Florian Fainelli Signed-off-by: Ralf Baechle --- arch/mips/rb532/devices.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index 041fc1afc3f4..a969eb826634 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -251,28 +251,22 @@ static struct platform_device *rb532_devs[] = { static void __init parse_mac_addr(char *macstr) { - int i, j; - unsigned char result, value; + int i, h, l; for (i = 0; i < 6; i++) { - result = 0; - if (i != 5 && *(macstr + 2) != ':') return; - for (j = 0; j < 2; j++) { - if (isxdigit(*macstr) - && (value = - isdigit(*macstr) ? *macstr - - '0' : toupper(*macstr) - 'A' + 10) < 16) { - result = result * 16 + value; - macstr++; - } else - return; - } + h = hex_to_bin(*macstr++); + if (h == -1) + return; + + l = hex_to_bin(*macstr++); + if (l == -1) + return; macstr++; - korina_dev0_data.mac[i] = result; + korina_dev0_data.mac[i] = (h << 4) + l; } } -- cgit v1.2.3 From f0daaaf5236297ea81ec7732cd0df5dbd84a5042 Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 5 Jul 2011 16:34:45 -0700 Subject: MIPS: Add uasm UASM_i_SRL_SAFE macro. This can be used from either 32-bit or 64-bit code to generate logical right shifts of any constant amount. Signed-off-by: David Daney To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2576/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/uasm.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index dcbd4bb417ec..504d40aedfae 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -150,6 +150,7 @@ static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \ # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh) # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh) # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh) +# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh) # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh) # define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd) # define UASM_i_MTC0(buf, rt, rd...) 
uasm_i_dmtc0(buf, rt, rd) @@ -165,6 +166,7 @@ static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \ # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh) # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh) # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) +# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh) # define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd) # define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd) -- cgit v1.2.3 From bf28607fbe529e20180080c4a0295b0a47834fde Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 5 Jul 2011 16:34:46 -0700 Subject: MIPS: Close races in TLB modify handlers. Page table entries are made invalid by writing a zero into the PTE slot in a page table. This creates a race condition with the TLB modify handlers when they are updating the PTE: CPU0 tests for _PAGE_PRESENT; CPU1 then sets the entry to not _PAGE_PRESENT (zero); CPU0 sets _PAGE_VALID. So now the page not present value (zero) is suddenly valid and user space programs have access to physical page zero. We close the race by putting the test for _PAGE_PRESENT and setting of _PAGE_VALID into an atomic LL/SC section (a plain C sketch of this race follows this patch). This requires more registers than just K0 and K1 in the handlers, so we need to save some registers to a save area and then restore them when we are done. The save area is an array of cacheline aligned structures that should not suffer cache line bouncing as they are CPU private. [ralf@linux-mips.org: Fix !defined(CONFIG_MIPS_PGD_C0_CONTEXT) build error.] Signed-off-by: David Daney To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2577/ Signed-off-by: Ralf Baechle --- arch/mips/mm/tlbex.c | 292 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 194 insertions(+), 98 deletions(-) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 424ed4b92e6d..b6e1cff50667 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -42,6 +42,18 @@ extern void tlb_do_page_fault_0(void); extern void tlb_do_page_fault_1(void); +struct work_registers { + int r1; + int r2; + int r3; +}; + +struct tlb_reg_save { + unsigned long a; + unsigned long b; +} ____cacheline_aligned_in_smp; + +static struct tlb_reg_save handler_reg_save[NR_CPUS]; static inline int r45k_bvahwbug(void) { @@ -248,6 +260,73 @@ static int scratch_reg __cpuinitdata; static int pgd_reg __cpuinitdata; enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; +static struct work_registers __cpuinit build_get_work_registers(u32 **p) +{ + struct work_registers r; + + int smp_processor_id_reg; + int smp_processor_id_sel; + int smp_processor_id_shift; + + if (scratch_reg > 0) { + /* Save in CPU local C0_KScratch?
*/ + UASM_i_MTC0(p, 1, 31, scratch_reg); + r.r1 = K0; + r.r2 = K1; + r.r3 = 1; + return r; + } + + if (num_possible_cpus() > 1) { +#ifdef CONFIG_MIPS_PGD_C0_CONTEXT + smp_processor_id_shift = 51; + smp_processor_id_reg = 20; /* XContext */ + smp_processor_id_sel = 0; +#else +# ifdef CONFIG_32BIT + smp_processor_id_shift = 25; + smp_processor_id_reg = 4; /* Context */ + smp_processor_id_sel = 0; +# endif +# ifdef CONFIG_64BIT + smp_processor_id_shift = 26; + smp_processor_id_reg = 4; /* Context */ + smp_processor_id_sel = 0; +# endif +#endif + /* Get smp_processor_id */ + UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel); + UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift); + + /* handler_reg_save index in K0 */ + UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); + + UASM_i_LA(p, K1, (long)&handler_reg_save); + UASM_i_ADDU(p, K0, K0, K1); + } else { + UASM_i_LA(p, K0, (long)&handler_reg_save); + } + /* K0 now points to save area, save $1 and $2 */ + UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); + UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); + + r.r1 = K1; + r.r2 = 1; + r.r3 = 2; + return r; +} + +static void __cpuinit build_restore_work_registers(u32 **p) +{ + if (scratch_reg > 0) { + UASM_i_MFC0(p, 1, 31, scratch_reg); + return; + } + /* K0 already points to save area, restore $1 and $2 */ + UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); + UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); +} + #ifndef CONFIG_MIPS_PGD_C0_CONTEXT /* @@ -1160,9 +1239,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) memset(relocs, 0, sizeof(relocs)); memset(final_handler, 0, sizeof(final_handler)); - if (scratch_reg == 0) - scratch_reg = allocate_kscratch(); - if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) { htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, scratch_reg); @@ -1462,22 +1538,28 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, */ static void __cpuinit build_pte_present(u32 **p, struct uasm_reloc **r, - unsigned int pte, unsigned int ptr, enum label_id lid) + int pte, int ptr, int scratch, enum label_id lid) { + int t = scratch >= 0 ? 
scratch : pte; + if (kernel_uses_smartmips_rixi) { if (use_bbit_insns()) { uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); uasm_i_nop(p); } else { - uasm_i_andi(p, pte, pte, _PAGE_PRESENT); - uasm_il_beqz(p, r, pte, lid); - iPTE_LW(p, pte, ptr); + uasm_i_andi(p, t, pte, _PAGE_PRESENT); + uasm_il_beqz(p, r, t, lid); + if (pte == t) + /* You lose the SMP race :-(*/ + iPTE_LW(p, pte, ptr); } } else { - uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); - uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); - uasm_il_bnez(p, r, pte, lid); - iPTE_LW(p, pte, ptr); + uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); + uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); + uasm_il_bnez(p, r, t, lid); + if (pte == t) + /* You lose the SMP race :-(*/ + iPTE_LW(p, pte, ptr); } } @@ -1497,19 +1579,19 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, */ static void __cpuinit build_pte_writable(u32 **p, struct uasm_reloc **r, - unsigned int pte, unsigned int ptr, enum label_id lid) + unsigned int pte, unsigned int ptr, int scratch, + enum label_id lid) { - if (use_bbit_insns()) { - uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); - uasm_i_nop(p); - uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); - uasm_i_nop(p); - } else { - uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); - uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); - uasm_il_bnez(p, r, pte, lid); + int t = scratch >= 0 ? scratch : pte; + + uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); + uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); + uasm_il_bnez(p, r, t, lid); + if (pte == t) + /* You lose the SMP race :-(*/ iPTE_LW(p, pte, ptr); - } + else + uasm_i_nop(p); } /* Make PTE writable, update software status bits as well, then store @@ -1531,15 +1613,19 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, */ static void __cpuinit build_pte_modifiable(u32 **p, struct uasm_reloc **r, - unsigned int pte, unsigned int ptr, enum label_id lid) + unsigned int pte, unsigned int ptr, int scratch, + enum label_id lid) { if (use_bbit_insns()) { uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); uasm_i_nop(p); } else { - uasm_i_andi(p, pte, pte, _PAGE_WRITE); - uasm_il_beqz(p, r, pte, lid); - iPTE_LW(p, pte, ptr); + int t = scratch >= 0 ? 
scratch : pte; + uasm_i_andi(p, t, pte, _PAGE_WRITE); + uasm_il_beqz(p, r, t, lid); + if (pte == t) + /* You lose the SMP race :-(*/ + iPTE_LW(p, pte, ptr); } } @@ -1619,7 +1705,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); + build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); uasm_i_nop(&p); /* load delay */ build_make_valid(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); @@ -1649,7 +1735,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); + build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); @@ -1673,13 +1759,14 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) u32 *p = handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; + struct work_registers wr; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); + build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_pte_reload_tlbwi(&p, K0, K1); @@ -1702,15 +1789,16 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) /* * R4000 style TLB load/store/modify handlers. */ -static void __cpuinit +static struct work_registers __cpuinit build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, - struct uasm_reloc **r, unsigned int pte, - unsigned int ptr) + struct uasm_reloc **r) { + struct work_registers wr = build_get_work_registers(p); + #ifdef CONFIG_64BIT - build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */ + build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ #else - build_get_pgde32(p, pte, ptr); /* get pgd in ptr */ + build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ #endif #ifdef CONFIG_HUGETLB_PAGE @@ -1719,21 +1807,22 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. 
*/ - build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update); + build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); #endif - UASM_i_MFC0(p, pte, C0_BADVADDR); - UASM_i_LW(p, ptr, 0, ptr); - UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); - uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2); - UASM_i_ADDU(p, ptr, ptr, pte); + UASM_i_MFC0(p, wr.r1, C0_BADVADDR); + UASM_i_LW(p, wr.r2, 0, wr.r2); + UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); + uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); + UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); #ifdef CONFIG_SMP uasm_l_smp_pgtable_change(l, *p); #endif - iPTE_LW(p, pte, ptr); /* get even pte */ + iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ if (!m4kc_tlbp_war()) build_tlb_probe_entry(p); + return wr; } static void __cpuinit @@ -1746,6 +1835,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, build_update_entries(p, tmp, ptr); build_tlb_write_entry(p, l, r, tlb_indexed); uasm_l_leave(l, *p); + build_restore_work_registers(p); uasm_i_eret(p); /* return from trap */ #ifdef CONFIG_64BIT @@ -1758,6 +1848,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void) u32 *p = handle_tlbl; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; + struct work_registers wr; memset(handle_tlbl, 0, sizeof(handle_tlbl)); memset(labels, 0, sizeof(labels)); @@ -1777,8 +1868,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void) /* No need for uasm_i_nop */ } - build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); + wr = build_r4000_tlbchange_handler_head(&p, &l, &r); + build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); @@ -1788,44 +1879,43 @@ static void __cpuinit build_r4000_tlb_load_handler(void) * have triggered it. Skip the expensive test.. */ if (use_bbit_insns()) { - uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID), + uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), label_tlbl_goaround1); } else { - uasm_i_andi(&p, K0, K0, _PAGE_VALID); - uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1); + uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); + uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1); } uasm_i_nop(&p); uasm_i_tlbr(&p); /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { - uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8); + uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); } else { - uasm_i_andi(&p, K0, K1, sizeof(pte_t)); - uasm_i_beqz(&p, K0, 8); + uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); + uasm_i_beqz(&p, wr.r3, 8); } - - UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ - UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ + /* load it in the delay slot*/ + UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); + /* load it if ptr is odd */ + UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); /* - * If the entryLo (now in K0) is valid (bit 1), RI or + * If the entryLo (now in wr.r3) is valid (bit 1), RI or * XI must have triggered it. 
*/ if (use_bbit_insns()) { - uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl); - /* Reload the PTE value */ - iPTE_LW(&p, K0, K1); + uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl); + uasm_i_nop(&p); uasm_l_tlbl_goaround1(&l, p); } else { - uasm_i_andi(&p, K0, K0, 2); - uasm_il_bnez(&p, &r, K0, label_nopage_tlbl); - uasm_l_tlbl_goaround1(&l, p); - /* Reload the PTE value */ - iPTE_LW(&p, K0, K1); + uasm_i_andi(&p, wr.r3, wr.r3, 2); + uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl); + uasm_i_nop(&p); } + uasm_l_tlbl_goaround1(&l, p); } - build_make_valid(&p, &r, K0, K1); - build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); + build_make_valid(&p, &r, wr.r1, wr.r2); + build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); #ifdef CONFIG_HUGETLB_PAGE /* @@ -1833,8 +1923,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void) * spots a huge page. */ uasm_l_tlb_huge_update(&l, p); - iPTE_LW(&p, K0, K1); - build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); + iPTE_LW(&p, wr.r1, wr.r2); + build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); build_tlb_probe_entry(&p); if (kernel_uses_smartmips_rixi) { @@ -1843,50 +1933,51 @@ static void __cpuinit build_r4000_tlb_load_handler(void) * have triggered it. Skip the expensive test.. */ if (use_bbit_insns()) { - uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID), + uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), label_tlbl_goaround2); } else { - uasm_i_andi(&p, K0, K0, _PAGE_VALID); - uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); + uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); + uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); } uasm_i_nop(&p); uasm_i_tlbr(&p); /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { - uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8); + uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); } else { - uasm_i_andi(&p, K0, K1, sizeof(pte_t)); - uasm_i_beqz(&p, K0, 8); + uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); + uasm_i_beqz(&p, wr.r3, 8); } - UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ - UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ + /* load it in the delay slot*/ + UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); + /* load it if ptr is odd */ + UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); /* - * If the entryLo (now in K0) is valid (bit 1), RI or + * If the entryLo (now in wr.r3) is valid (bit 1), RI or * XI must have triggered it. */ if (use_bbit_insns()) { - uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2); + uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2); } else { - uasm_i_andi(&p, K0, K0, 2); - uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); + uasm_i_andi(&p, wr.r3, wr.r3, 2); + uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); } - /* Reload the PTE value */ - iPTE_LW(&p, K0, K1); /* * We clobbered C0_PAGEMASK, restore it. On the other branch * it is restored in build_huge_tlb_write_entry. 
*/ - build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0); + build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0); uasm_l_tlbl_goaround2(&l, p); } - uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); - build_huge_handler_tail(&p, &r, &l, K0, K1); + uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbl(&l, p); + build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); @@ -1905,17 +1996,18 @@ static void __cpuinit build_r4000_tlb_store_handler(void) u32 *p = handle_tlbs; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; + struct work_registers wr; memset(handle_tlbs, 0, sizeof(handle_tlbs)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); - build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); + wr = build_r4000_tlbchange_handler_head(&p, &l, &r); + build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); - build_make_write(&p, &r, K0, K1); - build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); + build_make_write(&p, &r, wr.r1, wr.r2); + build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); #ifdef CONFIG_HUGETLB_PAGE /* @@ -1923,15 +2015,16 @@ static void __cpuinit build_r4000_tlb_store_handler(void) * build_r4000_tlbchange_handler_head spots a huge page. */ uasm_l_tlb_huge_update(&l, p); - iPTE_LW(&p, K0, K1); - build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); + iPTE_LW(&p, wr.r1, wr.r2); + build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); build_tlb_probe_entry(&p); - uasm_i_ori(&p, K0, K0, + uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, K0, K1); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbs(&l, p); + build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -1950,18 +2043,19 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) u32 *p = handle_tlbm; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; + struct work_registers wr; memset(handle_tlbm, 0, sizeof(handle_tlbm)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); - build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); + wr = build_r4000_tlbchange_handler_head(&p, &l, &r); + build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); /* Present and writable bits set, set accessed and dirty bits. */ - build_make_write(&p, &r, K0, K1); - build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); + build_make_write(&p, &r, wr.r1, wr.r2); + build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); #ifdef CONFIG_HUGETLB_PAGE /* @@ -1969,15 +2063,16 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) * build_r4000_tlbchange_handler_head spots a huge page. 
*/ uasm_l_tlb_huge_update(&l, p); - iPTE_LW(&p, K0, K1); - build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); + iPTE_LW(&p, wr.r1, wr.r2); + build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); build_tlb_probe_entry(&p); - uasm_i_ori(&p, K0, K0, + uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, K0, K1); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); #endif uasm_l_nopage_tlbm(&l, p); + build_restore_work_registers(&p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -2036,6 +2131,7 @@ void __cpuinit build_tlb_refill_handler(void) default: if (!run_once) { + scratch_reg = allocate_kscratch(); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT build_r4000_setup_pgd(); #endif -- cgit v1.2.3
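Editor's illustration for the cpu-feature-overrides patches above: they work because the cpu_has_* flags become compile-time constants, which lets fls() and __fls() collapse to a CLZ-based path. The sketch below is not the kernel's asm/bitops.h; CPU_HAS_CLZ and my_fls() are invented for the example, and __builtin_clz() stands in for the CLZ instruction that MIPS32/64 compilers emit for it. It is a minimal user-space program, assuming a GCC/Clang-compatible compiler.

/*
 * Hypothetical illustration only, not kernel code.  CPU_HAS_CLZ plays the
 * role of a constant cpu_has_clo_clz from cpu-feature-overrides.h: because
 * it is a compile-time constant, the generic loop below is dead code and
 * the compiler keeps only the __builtin_clz() path (CLZ on MIPS32/64).
 */
#include <stdio.h>

#define CPU_HAS_CLZ 1	/* stand-in for a cpu-feature-overrides.h constant */

static inline int my_fls(unsigned int x)
{
	int r = 0;

	if (CPU_HAS_CLZ)
		return x ? 32 - __builtin_clz(x) : 0;

	/* Generic fallback; only reachable when the flag is not constant 1. */
	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	printf("my_fls(0x80000000) = %d\n", my_fls(0x80000000u));	/* 32 */
	printf("my_fls(1) = %d\n", my_fls(1));				/* 1 */
	printf("my_fls(0) = %d\n", my_fls(0));				/* 0 */
	return 0;
}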
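Editor's illustration for the "Close races in TLB modify handlers" patch above, which describes a classic check-then-act race. The program below is only a user-space analogy, not the uasm-generated handler: C11 atomics stand in for the MIPS LL/SC sequence, and the PAGE_PRESENT/PAGE_VALID bit values are invented and are not the real MIPS PTE layout. It shows why testing _PAGE_PRESENT and setting _PAGE_VALID must be a single atomic read-modify-write.

/*
 * User-space analogy of the race closed by the tlbex.c patch above.
 * atomic_compare_exchange_weak() stands in for the LL/SC loop that the
 * generated handlers emit; bit values are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_PRESENT 0x1UL	/* hypothetical */
#define PAGE_VALID   0x2UL	/* hypothetical */

static _Atomic unsigned long pte = PAGE_PRESENT;

/* Racy: another CPU can zero the PTE between the load and the store, and
 * the store then resurrects the "not present" (zero) entry as valid. */
static void mark_valid_racy(void)
{
	unsigned long v = atomic_load(&pte);

	if (v & PAGE_PRESENT)
		atomic_store(&pte, v | PAGE_VALID);
}

/* Race-free: the present test and the valid update form one read-modify-
 * write, like the LL ... SC sequence in the generated handler. */
static void mark_valid_atomic(void)
{
	unsigned long old = atomic_load(&pte);

	do {
		if (!(old & PAGE_PRESENT))
			return;	/* entry was torn down under us; take the fault path */
	} while (!atomic_compare_exchange_weak(&pte, &old, old | PAGE_VALID));
}

int main(void)
{
	mark_valid_atomic();
	printf("pte = %#lx\n", (unsigned long)atomic_load(&pte));

	(void)mark_valid_racy;	/* kept only to show the broken pattern */
	return 0;
}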