author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 10:51:43 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 10:51:43 -0700
commit     e31a94ed371c70855eb30b77c490d6d85dd4da26 (patch)
tree       58d9f1a75a22319f97731db8d9ac07b78a8d8aaf /arch/mips/kernel
parent     9d9ad4b51d2b29b5bbeb4011f5e76f7538119cf9 (diff)
parent     fcbd3b4b92efe29b59df16b910138cf43683be88 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (45 commits)
  [MIPS] Pb1200/DBAu1200: move platform code to its proper place
  [MIPS] Fix handling of trap and breakpoint instructions
  [MIPS] Pb1200: do register SMC 91C111
  [MIPS] DBAu1200: fix bad SMC 91C111 resource size
  [NET] Kconfig: Rename MIKROTIK_RB500 -> MIKROTIK_RB532
  [MIPS] IP27: Fix build bug due to missing include
  [MIPS] Fix some sparse warnings on traps.c and irq-msc01.c
  [MIPS] cevt-gt641xx: Kill unnecessary include
  [MIPS] DS1287: Add clockevent driver
  [MIPS] add DECstation I/O ASIC clocksource
  [MIPS] rbtx4938: minor cleanup
  [MIPS] Alchemy: kill unused PCI_IRQ_TABLE_LOOKUP macro
  [MIPS] rbtx4938: misc cleanups
  [MIPS] jmr3927: use generic txx9 gpio
  [MIPS] rbhma4500: use generic txx9 gpio
  [MIPS] generic txx9 gpio support
  [MIPS] make fallback gpio.h gpiolib-friendly
  [MIPS] unexport null_perf_irq() and make it static
  [MIPS] unexport rtc_mips_set_time()
  [MIPS] unexport copy_from_user_page()
  ...
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile         |   8
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c    | 129
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c   |   2
-rw-r--r--  arch/mips/kernel/cpu-probe.c      |  21
-rw-r--r--  arch/mips/kernel/csrc-ioasic.c    |  65
-rw-r--r--  arch/mips/kernel/gpio_txx9.c      |  87
-rw-r--r--  arch/mips/kernel/irq-gic.c        | 295
-rw-r--r--  arch/mips/kernel/irq-msc01.c      |  10
-rw-r--r--  arch/mips/kernel/signal-common.h  |   2
-rw-r--r--  arch/mips/kernel/smp-cmp.c        | 265
-rw-r--r--  arch/mips/kernel/smp-mt.c         | 143
-rw-r--r--  arch/mips/kernel/smp.c            |   4
-rw-r--r--  arch/mips/kernel/smtc.c           |  11
-rw-r--r--  arch/mips/kernel/spram.c          | 221
-rw-r--r--  arch/mips/kernel/sync-r4k.c       | 159
-rw-r--r--  arch/mips/kernel/time.c           |   5
-rw-r--r--  arch/mips/kernel/traps.c          | 213
17 files changed, 1421 insertions, 219 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6fcdb6fda2e2..45545be3eb86 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,12 +10,15 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
irix5sys.o sysirix.o
@@ -50,6 +53,8 @@ obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
+obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
+obj-$(CONFIG_CPU_MIPSR2) += spram.o
obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
@@ -62,6 +67,7 @@ obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
+obj-$(CONFIG_IRQ_GIC) += irq-gic.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
@@ -77,6 +83,8 @@ obj-$(CONFIG_64BIT) += cpu-bugs64.o
obj-$(CONFIG_I8253) += i8253.o
+obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
+
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
new file mode 100644
index 000000000000..df4acb68bfb5
--- /dev/null
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -0,0 +1,129 @@
+/*
+ * DS1287 clockevent driver
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+
+#include <asm/time.h>
+
+int ds1287_timer_state(void)
+{
+ return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
+}
+
+int ds1287_set_base_clock(unsigned int hz)
+{
+ u8 rate;
+
+ switch (hz) {
+ case 128:
+ rate = 0x9;
+ break;
+ case 256:
+ rate = 0x8;
+ break;
+ case 1024:
+ rate = 0x6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
+
+ return 0;
+}
+
+static int ds1287_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ return -EINVAL;
+}
+
+static void ds1287_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ val |= RTC_PIE;
+ break;
+ default:
+ val &= ~RTC_PIE;
+ break;
+ }
+
+ CMOS_WRITE(val, RTC_REG_B);
+
+ spin_unlock(&rtc_lock);
+}
+
+static void ds1287_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device ds1287_clockevent = {
+ .name = "ds1287",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .cpumask = CPU_MASK_CPU0,
+ .set_next_event = ds1287_set_next_event,
+ .set_mode = ds1287_set_mode,
+ .event_handler = ds1287_event_handler,
+};
+
+static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &ds1287_clockevent;
+
+ /* Ack the RTC interrupt. */
+ CMOS_READ(RTC_REG_C);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction ds1287_irqaction = {
+ .handler = ds1287_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "ds1287",
+};
+
+int __init ds1287_clockevent_init(int irq)
+{
+ struct clock_event_device *cd;
+
+ cd = &ds1287_clockevent;
+ cd->rating = 100;
+ cd->irq = irq;
+ clockevent_set_clock(cd, 32768);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ clockevents_register_device(&ds1287_clockevent);
+
+ return setup_irq(irq, &ds1287_irqaction);
+}
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index c36772631fe0..6e2f58520afb 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -25,8 +25,6 @@
#include <asm/gt64120.h>
#include <asm/time.h>
-#include <irq.h>
-
static DEFINE_SPINLOCK(gt641xx_timer_lock);
static unsigned int gt641xx_base_clock;
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 89c3304cb93c..335a6ae3d594 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -169,6 +169,7 @@ static inline void check_wait(void)
case CPU_24K:
case CPU_34K:
+ case CPU_1004K:
cpu_wait = r4k_wait;
if (read_c0_config7() & MIPS_CONF7_WII)
cpu_wait = r4k_wait_irqoff;
@@ -675,6 +676,12 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
return;
}
+#ifdef CONFIG_CPU_MIPSR2
+extern void spram_config(void);
+#else
+static inline void spram_config(void) {}
+#endif
+
static inline void cpu_probe_mips(struct cpuinfo_mips *c)
{
decode_configs(c);
@@ -711,7 +718,12 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
case PRID_IMP_74K:
c->cputype = CPU_74K;
break;
+ case PRID_IMP_1004K:
+ c->cputype = CPU_1004K;
+ break;
}
+
+ spram_config();
}
static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
@@ -778,7 +790,7 @@ static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
}
}
-static inline void cpu_probe_philips(struct cpuinfo_mips *c)
+static inline void cpu_probe_nxp(struct cpuinfo_mips *c)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
@@ -787,7 +799,7 @@ static inline void cpu_probe_philips(struct cpuinfo_mips *c)
c->isa_level = MIPS_CPU_ISA_M32R1;
break;
default:
- panic("Unknown Philips Core!"); /* REVISIT: die? */
+ panic("Unknown NXP Core!"); /* REVISIT: die? */
break;
}
}
@@ -876,6 +888,7 @@ static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
case CPU_24K: name = "MIPS 24K"; break;
case CPU_25KF: name = "MIPS 25Kf"; break;
case CPU_34K: name = "MIPS 34K"; break;
+ case CPU_1004K: name = "MIPS 1004K"; break;
case CPU_74K: name = "MIPS 74K"; break;
case CPU_VR4111: name = "NEC VR4111"; break;
case CPU_VR4121: name = "NEC VR4121"; break;
@@ -925,8 +938,8 @@ __cpuinit void cpu_probe(void)
case PRID_COMP_SANDCRAFT:
cpu_probe_sandcraft(c);
break;
- case PRID_COMP_PHILIPS:
- cpu_probe_philips(c);
+ case PRID_COMP_NXP:
+ cpu_probe_nxp(c);
break;
default:
c->cputype = CPU_UNKNOWN;
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
new file mode 100644
index 000000000000..1d5f63cf8997
--- /dev/null
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -0,0 +1,65 @@
+/*
+ * DEC I/O ASIC's counter clocksource
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/ds1287.h>
+#include <asm/time.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+
+static cycle_t dec_ioasic_hpt_read(void)
+{
+ return ioasic_read(IO_REG_FCTR);
+}
+
+static struct clocksource clocksource_dec = {
+ .name = "dec-ioasic",
+ .read = dec_ioasic_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init dec_ioasic_clocksource_init(void)
+{
+ unsigned int freq;
+ u32 start, end;
+ int i = HZ / 10;
+
+
+ while (!ds1287_timer_state())
+ ;
+
+ start = dec_ioasic_hpt_read();
+
+ while (i--)
+ while (!ds1287_timer_state())
+ ;
+
+ end = dec_ioasic_hpt_read();
+
+ freq = (end - start) * 10;
+ printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
+
+ clocksource_dec.rating = 200 + freq / 10000000;
+ clocksource_set_clock(&clocksource_dec, freq);
+
+ clocksource_register(&clocksource_dec);
+}
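The calibration loop above counts HZ/10 periodic RTC interrupts (assuming the DS1287 periodic rate has already been programmed to HZ) against the free-running I/O ASIC counter, so the counter delta covers 0.1 s and is scaled by 10 to get Hz. A worked example of the arithmetic, with an assumed 25 MHz counter:

/*
 * Assumed numbers only -- not measured on real hardware:
 *
 *   RTC ticks waited = HZ / 10                        (0.1 s at HZ per second)
 *   end - start      = 25,000,000 * 0.1  = 2,500,000  counter ticks
 *   freq             = (end - start) * 10 = 25,000,000 Hz
 *   rating           = 200 + freq / 10,000,000 = 202
 */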
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
new file mode 100644
index 000000000000..b1436a857998
--- /dev/null
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -0,0 +1,87 @@
+/*
+ * A gpio chip driver for TXx9 SoCs
+ *
+ * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/txx9pio.h>
+
+static DEFINE_SPINLOCK(txx9_gpio_lock);
+
+static struct txx9_pio_reg __iomem *txx9_pioptr;
+
+static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return __raw_readl(&txx9_pioptr->din) & (1 << offset);
+}
+
+static void txx9_gpio_set_raw(unsigned int offset, int value)
+{
+ u32 val;
+ val = __raw_readl(&txx9_pioptr->dout);
+ if (value)
+ val |= 1 << offset;
+ else
+ val &= ~(1 << offset);
+ __raw_writel(val, &txx9_pioptr->dout);
+}
+
+static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+}
+
+static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
+{
+ spin_lock_irq(&txx9_gpio_lock);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irq(&txx9_gpio_lock);
+ return 0;
+}
+
+static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ spin_lock_irq(&txx9_gpio_lock);
+ txx9_gpio_set_raw(offset, value);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irq(&txx9_gpio_lock);
+ return 0;
+}
+
+static struct gpio_chip txx9_gpio_chip = {
+ .get = txx9_gpio_get,
+ .set = txx9_gpio_set,
+ .direction_input = txx9_gpio_dir_in,
+ .direction_output = txx9_gpio_dir_out,
+ .label = "TXx9",
+};
+
+int __init txx9_gpio_init(unsigned long baseaddr,
+ unsigned int base, unsigned int num)
+{
+ txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
+ if (!txx9_pioptr)
+ return -ENODEV;
+ txx9_gpio_chip.base = base;
+ txx9_gpio_chip.ngpio = num;
+ return gpiochip_add(&txx9_gpio_chip);
+}
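Once txx9_gpio_init() has registered the chip, the pins are ordinary gpiolib GPIOs numbered base..base+num-1. A minimal sketch of board-side usage -- TXX9_PIO_BASE and the pin numbers are placeholders, not values taken from this patch:

/* Hypothetical board init: expose 16 PIO pins as GPIOs 0..15. */
static int __init board_gpio_setup(void)
{
	int err;

	err = txx9_gpio_init(TXX9_PIO_BASE, 0, 16);	/* placeholder base address */
	if (err)
		return err;

	/* Generic gpiolib calls now reach the "TXx9" chip registered above. */
	err = gpio_request(5, "board-led");
	if (err)
		return err;
	return gpio_direction_output(5, 1);
}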
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
new file mode 100644
index 000000000000..f0a4bb19e096
--- /dev/null
+++ b/arch/mips/kernel/irq-gic.c
@@ -0,0 +1,295 @@
+#undef DEBUG
+
+#include <linux/bitmap.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/gic.h>
+#include <asm/gcmpregs.h>
+#include <asm/mips-boards/maltaint.h>
+#include <asm/irq.h>
+#include <linux/hardirq.h>
+#include <asm-generic/bitops/find.h>
+
+
+static unsigned long _gic_base;
+static unsigned int _irqbase, _mapsize, numvpes, numintrs;
+static struct gic_intr_map *_intrmap;
+
+static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
+static struct gic_pending_regs pending_regs[NR_CPUS];
+static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
+
+#define gic_wedgeb2bok 0 /*
+ * Can GIC handle b2b writes to wedge register?
+ */
+#if gic_wedgeb2bok == 0
+static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
+#endif
+
+void gic_send_ipi(unsigned int intr)
+{
+#if gic_wedgeb2bok == 0
+ unsigned long flags;
+#endif
+ pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
+ read_c0_status());
+ if (!gic_wedgeb2bok)
+ spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
+ GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
+ if (!gic_wedgeb2bok) {
+ (void) GIC_REG(SHARED, GIC_SH_CONFIG);
+ spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
+ }
+}
+
+/* This is Malta specific and needs to be exported */
+static void vpe_local_setup(unsigned int numvpes)
+{
+ int i;
+ unsigned long timer_interrupt = 5, perf_interrupt = 5;
+ unsigned int vpe_ctl;
+
+ /*
+ * Setup the default performance counter timer interrupts
+ * for all VPEs
+ */
+ for (i = 0; i < numvpes; i++) {
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+
+ /* Are Interrupts locally routable? */
+ GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
+ if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+ GIC_MAP_TO_PIN_MSK | timer_interrupt);
+
+ if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+ GIC_MAP_TO_PIN_MSK | perf_interrupt);
+ }
+}
+
+unsigned int gic_get_int(void)
+{
+ unsigned int i;
+ unsigned long *pending, *intrmask, *pcpu_mask;
+ unsigned long *pending_abs, *intrmask_abs;
+
+ /* Get per-cpu bitmaps */
+ pending = pending_regs[smp_processor_id()].pending;
+ intrmask = intrmask_regs[smp_processor_id()].intrmask;
+ pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
+
+ pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+ GIC_SH_PEND_31_0_OFS);
+ intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+ GIC_SH_MASK_31_0_OFS);
+
+ for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
+ GICREAD(*pending_abs, pending[i]);
+ GICREAD(*intrmask_abs, intrmask[i]);
+ pending_abs++;
+ intrmask_abs++;
+ }
+
+ bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
+ bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
+
+ i = find_first_bit(pending, GIC_NUM_INTRS);
+
+ pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);
+
+ return i;
+}
+
+static unsigned int gic_irq_startup(unsigned int irq)
+{
+ pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+ irq -= _irqbase;
+ /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+ GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
+ 1 << (irq % 32));
+ return 0;
+}
+
+static void gic_irq_ack(unsigned int irq)
+{
+#if gic_wedgeb2bok == 0
+ unsigned long flags;
+#endif
+ pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+ irq -= _irqbase;
+ GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
+ 1 << (irq % 32));
+
+ if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
+ if (!gic_wedgeb2bok)
+ spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
+ GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
+ if (!gic_wedgeb2bok) {
+ (void) GIC_REG(SHARED, GIC_SH_CONFIG);
+ spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
+ }
+ }
+}
+
+static void gic_mask_irq(unsigned int irq)
+{
+ pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+ irq -= _irqbase;
+ /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+ GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
+ 1 << (irq % 32));
+}
+
+static void gic_unmask_irq(unsigned int irq)
+{
+ pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+ irq -= _irqbase;
+ /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+ GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
+ 1 << (irq % 32));
+}
+
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(gic_lock);
+
+static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+ cpumask_t tmp = CPU_MASK_NONE;
+ unsigned long flags;
+ int i;
+
+ pr_debug(KERN_DEBUG "%s called\n", __func__);
+ irq -= _irqbase;
+
+ cpus_and(tmp, cpumask, cpu_online_map);
+ if (cpus_empty(tmp))
+ return;
+
+ /* Assumption : cpumask refers to a single CPU */
+ spin_lock_irqsave(&gic_lock, flags);
+ for (;;) {
+ /* Re-route this IRQ */
+ GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
+
+ /*
+ * FIXME: assumption that _intrmap is ordered and has no holes
+ */
+
+ /* Update the intr_map */
+ _intrmap[irq].cpunum = first_cpu(tmp);
+
+ /* Update the pcpu_masks */
+ for (i = 0; i < NR_CPUS; i++)
+ clear_bit(irq, pcpu_masks[i].pcpu_mask);
+ set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+
+ }
+ irq_desc[irq].affinity = cpumask;
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+}
+#endif
+
+static struct irq_chip gic_irq_controller = {
+ .name = "MIPS GIC",
+ .startup = gic_irq_startup,
+ .ack = gic_irq_ack,
+ .mask = gic_mask_irq,
+ .mask_ack = gic_mask_irq,
+ .unmask = gic_unmask_irq,
+ .eoi = gic_unmask_irq,
+#ifdef CONFIG_SMP
+ .set_affinity = gic_set_affinity,
+#endif
+};
+
+static void __init setup_intr(unsigned int intr, unsigned int cpu,
+ unsigned int pin, unsigned int polarity, unsigned int trigtype)
+{
+ /* Setup Intr to Pin mapping */
+ if (pin & GIC_MAP_TO_NMI_MSK) {
+ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+ /* FIXME: hack to route NMI to all cpu's */
+ for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+ GICWRITE(GIC_REG_ADDR(SHARED,
+ GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+ 0xffffffff);
+ }
+ } else {
+ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
+ GIC_MAP_TO_PIN_MSK | pin);
+ /* Setup Intr to CPU mapping */
+ GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
+ }
+
+ /* Setup Intr Polarity */
+ GIC_SET_POLARITY(intr, polarity);
+
+ /* Setup Intr Trigger Type */
+ GIC_SET_TRIGGER(intr, trigtype);
+
+ /* Init Intr Masks */
+ GIC_SET_INTR_MASK(intr, 0);
+}
+
+static void __init gic_basic_init(void)
+{
+ unsigned int i, cpu;
+
+ /* Setup defaults */
+ for (i = 0; i < GIC_NUM_INTRS; i++) {
+ GIC_SET_POLARITY(i, GIC_POL_POS);
+ GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
+ GIC_SET_INTR_MASK(i, 0);
+ }
+
+ /* Setup specifics */
+ for (i = 0; i < _mapsize; i++) {
+ cpu = _intrmap[i].cpunum;
+ if (cpu == X)
+ continue;
+
+ setup_intr(_intrmap[i].intrnum,
+ _intrmap[i].cpunum,
+ _intrmap[i].pin,
+ _intrmap[i].polarity,
+ _intrmap[i].trigtype);
+ /* Initialise per-cpu Interrupt software masks */
+ if (_intrmap[i].ipiflag)
+ set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
+ }
+
+ vpe_local_setup(numvpes);
+
+ for (i = _irqbase; i < (_irqbase + numintrs); i++)
+ set_irq_chip(i, &gic_irq_controller);
+}
+
+void __init gic_init(unsigned long gic_base_addr,
+ unsigned long gic_addrspace_size,
+ struct gic_intr_map *intr_map, unsigned int intr_map_size,
+ unsigned int irqbase)
+{
+ unsigned int gicconfig;
+
+ _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
+ gic_addrspace_size);
+ _irqbase = irqbase;
+ _intrmap = intr_map;
+ _mapsize = intr_map_size;
+
+ GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
+ numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
+ GIC_SH_CONFIG_NUMINTRS_SHF;
+ numintrs = ((numintrs + 1) * 8);
+
+ numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
+ GIC_SH_CONFIG_NUMVPES_SHF;
+
+ pr_debug("%s called\n", __func__);
+
+ gic_basic_init();
+}
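gic_init() expects the caller (Malta platform code, per the comments above) to pass a gic_intr_map describing every shared interrupt. A hypothetical sketch of that call -- the field names are inferred from their uses above, and GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, GIC_IRQ_BASE and the table contents are placeholders:

static struct gic_intr_map board_gic_intr_map[] = {
	/* route shared interrupt 3 to VPE 0, pin 0, active high, level triggered */
	{ .intrnum = 3, .cpunum = 0, .pin = 0,
	  .polarity = GIC_POL_POS, .trigtype = GIC_TRIG_LEVEL, .ipiflag = 0 },
	/* ... one entry per shared interrupt, IPIs with .ipiflag = 1 ... */
};

void __init board_gic_init(void)
{
	gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, board_gic_intr_map,
		 ARRAY_SIZE(board_gic_intr_map), GIC_IRQ_BASE);
}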
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 4edc7e451d91..963c16d266ab 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -17,6 +17,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/msc01_ic.h>
+#include <asm/traps.h>
static unsigned long _icctrl_msc;
#define MSC01_IC_REG_BASE _icctrl_msc
@@ -98,14 +99,13 @@ void ll_msc_irq(void)
}
}
-void
-msc_bind_eic_interrupt(unsigned int irq, unsigned int set)
+static void msc_bind_eic_interrupt(int irq, int set)
{
MSCIC_WRITE(MSC01_IC_RAMW,
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
}
-struct irq_chip msc_levelirq_type = {
+static struct irq_chip msc_levelirq_type = {
.name = "SOC-it-Level",
.ack = level_mask_and_ack_msc_irq,
.mask = mask_msc_irq,
@@ -115,7 +115,7 @@ struct irq_chip msc_levelirq_type = {
.end = end_msc_irq,
};
-struct irq_chip msc_edgeirq_type = {
+static struct irq_chip msc_edgeirq_type = {
.name = "SOC-it-Edge",
.ack = edge_mask_and_ack_msc_irq,
.mask = mask_msc_irq,
@@ -128,8 +128,6 @@ struct irq_chip msc_edgeirq_type = {
void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
{
- extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
-
_icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
/* Reset interrupt controller - initialises all registers to 0 */
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index c0faabd52010..6c8e8c4246f7 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -14,7 +14,7 @@
/* #define DEBUG_SIG */
#ifdef DEBUG_SIG
-# define DEBUGP(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ##args)
+# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
# define DEBUGP(fmt, args...)
#endif
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
new file mode 100644
index 000000000000..ca476c4f62a5
--- /dev/null
+++ b/arch/mips/kernel/smp-cmp.c
@@ -0,0 +1,265 @@
+/*
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Chris Dearman (chris@mips.com)
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+
+/*
+ * Crude manipulation of the CPU masks to control which
+ * which CPU's are brought online during initialisation
+ *
+ * Beware... this needs to be called after CPU discovery
+ * but before CPU bringup
+ */
+static int __init allowcpus(char *str)
+{
+ cpumask_t cpu_allow_map;
+ char buf[256];
+ int len;
+
+ cpus_clear(cpu_allow_map);
+ if (cpulist_parse(str, cpu_allow_map) == 0) {
+ cpu_set(0, cpu_allow_map);
+ cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
+ len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map);
+ buf[len] = '\0';
+ pr_debug("Allowable CPUs: %s\n", buf);
+ return 1;
+ } else
+ return 0;
+}
+__setup("allowcpus=", allowcpus);
+
+static void ipi_call_function(unsigned int cpu)
+{
+ unsigned int action = 0;
+
+ pr_debug("CPU%d: %s cpu %d status %08x\n",
+ smp_processor_id(), __func__, cpu, read_c0_status());
+
+ switch (cpu) {
+ case 0:
+ action = GIC_IPI_EXT_INTR_CALLFNC_VPE0;
+ break;
+ case 1:
+ action = GIC_IPI_EXT_INTR_CALLFNC_VPE1;
+ break;
+ case 2:
+ action = GIC_IPI_EXT_INTR_CALLFNC_VPE2;
+ break;
+ case 3:
+ action = GIC_IPI_EXT_INTR_CALLFNC_VPE3;
+ break;
+ }
+ gic_send_ipi(action);
+}
+
+
+static void ipi_resched(unsigned int cpu)
+{
+ unsigned int action = 0;
+
+ pr_debug("CPU%d: %s cpu %d status %08x\n",
+ smp_processor_id(), __func__, cpu, read_c0_status());
+
+ switch (cpu) {
+ case 0:
+ action = GIC_IPI_EXT_INTR_RESCHED_VPE0;
+ break;
+ case 1:
+ action = GIC_IPI_EXT_INTR_RESCHED_VPE1;
+ break;
+ case 2:
+ action = GIC_IPI_EXT_INTR_RESCHED_VPE2;
+ break;
+ case 3:
+ action = GIC_IPI_EXT_INTR_RESCHED_VPE3;
+ break;
+ }
+ gic_send_ipi(action);
+}
+
+/*
+ * FIXME: This isn't restricted to CMP
+ * The SMVP kernel could use GIC interrupts if available
+ */
+void cmp_send_ipi_single(int cpu, unsigned int action)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ switch (action) {
+ case SMP_CALL_FUNCTION:
+ ipi_call_function(cpu);
+ break;
+
+ case SMP_RESCHEDULE_YOURSELF:
+ ipi_resched(cpu);
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu_mask(i, mask)
+ cmp_send_ipi_single(i, action);
+}
+
+static void cmp_init_secondary(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ /* Assume GIC is present */
+ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
+ STATUSF_IP7);
+
+ /* Enable per-cpu interrupts: platform specific */
+
+ c->core = (read_c0_ebase() >> 1) & 0xff;
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+ c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
+#endif
+#ifdef CONFIG_MIPS_MT_SMTC
+ c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
+#endif
+}
+
+static void cmp_smp_finish(void)
+{
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+ /* CDFIXME: remove this? */
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+static void cmp_cpus_done(void)
+{
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it running
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info the gp
+ */
+static void cmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct thread_info *gp = task_thread_info(idle);
+ unsigned long sp = __KSTK_TOS(idle);
+ unsigned long pc = (unsigned long)&smp_bootstrap;
+ unsigned long a0 = 0;
+
+ pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
+ __func__, cpu);
+
+#if 0
+ /* Needed? */
+ flush_icache_range((unsigned long)gp,
+ (unsigned long)(gp + sizeof(struct thread_info)));
+#endif
+
+ amon_cpu_start(cpu, pc, sp, gp, a0);
+}
+
+/*
+ * Common setup before any secondaries are started
+ */
+void __init cmp_smp_setup(void)
+{
+ int i;
+ int ncpu = 0;
+
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ for (i = 1; i < NR_CPUS; i++) {
+ if (amon_cpu_avail(i)) {
+ cpu_set(i, phys_cpu_present_map);
+ __cpu_number_map[i] = ++ncpu;
+ __cpu_logical_map[ncpu] = i;
+ }
+ }
+
+ if (cpu_has_mipsmt) {
+ unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
+
+ nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+ smp_num_siblings = nvpe;
+ }
+ pr_info("Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+void __init cmp_prepare_cpus(unsigned int max_cpus)
+{
+ pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
+ smp_processor_id(), __func__, max_cpus);
+
+ /*
+ * FIXME: some of these options are per-system, some per-core and
+ * some per-cpu
+ */
+ mips_mt_set_cpuoptions();
+}
+
+struct plat_smp_ops cmp_smp_ops = {
+ .send_ipi_single = cmp_send_ipi_single,
+ .send_ipi_mask = cmp_send_ipi_mask,
+ .init_secondary = cmp_init_secondary,
+ .smp_finish = cmp_smp_finish,
+ .cpus_done = cmp_cpus_done,
+ .boot_secondary = cmp_boot_secondary,
+ .smp_setup = cmp_smp_setup,
+ .prepare_cpus = cmp_prepare_cpus,
+};
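cmp_smp_ops only takes effect once a platform registers it. A hypothetical registration sketch, falling back to the vsmp_smp_ops from smp-mt.c when no Coherence Manager is found (board_has_cmp() is a placeholder, not part of this patch):

/* Hypothetical early-setup hook choosing between CMP and MT SMP ops. */
void __init board_register_smp_ops(void)
{
	if (board_has_cmp())
		register_smp_ops(&cmp_smp_ops);
	else
		register_smp_ops(&vsmp_smp_ops);
}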
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 89e6f6aa5166..87a1816c1f45 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -36,110 +36,7 @@
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
-#define MIPS_CPU_IPI_RESCHED_IRQ 0
-#define MIPS_CPU_IPI_CALL_IRQ 1
-
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-#if 0
-static void dump_mtregisters(int vpe, int tc)
-{
- printk("vpe %d tc %d\n", vpe, tc);
-
- settc(tc);
-
- printk(" c0 status 0x%lx\n", read_vpe_c0_status());
- printk(" vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
- printk(" vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0());
- printk(" tcstatus 0x%lx\n", read_tc_c0_tcstatus());
- printk(" tcrestart 0x%lx\n", read_tc_c0_tcrestart());
- printk(" tcbind 0x%lx\n", read_tc_c0_tcbind());
- printk(" tchalt 0x%lx\n", read_tc_c0_tchalt());
-}
-#endif
-
-void __init sanitize_tlb_entries(void)
-{
- int i, tlbsiz;
- unsigned long mvpconf0, ncpu;
-
- if (!cpu_has_mipsmt)
- return;
-
- /* Enable VPC */
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- back_to_back_c0_hazard();
-
- /* Disable TLB sharing */
- clear_c0_mvpcontrol(MVPCONTROL_STLB);
-
- mvpconf0 = read_c0_mvpconf0();
-
- printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
- (mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
- (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
-
- tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
- ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
- printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
-
- if (tlbsiz > 0) {
- /* share them out across the vpe's */
- tlbsiz /= ncpu;
-
- printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
-
- for (i = 0; i < ncpu; i++) {
- settc(i);
-
- if (i == 0)
- write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
- else
- write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
- (tlbsiz << 25));
- }
- }
-
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
-}
-
-static void ipi_resched_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
- smp_call_function_interrupt();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_DISABLED|IRQF_PERCPU,
- .name = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_DISABLED|IRQF_PERCPU,
- .name = "IPI_call"
-};
-
-static void __init smp_copy_vpe_config(void)
+static void __init smvp_copy_vpe_config(void)
{
write_vpe_c0_status(
(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
@@ -156,7 +53,7 @@ static void __init smp_copy_vpe_config(void)
write_vpe_c0_count(read_c0_count());
}
-static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
unsigned int ncpu)
{
if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
@@ -182,12 +79,12 @@ static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
if (tc != 0)
- smp_copy_vpe_config();
+ smvp_copy_vpe_config();
return ncpu;
}
-static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
unsigned long tmp;
@@ -254,15 +151,20 @@ static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
static void __cpuinit vsmp_init_secondary(void)
{
- /* Enable per-cpu interrupts */
+ extern int gic_present;
/* This is Malta specific: IPI,performance and timer inetrrupts */
- write_c0_status((read_c0_status() & ~ST0_IM ) |
- (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+ if (gic_present)
+ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+ STATUSF_IP6 | STATUSF_IP7);
+ else
+ change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+ STATUSF_IP6 | STATUSF_IP7);
}
static void __cpuinit vsmp_smp_finish(void)
{
+ /* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
@@ -323,7 +225,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
/*
* Common setup before any secondaries are started
* Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
+ * secondaries
*/
static void __init vsmp_smp_setup(void)
{
@@ -356,8 +258,8 @@ static void __init vsmp_smp_setup(void)
for (tc = 0; tc <= ntc; tc++) {
settc(tc);
- smp_tc_init(tc, mvpconf0);
- ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+ smvp_tc_init(tc, mvpconf0);
+ ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
}
/* Release config state */
@@ -371,21 +273,6 @@ static void __init vsmp_smp_setup(void)
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
mips_mt_set_cpuoptions();
-
- /* set up ipi interrupts */
- if (cpu_has_vint) {
- set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
- set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
- }
-
- cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
- cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
- setup_irq(cpu_ipi_resched_irq, &irq_resched);
- setup_irq(cpu_ipi_call_irq, &irq_call);
-
- set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
- set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}
struct plat_smp_ops vsmp_smp_ops = {
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9d41dab90a80..33780cc61ce9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -35,6 +35,7 @@
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
+#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
@@ -125,6 +126,8 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_set(cpu, cpu_callin_map);
+ synchronise_count_slave();
+
cpu_idle();
}
@@ -287,6 +290,7 @@ void smp_send_stop(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
mp_ops->cpus_done();
+ synchronise_count_master();
}
/* called from main before smp_init() */
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index b42e71c71119..3e863186cd22 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -174,14 +174,6 @@ static int clock_hang_reported[NR_CPUS];
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-/* Initialize shared TLB - the should probably migrate to smtc_setup_cpus() */
-
-void __init sanitize_tlb_entries(void)
-{
- printk("Deprecated sanitize_tlb_entries() invoked\n");
-}
-
-
/*
* Configure shared TLB - VPC configuration bit must be set by caller
*/
@@ -339,7 +331,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
/* In general, all TCs should have the same cpu_data indications */
memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
- if (cpu_data[0].cputype == CPU_34K)
+ if (cpu_data[0].cputype == CPU_34K ||
+ cpu_data[0].cputype == CPU_1004K)
cpu_data[cpu].options &= ~MIPS_CPU_FPU;
cpu_data[cpu].vpe_id = vpe;
cpu_data[cpu].tc_id = tc;
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
new file mode 100644
index 000000000000..6ddb507a87ef
--- /dev/null
+++ b/arch/mips/kernel/spram.c
@@ -0,0 +1,221 @@
+/*
+ * MIPS SPRAM support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 2007, 2008 MIPS Technologies, Inc.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/stddef.h>
+
+#include <asm/cpu.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/system.h>
+#include <asm/r4kcache.h>
+#include <asm/hazards.h>
+
+/*
+ * These definitions are correct for the 24K/34K/74K SPRAM sample
+ * implementation. The 4KS interpreted the tags differently...
+ */
+#define SPRAM_TAG0_ENABLE 0x00000080
+#define SPRAM_TAG0_PA_MASK 0xfffff000
+#define SPRAM_TAG1_SIZE_MASK 0xfffff000
+
+#define SPRAM_TAG_STRIDE 8
+
+#define ERRCTL_SPRAM (1 << 28)
+
+/* errctl access */
+#define read_c0_errctl(x) read_c0_ecc(x)
+#define write_c0_errctl(x) write_c0_ecc(x)
+
+/*
+ * Different semantics to the set_c0_* function built by __BUILD_SET_C0
+ */
+static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
+{
+ unsigned int res;
+ res = read_c0_errctl();
+ write_c0_errctl(res | set);
+ return res;
+}
+
+static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+
+ write_c0_taglo(data);
+ ehb();
+
+ cache_op(Index_Store_Tag_I, CKSEG0|offset);
+ ehb();
+
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+
+static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
+{
+ unsigned int data;
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ cache_op(Index_Load_Tag_I, CKSEG0 | offset);
+ ehb();
+ data = read_c0_taglo();
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+
+ return data;
+}
+
+static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ write_c0_dtaglo(data);
+ ehb();
+ cache_op(Index_Store_Tag_D, CKSEG0 | offset);
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+
+static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
+{
+ unsigned int data;
+ unsigned int errctl;
+
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ cache_op(Index_Load_Tag_D, CKSEG0 | offset);
+ ehb();
+ data = read_c0_dtaglo();
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+
+ return data;
+}
+
+static __cpuinit void probe_spram(char *type,
+ unsigned int base,
+ unsigned int (*read)(unsigned int),
+ void (*write)(unsigned int, unsigned int))
+{
+ unsigned int firstsize = 0, lastsize = 0;
+ unsigned int firstpa = 0, lastpa = 0, pa = 0;
+ unsigned int offset = 0;
+ unsigned int size, tag0, tag1;
+ unsigned int enabled;
+ int i;
+
+ /*
+ * The limit is arbitrary but avoids the loop running away if
+ * the SPRAM tags are implemented differently
+ */
+
+ for (i = 0; i < 8; i++) {
+ tag0 = read(offset);
+ tag1 = read(offset+SPRAM_TAG_STRIDE);
+ pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
+ type, i, tag0, tag1);
+
+ size = tag1 & SPRAM_TAG1_SIZE_MASK;
+
+ if (size == 0)
+ break;
+
+ if (i != 0) {
+ /* tags may repeat... */
+ if ((pa == firstpa && size == firstsize) ||
+ (pa == lastpa && size == lastsize))
+ break;
+ }
+
+ /* Align base with size */
+ base = (base + size - 1) & ~(size-1);
+
+ /* reprogram the base address base address and enable */
+ tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
+ write(offset, tag0);
+
+ base += size;
+
+ /* reread the tag */
+ tag0 = read(offset);
+ pa = tag0 & SPRAM_TAG0_PA_MASK;
+ enabled = tag0 & SPRAM_TAG0_ENABLE;
+
+ if (i == 0) {
+ firstpa = pa;
+ firstsize = size;
+ }
+
+ lastpa = pa;
+ lastsize = size;
+
+ if (strcmp(type, "DSPRAM") == 0) {
+ unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
+ unsigned int v;
+#define TDAT 0x5a5aa5a5
+ vp[0] = TDAT;
+ vp[1] = ~TDAT;
+
+ mb();
+
+ v = vp[0];
+ if (v != TDAT)
+ printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+ vp, TDAT, v);
+ v = vp[1];
+ if (v != ~TDAT)
+ printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+ vp+1, ~TDAT, v);
+ }
+
+ pr_info("%s%d: PA=%08x,Size=%08x%s\n",
+ type, i, pa, size, enabled ? ",enabled" : "");
+ offset += 2 * SPRAM_TAG_STRIDE;
+ }
+}
+
+__cpuinit void spram_config(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config0;
+
+ switch (c->cputype) {
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ config0 = read_c0_config();
+ /* FIXME: addresses are Malta specific */
+ if (config0 & (1<<24)) {
+ probe_spram("ISPRAM", 0x1c000000,
+ &ispram_load_tag, &ispram_store_tag);
+ }
+ if (config0 & (1<<23))
+ probe_spram("DSPRAM", 0x1c100000,
+ &dspram_load_tag, &dspram_store_tag);
+ }
+}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 000000000000..9021108eb9c1
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,159 @@
+/*
+ * Count register synchronisation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 expirelo
+ * value. This can cause a small timewarp for CPU0. All other CPU's should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ *
+ * FIXME: broken for SMTC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/r4k-timer.h>
+
+#include <asm/atomic.h>
+#include <asm/barrier.h>
+#include <asm/cpumask.h>
+#include <asm/mipsregs.h>
+
+static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
+
+#define COUNTON 100
+#define NR_LOOPS 5
+
+void __init synchronise_count_master(void)
+{
+ int i;
+ unsigned long flags;
+ unsigned int initcount;
+ int nslaves;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC needs to synchronise per VPE, not per CPU
+ * ignore for now
+ */
+ return;
+#endif
+
+ pr_info("Checking COUNT synchronization across %u CPUs: ",
+ num_online_cpus());
+
+ local_irq_save(flags);
+
+ /*
+ * Notify the slaves that it's time to start
+ */
+ atomic_set(&count_start_flag, 1);
+ smp_wmb();
+
+ /* Count will be initialised to expirelo for all CPU's */
+ initcount = expirelo;
+
+ /*
+ * We loop a few times to get a primed instruction cache,
+ * then the last pass is more or less synchronised and
+ * the master and slaves each set their cycle counters to a known
+ * value all at once. This reduces the chance of having random offsets
+ * between the processors, and guarantees that the maximum
+ * delay between the cycle counters is never bigger than
+ * the latency of information-passing (cachelines) between
+ * two CPUs.
+ */
+
+ nslaves = num_online_cpus()-1;
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= ncpus' */
+ while (atomic_read(&count_count_start) != nslaves)
+ mb();
+ atomic_set(&count_count_stop, 0);
+ smp_wmb();
+
+ /* this lets the slaves write their count register */
+ atomic_inc(&count_count_start);
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+ /*
+ * Wait for all slaves to leave the synchronization point:
+ */
+ while (atomic_read(&count_count_stop) != nslaves)
+ mb();
+ atomic_set(&count_count_start, 0);
+ smp_wmb();
+ atomic_inc(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+
+ local_irq_restore(flags);
+
+ /*
+ * i386 code reported the skew here, but the
+ * count registers were almost certainly out of sync
+ * so no point in alarming people
+ */
+ printk("done.\n");
+}
+
+void __init synchronise_count_slave(void)
+{
+ int i;
+ unsigned long flags;
+ unsigned int initcount;
+ int ncpus;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC needs to synchronise per VPE, not per CPU
+ * ignore for now
+ */
+ return;
+#endif
+
+ local_irq_save(flags);
+
+ /*
+ * Not every cpu is online at the time this gets called,
+ * so we first wait for the master to say everyone is ready
+ */
+
+ while (!atomic_read(&count_start_flag))
+ mb();
+
+ /* Count will be initialised to expirelo for all CPU's */
+ initcount = expirelo;
+
+ ncpus = num_online_cpus();
+ for (i = 0; i < NR_LOOPS; i++) {
+ atomic_inc(&count_count_start);
+ while (atomic_read(&count_count_start) != ncpus)
+ mb();
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+ atomic_inc(&count_count_stop);
+ while (atomic_read(&count_count_stop) != ncpus)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+
+ local_irq_restore(flags);
+}
+#undef NR_LOOPS
+#endif
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index b45a7093ca2d..1f467d534642 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -38,7 +38,6 @@ int __weak rtc_mips_set_time(unsigned long sec)
{
return 0;
}
-EXPORT_SYMBOL(rtc_mips_set_time);
int __weak rtc_mips_set_mmss(unsigned long nowtime)
{
@@ -50,13 +49,11 @@ int update_persistent_clock(struct timespec now)
return rtc_mips_set_mmss(now.tv_sec);
}
-int null_perf_irq(void)
+static int null_perf_irq(void)
{
return 0;
}
-EXPORT_SYMBOL(null_perf_irq);
-
int (*perf_irq)(void) = null_perf_irq;
EXPORT_SYMBOL(perf_irq);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 984c0d0a7b4d..cb8b0e2c7954 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -22,6 +22,7 @@
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
+#include <linux/ptrace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -80,19 +81,22 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long reg29)
{
- unsigned long *sp = (unsigned long *)reg29;
+ unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
- while (!kstack_end(sp)) {
- addr = *sp++;
- if (__kernel_text_address(addr))
- print_ip_sym(addr);
+#define IS_KVA01(a) ((((unsigned int)a) & 0xc0000000) == 0x80000000)
+ if (IS_KVA01(sp)) {
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr))
+ print_ip_sym(addr);
+ }
+ printk("\n");
}
- printk("\n");
}
#ifdef CONFIG_KALLSYMS
@@ -192,16 +196,19 @@ EXPORT_SYMBOL(dump_stack);
static void show_code(unsigned int __user *pc)
{
long i;
+ unsigned short __user *pc16 = NULL;
printk("\nCode:");
+ if ((unsigned long)pc & 1)
+ pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
- if (__get_user(insn, pc + i)) {
+ if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
printk(" (Bad address in epc)\n");
break;
}
- printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
+ printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
}
@@ -311,10 +318,21 @@ void show_regs(struct pt_regs *regs)
void show_registers(const struct pt_regs *regs)
{
+ const int field = 2 * sizeof(unsigned long);
+
__show_regs(regs);
print_modules();
- printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
- current->comm, task_pid_nr(current), current_thread_info(), current);
+ printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+ current->comm, current->pid, current_thread_info(), current,
+ field, current_thread_info()->tp_value);
+ if (cpu_has_userlocal) {
+ unsigned long tls;
+
+ tls = read_c0_userlocal();
+ if (tls != current_thread_info()->tp_value)
+ printk("*HwTLS: %0*lx\n", field, tls);
+ }
+
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
@@ -657,35 +675,24 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
force_sig_info(SIGFPE, &info, current);
}
-asmlinkage void do_bp(struct pt_regs *regs)
+static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ const char *str)
{
- unsigned int opcode, bcode;
siginfo_t info;
-
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
- goto out_sigsegv;
-
- /*
- * There is the ancient bug in the MIPS assemblers that the break
- * code starts left to bit 16 instead to bit 6 in the opcode.
- * Gas is bug-compatible, but not always, grrr...
- * We handle both cases with a simple heuristics. --macro
- */
- bcode = ((opcode >> 6) & ((1 << 20) - 1));
- if (bcode < (1 << 10))
- bcode <<= 10;
+ char b[40];
/*
- * (A short test says that IRIX 5.3 sends SIGTRAP for all break
- * insns, even for break codes that indicate arithmetic failures.
- * Weird ...)
+ * A short test says that IRIX 5.3 sends SIGTRAP for all trap
+ * insns, even for trap and break codes that indicate arithmetic
+ * failures. Weird ...
* But should we continue the brokenness??? --macro
*/
- switch (bcode) {
- case BRK_OVERFLOW << 10:
- case BRK_DIVZERO << 10:
- die_if_kernel("Break instruction in kernel code", regs);
- if (bcode == (BRK_DIVZERO << 10))
+ switch (code) {
+ case BRK_OVERFLOW:
+ case BRK_DIVZERO:
+ scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+ die_if_kernel(b, regs);
+ if (code == BRK_DIVZERO)
info.si_code = FPE_INTDIV;
else
info.si_code = FPE_INTOVF;
@@ -695,12 +702,34 @@ asmlinkage void do_bp(struct pt_regs *regs)
force_sig_info(SIGFPE, &info, current);
break;
case BRK_BUG:
- die("Kernel bug detected", regs);
+ die_if_kernel("Kernel bug detected", regs);
+ force_sig(SIGTRAP, current);
break;
default:
- die_if_kernel("Break instruction in kernel code", regs);
+ scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+ die_if_kernel(b, regs);
force_sig(SIGTRAP, current);
}
+}
+
+asmlinkage void do_bp(struct pt_regs *regs)
+{
+ unsigned int opcode, bcode;
+
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ goto out_sigsegv;
+
+ /*
+ * There is the ancient bug in the MIPS assemblers that the break
+ * code starts left to bit 16 instead to bit 6 in the opcode.
+ * Gas is bug-compatible, but not always, grrr...
+ * We handle both cases with a simple heuristics. --macro
+ */
+ bcode = ((opcode >> 6) & ((1 << 20) - 1));
+ if (bcode >= (1 << 10))
+ bcode >>= 10;
+
+ do_trap_or_bp(regs, bcode, "Break");
return;
out_sigsegv:
@@ -710,7 +739,6 @@ out_sigsegv:
asmlinkage void do_tr(struct pt_regs *regs)
{
unsigned int opcode, tcode = 0;
- siginfo_t info;
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
@@ -719,32 +747,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
if (!(opcode & OPCODE))
tcode = ((opcode >> 6) & ((1 << 10) - 1));
- /*
- * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
- * insns, even for trap codes that indicate arithmetic failures.
- * Weird ...)
- * But should we continue the brokenness??? --macro
- */
- switch (tcode) {
- case BRK_OVERFLOW:
- case BRK_DIVZERO:
- die_if_kernel("Trap instruction in kernel code", regs);
- if (tcode == BRK_DIVZERO)
- info.si_code = FPE_INTDIV;
- else
- info.si_code = FPE_INTOVF;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *) regs->cp0_epc;
- force_sig_info(SIGFPE, &info, current);
- break;
- case BRK_BUG:
- die("Kernel bug detected", regs);
- break;
- default:
- die_if_kernel("Trap instruction in kernel code", regs);
- force_sig(SIGTRAP, current);
- }
+ do_trap_or_bp(regs, tcode, "Trap");
return;
out_sigsegv:
@@ -985,6 +988,21 @@ asmlinkage void do_reserved(struct pt_regs *regs)
(regs->cp0_cause & 0x7f) >> 2);
}
+static int __initdata l1parity = 1;
+static int __init nol1parity(char *s)
+{
+ l1parity = 0;
+ return 1;
+}
+__setup("nol1par", nol1parity);
+static int __initdata l2parity = 1;
+static int __init nol2parity(char *s)
+{
+ l2parity = 0;
+ return 1;
+}
+__setup("nol2par", nol2parity);
+
/*
* Some MIPS CPUs can enable/disable for cache parity detection, but do
* it different ways.
@@ -994,6 +1012,62 @@ static inline void parity_protection_init(void)
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ {
+#define ERRCTL_PE 0x80000000
+#define ERRCTL_L2P 0x00800000
+ unsigned long errctl;
+ unsigned int l1parity_present, l2parity_present;
+
+ errctl = read_c0_ecc();
+ errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
+
+ /* probe L1 parity support */
+ write_c0_ecc(errctl | ERRCTL_PE);
+ back_to_back_c0_hazard();
+ l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+
+ /* probe L2 parity support */
+ write_c0_ecc(errctl|ERRCTL_L2P);
+ back_to_back_c0_hazard();
+ l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+
+ if (l1parity_present && l2parity_present) {
+ if (l1parity)
+ errctl |= ERRCTL_PE;
+ if (l1parity ^ l2parity)
+ errctl |= ERRCTL_L2P;
+ } else if (l1parity_present) {
+ if (l1parity)
+ errctl |= ERRCTL_PE;
+ } else if (l2parity_present) {
+ if (l2parity)
+ errctl |= ERRCTL_L2P;
+ } else {
+ /* No parity available */
+ }
+
+ printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
+
+ write_c0_ecc(errctl);
+ back_to_back_c0_hazard();
+ errctl = read_c0_ecc();
+ printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
+
+ if (l1parity_present)
+ printk(KERN_INFO "Cache parity protection %sabled\n",
+ (errctl & ERRCTL_PE) ? "en" : "dis");
+
+ if (l2parity_present) {
+ if (l1parity_present && l1parity)
+ errctl ^= ERRCTL_L2P;
+ printk(KERN_INFO "L2 cache parity protection %sabled\n",
+ (errctl & ERRCTL_L2P) ? "en" : "dis");
+ }
+ }
+ break;
+
case CPU_5KC:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
@@ -1306,6 +1380,17 @@ int cp0_compare_irq;
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
+static int __cpuinitdata noulri;
+
+static int __init ulri_disable(char *s)
+{
+ pr_info("Disabling ulri\n");
+ noulri = 1;
+
+ return 1;
+}
+__setup("noulri", ulri_disable);
+
void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -1342,16 +1427,14 @@ void __cpuinit per_cpu_trap_init(void)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
-#ifdef CONFIG_CPU_MIPSR2
if (cpu_has_mips_r2) {
unsigned int enable = 0x0000000f;
- if (cpu_has_userlocal)
+ if (!noulri && cpu_has_userlocal)
enable |= (1 << 29);
write_c0_hwrena(enable);
}
-#endif
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {